package storage

import (
	"bytes"
	"container/heap"
	"errors"
	"fmt"
	"io"
	"path/filepath"
	"sort"
	"sync"
	"sync/atomic"
	"time"
	"unsafe"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/mergeset"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/uint64set"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/workingsetcache"
	"github.com/VictoriaMetrics/fastcache"
	xxhash "github.com/cespare/xxhash/v2"
)
const (
	// Prefix for MetricName->TSID entries.
	nsPrefixMetricNameToTSID = 0

	// Prefix for Tag->MetricID entries.
	nsPrefixTagToMetricIDs = 1

	// Prefix for MetricID->TSID entries.
	nsPrefixMetricIDToTSID = 2

	// Prefix for MetricID->MetricName entries.
	nsPrefixMetricIDToMetricName = 3

	// Prefix for deleted MetricID entries.
	nsPrefixDeletedMetricID = 4

	// Prefix for Date->MetricID entries.
	nsPrefixDateToMetricID = 5

	// Prefix for (Date,Tag)->MetricID entries.
	nsPrefixDateTagToMetricIDs = 6
)
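
// The prefixes above form the first byte of every key stored in the mergeset
// table, so rows belonging to one namespace sort together and can be scanned
// with a single Seek. A rough sketch of some resulting key layouts
// (illustrative only; the authoritative layouts are defined by the
// marshaling code below):
//
//	nsPrefixMetricNameToTSID:     0 | MetricName | kvSeparatorChar | TSID
//	nsPrefixMetricIDToMetricName: 3 | MetricID(8 bytes) | MetricName
//	nsPrefixDateTagToMetricIDs:   6 | Date(8 bytes) | Tag | MetricIDs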
// indexDB represents an index db.
type indexDB struct {
	// Atomic counters must go at the top of the structure in order to be properly aligned to 8 bytes on 32-bit archs.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/212 .
	refCount uint64

	// The counter for newly created time series. It can be used for determining the time series churn rate.
	newTimeseriesCreated uint64

	// The number of missing MetricID -> TSID entries.
	// A high rate for this value means corrupted indexDB.
	missingTSIDsForMetricID uint64

	// The number of calls for date range searches.
	dateRangeSearchCalls uint64

	// The number of hits for date range searches.
	dateRangeSearchHits uint64

	// The number of calls for global search.
	globalSearchCalls uint64

	// missingMetricNamesForMetricID is a counter of missing MetricID -> MetricName entries.
	// A high rate may mean corrupted indexDB due to unclean shutdown.
	// The db must be automatically recovered after that.
	missingMetricNamesForMetricID uint64

	mustDrop uint64

	name string
	tb   *mergeset.Table

	extDB     *indexDB
	extDBLock sync.Mutex

	// Cache for fast TagFilters -> TSIDs lookup.
	tagFiltersCache *workingsetcache.Cache

	// The parent storage.
	s *Storage

	// Cache for (date, tagFilter) -> loopsCount, which is used for reducing
	// the amount of work when matching a set of filters.
	loopsPerDateTagFilterCache *workingsetcache.Cache

	indexSearchPool sync.Pool
}
// openIndexDB opens index db from the given path with the given caches.
func openIndexDB(path string, s *Storage) (*indexDB, error) {
	if s == nil {
		logger.Panicf("BUG: Storage must be non-nil")
	}

	tb, err := mergeset.OpenTable(path, invalidateTagFiltersCache, mergeTagToMetricIDsRows)
	if err != nil {
		return nil, fmt.Errorf("cannot open indexDB %q: %w", path, err)
	}

	name := filepath.Base(path)

	// Do not persist tagFiltersCache in files, since it is very volatile.
	mem := memory.Allowed()

	db := &indexDB{
		refCount: 1,
		tb:       tb,
		name:     name,

		tagFiltersCache:            workingsetcache.New(mem/32, time.Hour),
		s:                          s,
		loopsPerDateTagFilterCache: workingsetcache.New(mem/128, time.Hour),
	}
	return db, nil
}
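
// Example usage (a minimal sketch, not part of the package API; assumes a
// previously initialized Storage instance `s` and a valid on-disk path):
//
//	db, err := openIndexDB("/path/to/indexdb/table", s)
//	if err != nil {
//		logger.Panicf("FATAL: %s", err)
//	}
//	defer db.MustClose() // decrements refCount; drops the files if scheduleToDrop was called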
const noDeadline = 1<<64 - 1
// IndexDBMetrics contains essential metrics for indexDB.
type IndexDBMetrics struct {
	TagFiltersCacheSize         uint64
	TagFiltersCacheSizeBytes    uint64
	TagFiltersCacheSizeMaxBytes uint64
	TagFiltersCacheRequests     uint64
	TagFiltersCacheMisses       uint64

	DeletedMetricsCount uint64

	IndexDBRefCount uint64

	NewTimeseriesCreated    uint64
	MissingTSIDsForMetricID uint64

	RecentHourMetricIDsSearchCalls uint64
	RecentHourMetricIDsSearchHits  uint64

	DateRangeSearchCalls uint64
	DateRangeSearchHits  uint64
	GlobalSearchCalls    uint64

	MissingMetricNamesForMetricID uint64

	IndexBlocksWithMetricIDsProcessed      uint64
	IndexBlocksWithMetricIDsIncorrectOrder uint64

	MinTimestampForCompositeIndex     uint64
	CompositeFilterSuccessConversions uint64
	CompositeFilterMissingConversions uint64

	mergeset.TableMetrics
}
func (db *indexDB) scheduleToDrop() {
	atomic.AddUint64(&db.mustDrop, 1)
}
// UpdateMetrics updates m with metrics from the db.
func (db *indexDB) UpdateMetrics(m *IndexDBMetrics) {
	var cs fastcache.Stats

	cs.Reset()
	db.tagFiltersCache.UpdateStats(&cs)
	m.TagFiltersCacheSize += cs.EntriesCount
	m.TagFiltersCacheSizeBytes += cs.BytesSize
	m.TagFiltersCacheSizeMaxBytes += cs.MaxBytesSize
	m.TagFiltersCacheRequests += cs.GetCalls
	m.TagFiltersCacheMisses += cs.Misses

	m.DeletedMetricsCount += uint64(db.s.getDeletedMetricIDs().Len())

	m.IndexDBRefCount += atomic.LoadUint64(&db.refCount)
	m.NewTimeseriesCreated += atomic.LoadUint64(&db.newTimeseriesCreated)
	m.MissingTSIDsForMetricID += atomic.LoadUint64(&db.missingTSIDsForMetricID)

	m.DateRangeSearchCalls += atomic.LoadUint64(&db.dateRangeSearchCalls)
	m.DateRangeSearchHits += atomic.LoadUint64(&db.dateRangeSearchHits)
	m.GlobalSearchCalls += atomic.LoadUint64(&db.globalSearchCalls)

	m.MissingMetricNamesForMetricID += atomic.LoadUint64(&db.missingMetricNamesForMetricID)

	m.IndexBlocksWithMetricIDsProcessed = atomic.LoadUint64(&indexBlocksWithMetricIDsProcessed)
	m.IndexBlocksWithMetricIDsIncorrectOrder = atomic.LoadUint64(&indexBlocksWithMetricIDsIncorrectOrder)

	m.MinTimestampForCompositeIndex = uint64(db.s.minTimestampForCompositeIndex)
	m.CompositeFilterSuccessConversions = atomic.LoadUint64(&compositeFilterSuccessConversions)
	m.CompositeFilterMissingConversions = atomic.LoadUint64(&compositeFilterMissingConversions)

	db.tb.UpdateMetrics(&m.TableMetrics)
	db.doExtDB(func(extDB *indexDB) {
		extDB.tb.UpdateMetrics(&m.TableMetrics)
		m.IndexDBRefCount += atomic.LoadUint64(&extDB.refCount)
	})
}
func (db *indexDB) doExtDB(f func(extDB *indexDB)) bool {
	db.extDBLock.Lock()
	extDB := db.extDB
	if extDB != nil {
		extDB.incRef()
	}
	db.extDBLock.Unlock()
	if extDB == nil {
		return false
	}
	f(extDB)
	extDB.decRef()
	return true
}
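
// doExtDB runs f against the previous-generation indexDB (if any) while
// holding a temporary reference, so the extDB cannot be dropped mid-call.
// A minimal usage sketch (illustrative only):
//
//	ok := db.doExtDB(func(extDB *indexDB) {
//		// search extDB here; it stays alive for the duration of this callback
//	})
//	if !ok {
//		// there is no previous indexDB generation
//	}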
// SetExtDB sets external db to search.
//
// It decrements refCount for the previous extDB.
func (db *indexDB) SetExtDB(extDB *indexDB) {
	db.extDBLock.Lock()
	prevExtDB := db.extDB
	db.extDB = extDB
	db.extDBLock.Unlock()

	if prevExtDB != nil {
		prevExtDB.decRef()
	}
}
// MustClose closes db.
func (db *indexDB) MustClose() {
	db.decRef()
}

func (db *indexDB) incRef() {
	atomic.AddUint64(&db.refCount, 1)
}

func (db *indexDB) decRef() {
	n := atomic.AddUint64(&db.refCount, ^uint64(0))
	if int64(n) < 0 {
		logger.Panicf("BUG: negative refCount: %d", n)
	}
	if n > 0 {
		return
	}

	tbPath := db.tb.Path()
	db.tb.MustClose()
	db.SetExtDB(nil)

	// Free space occupied by caches owned by db.
	db.tagFiltersCache.Stop()
	db.loopsPerDateTagFilterCache.Stop()

	db.tagFiltersCache = nil
	db.s = nil
	db.loopsPerDateTagFilterCache = nil

	if atomic.LoadUint64(&db.mustDrop) == 0 {
		return
	}

	logger.Infof("dropping indexDB %q", tbPath)
	fs.MustRemoveAll(tbPath)
	logger.Infof("indexDB %q has been dropped", tbPath)
}
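
// Note on the refcounting scheme above: decRef subtracts 1 by adding the
// two's complement ^uint64(0) (i.e. math.MaxUint64), so an underflow past
// zero shows up as a huge unsigned value whose int64 view is negative; that
// is what the int64(n) < 0 check catches. The final decRef (n == 0) releases
// the table and caches, and additionally removes the on-disk files when
// scheduleToDrop was called earlier.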
func (db *indexDB) getFromTagFiltersCache(key []byte) ([]TSID, bool) {
	compressedBuf := tagBufPool.Get()
	defer tagBufPool.Put(compressedBuf)

	compressedBuf.B = db.tagFiltersCache.GetBig(compressedBuf.B[:0], key)
	if len(compressedBuf.B) == 0 {
		return nil, false
	}

	buf := tagBufPool.Get()
	defer tagBufPool.Put(buf)

	var err error
	buf.B, err = encoding.DecompressZSTD(buf.B[:0], compressedBuf.B)
	if err != nil {
		logger.Panicf("FATAL: cannot decompress tsids from tagFiltersCache: %s", err)
	}
	tsids, err := unmarshalTSIDs(nil, buf.B)
	if err != nil {
		logger.Panicf("FATAL: cannot unmarshal tsids from tagFiltersCache: %s", err)
	}
	return tsids, true
}
var tagBufPool bytesutil.ByteBufferPool
func (db *indexDB) putToTagFiltersCache(tsids []TSID, key []byte) {
	buf := tagBufPool.Get()
	buf.B = marshalTSIDs(buf.B[:0], tsids)

	compressedBuf := tagBufPool.Get()
	compressedBuf.B = encoding.CompressZSTDLevel(compressedBuf.B[:0], buf.B, 1)
	tagBufPool.Put(buf)

	db.tagFiltersCache.SetBig(key, compressedBuf.B)
	tagBufPool.Put(compressedBuf)
}
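
// The two functions above form a simple cache round-trip: TSIDs are
// marshaled, compressed with ZSTD level 1 (cheap compression, since cached
// entries are written often) and stored under the tag-filters key. A sketch
// of the expected round-trip (illustrative only):
//
//	key := marshalTagFiltersKey(nil, tfss, tr, true)
//	db.putToTagFiltersCache(tsids, key)
//	cached, ok := db.getFromTagFiltersCache(key)
//	// ok == true and cached contains the same TSIDs until the entry is
//	// evicted or tagFiltersKeyGen is bumped by invalidateTagFiltersCache.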
func (db *indexDB) getFromMetricIDCache(dst *TSID, metricID uint64) error {
	// There is no need in checking for deleted metricIDs here, since they
	// must be checked by the caller.
	buf := (*[unsafe.Sizeof(*dst)]byte)(unsafe.Pointer(dst))
	key := (*[unsafe.Sizeof(metricID)]byte)(unsafe.Pointer(&metricID))
	tmp := db.s.metricIDCache.Get(buf[:0], key[:])
	if len(tmp) == 0 {
		// The TSID for the given metricID wasn't found in the cache.
		return io.EOF
	}
	if &tmp[0] != &buf[0] || len(tmp) != len(buf) {
		return fmt.Errorf("corrupted MetricID->TSID cache: unexpected size for metricID=%d value; got %d bytes; want %d bytes", metricID, len(tmp), len(buf))
	}
	return nil
}

func (db *indexDB) putToMetricIDCache(metricID uint64, tsid *TSID) {
	buf := (*[unsafe.Sizeof(*tsid)]byte)(unsafe.Pointer(tsid))
	key := (*[unsafe.Sizeof(metricID)]byte)(unsafe.Pointer(&metricID))
	db.s.metricIDCache.Set(key[:], buf[:])
}

func (db *indexDB) getMetricNameFromCache(dst []byte, metricID uint64) []byte {
	// There is no need in checking for deleted metricIDs here, since they
	// must be checked by the caller.
	key := (*[unsafe.Sizeof(metricID)]byte)(unsafe.Pointer(&metricID))
	return db.s.metricNameCache.Get(dst, key[:])
}

func (db *indexDB) putMetricNameToCache(metricID uint64, metricName []byte) {
	key := (*[unsafe.Sizeof(metricID)]byte)(unsafe.Pointer(&metricID))
	db.s.metricNameCache.Set(key[:], metricName)
}
func marshalTagFiltersKey(dst []byte, tfss []*TagFilters, tr TimeRange, versioned bool) []byte {
	prefix := ^uint64(0)
	if versioned {
		prefix = atomic.LoadUint64(&tagFiltersKeyGen)
	}
	// Round start and end times to per-day granularity according to the per-day inverted index.
	startDate := uint64(tr.MinTimestamp) / msecPerDay
	endDate := uint64(tr.MaxTimestamp) / msecPerDay
	dst = encoding.MarshalUint64(dst, prefix)
	dst = encoding.MarshalUint64(dst, startDate)
	dst = encoding.MarshalUint64(dst, endDate)
	for _, tfs := range tfss {
		dst = append(dst, 0) // separator between tfs groups.
		for i := range tfs.tfs {
			dst = tfs.tfs[i].Marshal(dst)
		}
	}
	return dst
}
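
// The resulting cache key layout is therefore (a sketch derived from the
// marshaling above):
//
//	generation(8 bytes) | startDate(8 bytes) | endDate(8 bytes) | 0 | tfs... | 0 | tfs...
//
// Embedding the tagFiltersKeyGen generation number makes invalidation O(1):
// bumping the generation makes every previously written key unreachable,
// instead of requiring a scan-and-purge over the cache.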
func invalidateTagFiltersCache() {
	// This function must be fast, since it is called each
	// time a new timeseries is added.
	atomic.AddUint64(&tagFiltersKeyGen, 1)
}

var tagFiltersKeyGen uint64
func marshalTSIDs(dst []byte, tsids []TSID) []byte {
	dst = encoding.MarshalUint64(dst, uint64(len(tsids)))
	for i := range tsids {
		dst = tsids[i].Marshal(dst)
	}
	return dst
}
func unmarshalTSIDs(dst []TSID, src []byte) ([]TSID, error) {
	if len(src) < 8 {
		return dst, fmt.Errorf("cannot unmarshal the number of tsids from %d bytes; require at least %d bytes", len(src), 8)
	}
	n := encoding.UnmarshalUint64(src)
	src = src[8:]
	dstLen := len(dst)
	if nn := dstLen + int(n) - cap(dst); nn > 0 {
		dst = append(dst[:cap(dst)], make([]TSID, nn)...)
	}
	dst = dst[:dstLen+int(n)]
	for i := 0; i < int(n); i++ {
		tail, err := dst[dstLen+i].Unmarshal(src)
		if err != nil {
			return dst, fmt.Errorf("cannot unmarshal tsid #%d out of %d: %w", i, n, err)
		}
		src = tail
	}
	if len(src) > 0 {
		return dst, fmt.Errorf("non-zero tail left after unmarshaling %d tsids; len(tail)=%d", n, len(src))
	}
	return dst, nil
}
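
// The wire format above is a uint64 count followed by fixed-size TSID
// records, so a successful round-trip should satisfy (sketch):
//
//	data := marshalTSIDs(nil, tsids)
//	got, err := unmarshalTSIDs(nil, data)
//	// err == nil && reflect.DeepEqual(got, tsids)
//
// unmarshalTSIDs grows dst up-front instead of appending one TSID at a time
// in order to avoid repeated reallocations for large TSID lists.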
// getTSIDByNameNoCreate fills the dst with TSID for the given metricName.
//
// It returns io.EOF if the given mn isn't found locally.
func (db *indexDB) getTSIDByNameNoCreate(dst *TSID, metricName []byte) error {
	is := db.getIndexSearch(noDeadline)
	err := is.getTSIDByMetricName(dst, metricName)
	db.putIndexSearch(is)
	if err == nil {
		return nil
	}
	if err != io.EOF {
		return fmt.Errorf("cannot search TSID by MetricName %q: %w", metricName, err)
	}

	// Do not search for the TSID in the external storage,
	// since this function is already called by another indexDB instance.

	// The TSID for the given mn wasn't found.
	return io.EOF
}
type indexSearch struct {
	db *indexDB
	ts mergeset.TableSearch
	kb bytesutil.ByteBuffer
	mp tagToMetricIDsRowParser

	// deadline in unix timestamp seconds for the given search.
	deadline uint64

	// tsidByNameMisses and tsidByNameSkips are used for a performance
	// hack in GetOrCreateTSIDByName. See the comment there.
	tsidByNameMisses int
	tsidByNameSkips  int
}
// GetOrCreateTSIDByName fills the dst with TSID for the given metricName.
func (is *indexSearch) GetOrCreateTSIDByName(dst *TSID, metricName []byte) error {
	// A hack: skip searching for the TSID after many serial misses.
	// This should improve insertion performance for big batches
	// of new time series.
	if is.tsidByNameMisses < 100 {
		err := is.getTSIDByMetricName(dst, metricName)
		if err == nil {
			is.tsidByNameMisses = 0
			return nil
		}
		if err != io.EOF {
			return fmt.Errorf("cannot search TSID by MetricName %q: %w", metricName, err)
		}
		is.tsidByNameMisses++
	} else {
		is.tsidByNameSkips++
		if is.tsidByNameSkips > 10000 {
			is.tsidByNameSkips = 0
			is.tsidByNameMisses = 0
		}
	}

	// TSID for the given name wasn't found. Create it.
	// It is OK if duplicate TSID for mn is created by concurrent goroutines.
	// Metric results will be merged by mn after TableSearch.
	if err := is.db.createTSIDByName(dst, metricName); err != nil {
		return fmt.Errorf("cannot create TSID by MetricName %q: %w", metricName, err)
	}
	return nil
}
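
// In other words: after 100 consecutive index misses the search is assumed
// to be futile for the current batch, and the next 10000 lookups create new
// TSIDs directly without touching the index. After 10000 skips both counters
// reset and the index is probed again, so a long-running ingestion stream
// cannot get stuck in the skip mode forever.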
func (db *indexDB) getIndexSearch(deadline uint64) *indexSearch {
	v := db.indexSearchPool.Get()
	if v == nil {
		v = &indexSearch{
			db: db,
		}
	}
	is := v.(*indexSearch)
	is.ts.Init(db.tb)
	is.deadline = deadline
	return is
}
func (db *indexDB) putIndexSearch(is *indexSearch) {
	is.ts.MustClose()
	is.kb.Reset()
	is.mp.Reset()
	is.deadline = 0

	// Do not reset tsidByNameMisses and tsidByNameSkips,
	// since they are used in GetOrCreateTSIDByName across call boundaries.
	db.indexSearchPool.Put(is)
}
func (db *indexDB) createTSIDByName(dst *TSID, metricName []byte) error {
	mn := GetMetricName()
	defer PutMetricName(mn)
	if err := mn.Unmarshal(metricName); err != nil {
		return fmt.Errorf("cannot unmarshal metricName %q: %w", metricName, err)
	}

	if err := db.generateTSID(dst, metricName, mn); err != nil {
		return fmt.Errorf("cannot generate TSID: %w", err)
	}
	if err := db.createIndexes(dst, mn); err != nil {
		return fmt.Errorf("cannot create indexes: %w", err)
	}

	// There is no need to invalidate the tag cache, since it is invalidated
	// on db.tb flush via the invalidateTagFiltersCache flushCallback passed to OpenTable.

	atomic.AddUint64(&db.newTimeseriesCreated, 1)
	if logNewSeries {
		logger.Infof("new series created: %s", mn.String())
	}
	return nil
}
// SetLogNewSeries updates new series logging.
//
// This function must be called before calling any storage functions.
func SetLogNewSeries(ok bool) {
	logNewSeries = ok
}

var logNewSeries = false
func (db *indexDB) generateTSID(dst *TSID, metricName []byte, mn *MetricName) error {
	// Search the TSID in the external storage.
	// This is usually the db from the previous period.
	var err error
	if db.doExtDB(func(extDB *indexDB) {
		err = extDB.getTSIDByNameNoCreate(dst, metricName)
	}) {
		if err == nil {
			// The TSID has been found in the external storage.
			return nil
		}
		if err != io.EOF {
			return fmt.Errorf("external search failed: %w", err)
		}
	}

	// The TSID wasn't found in the external storage.
	// Generate it locally.
	dst.MetricGroupID = xxhash.Sum64(mn.MetricGroup)
	if len(mn.Tags) > 0 {
		dst.JobID = uint32(xxhash.Sum64(mn.Tags[0].Value))
	}
	if len(mn.Tags) > 1 {
		dst.InstanceID = uint32(xxhash.Sum64(mn.Tags[1].Value))
	}
	dst.MetricID = generateUniqueMetricID()
	return nil
}
func (db *indexDB) createIndexes(tsid *TSID, mn *MetricName) error {
	// The order of index items is important.
	// It guarantees index consistency.

	ii := getIndexItems()
	defer putIndexItems(ii)

	// Create MetricName -> TSID index.
	ii.B = append(ii.B, nsPrefixMetricNameToTSID)
	ii.B = mn.Marshal(ii.B)
	ii.B = append(ii.B, kvSeparatorChar)
	ii.B = tsid.Marshal(ii.B)
	ii.Next()

	// Create MetricID -> MetricName index.
	ii.B = marshalCommonPrefix(ii.B, nsPrefixMetricIDToMetricName)
	ii.B = encoding.MarshalUint64(ii.B, tsid.MetricID)
	ii.B = mn.Marshal(ii.B)
	ii.Next()

	// Create MetricID -> TSID index.
	ii.B = marshalCommonPrefix(ii.B, nsPrefixMetricIDToTSID)
	ii.B = encoding.MarshalUint64(ii.B, tsid.MetricID)
	ii.B = tsid.Marshal(ii.B)
	ii.Next()

	prefix := kbPool.Get()
	prefix.B = marshalCommonPrefix(prefix.B[:0], nsPrefixTagToMetricIDs)
	ii.registerTagIndexes(prefix.B, mn, tsid.MetricID)
	kbPool.Put(prefix)

	return db.tb.AddItems(ii.Items)
}
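
// For a single new series, createIndexes therefore emits one row per
// namespace above, plus one Tag->MetricID row per tag (via
// registerTagIndexes). Roughly (an interpretation, not normative):
//
//	MetricName->TSID:     lookups during ingestion
//	MetricID->MetricName: reverse lookups when rendering query results
//	MetricID->TSID:       recovering a full TSID from a bare metricID
//	Tag->MetricID:        the inverted index consulted by tag filters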
type indexItems struct {
	B     []byte
	Items [][]byte

	start int
}

func (ii *indexItems) reset() {
	ii.B = ii.B[:0]
	ii.Items = ii.Items[:0]
	ii.start = 0
}

func (ii *indexItems) Next() {
	ii.Items = append(ii.Items, ii.B[ii.start:])
	ii.start = len(ii.B)
}

func getIndexItems() *indexItems {
	v := indexItemsPool.Get()
	if v == nil {
		return &indexItems{}
	}
	return v.(*indexItems)
}

func putIndexItems(ii *indexItems) {
	ii.reset()
	indexItemsPool.Put(ii)
}

var indexItemsPool sync.Pool
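
// indexItems packs many small index rows into one contiguous buffer: callers
// append bytes to ii.B and call Next() to seal the current row, which appends
// a subslice of ii.B to ii.Items. A minimal usage sketch (illustrative only):
//
//	ii := getIndexItems()
//	ii.B = append(ii.B, nsPrefixDeletedMetricID)
//	ii.B = encoding.MarshalUint64(ii.B, metricID)
//	ii.Next() // ii.Items now holds one complete row
//	err := db.tb.AddItems(ii.Items)
//	putIndexItems(ii)
//
// This avoids one allocation per row and plays well with sync.Pool reuse.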
// SearchTagKeysOnTimeRange returns all the tag keys on the given tr.
func (db *indexDB) SearchTagKeysOnTimeRange(tr TimeRange, maxTagKeys int, deadline uint64) ([]string, error) {
	tks := make(map[string]struct{})
	is := db.getIndexSearch(deadline)
	err := is.searchTagKeysOnTimeRange(tks, tr, maxTagKeys)
	db.putIndexSearch(is)
	if err != nil {
		return nil, err
	}

	ok := db.doExtDB(func(extDB *indexDB) {
		is := extDB.getIndexSearch(deadline)
		err = is.searchTagKeysOnTimeRange(tks, tr, maxTagKeys)
		extDB.putIndexSearch(is)
	})
	if ok && err != nil {
		return nil, err
	}

	keys := make([]string, 0, len(tks))
	for key := range tks {
		// Do not skip empty keys, since they are converted to __name__
		keys = append(keys, key)
	}
	// Do not sort keys, since they must be sorted by vmselect.
	return keys, nil
}
func (is *indexSearch) searchTagKeysOnTimeRange(tks map[string]struct{}, tr TimeRange, maxTagKeys int) error {
	minDate := uint64(tr.MinTimestamp) / msecPerDay
	maxDate := uint64(tr.MaxTimestamp) / msecPerDay
	if minDate > maxDate || maxDate-minDate > maxDaysForPerDaySearch {
		return is.searchTagKeys(tks, maxTagKeys)
	}

	var mu sync.Mutex
	var wg sync.WaitGroup
	var errGlobal error
	for date := minDate; date <= maxDate; date++ {
		wg.Add(1)
		go func(date uint64) {
			defer wg.Done()
			tksLocal := make(map[string]struct{})
			isLocal := is.db.getIndexSearch(is.deadline)
			err := isLocal.searchTagKeysOnDate(tksLocal, date, maxTagKeys)
			is.db.putIndexSearch(isLocal)
			mu.Lock()
			defer mu.Unlock()
			if errGlobal != nil {
				return
			}
			if err != nil {
				errGlobal = err
				return
			}
			if len(tks) >= maxTagKeys {
				return
			}
			for k := range tksLocal {
				tks[k] = struct{}{}
			}
		}(date)
	}
	wg.Wait()
	return errGlobal
}
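
// The fan-out pattern above (one goroutine per day, a local result set per
// goroutine, and a mutex-guarded merge into the shared map) recurs in the
// per-day tag value and tag value suffix searches below. Results merge only
// under the lock, so the shared map is never written concurrently, and the
// first error wins while later goroutines bail out early.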
func (is *indexSearch) searchTagKeysOnDate(tks map[string]struct{}, date uint64, maxTagKeys int) error {
	ts := &is.ts
	kb := &is.kb
	mp := &is.mp
	mp.Reset()
	dmis := is.db.s.getDeletedMetricIDs()
	loopsPaceLimiter := 0
	kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixDateTagToMetricIDs)
	kb.B = encoding.MarshalUint64(kb.B, date)
	prefix := kb.B
	ts.Seek(prefix)
	for len(tks) < maxTagKeys && ts.NextItem() {
		if loopsPaceLimiter&paceLimiterFastIterationsMask == 0 {
			if err := checkSearchDeadlineAndPace(is.deadline); err != nil {
				return err
			}
		}
		loopsPaceLimiter++
		item := ts.Item
		if !bytes.HasPrefix(item, prefix) {
			break
		}
		if err := mp.Init(item, nsPrefixDateTagToMetricIDs); err != nil {
			return err
		}
		if mp.IsDeletedTag(dmis) {
			continue
		}
		key := mp.Tag.Key
		if isArtificialTagKey(key) {
			// Skip artificially created tag key.
			continue
		}
		// Store tag key.
		tks[string(key)] = struct{}{}

		// Search for the next tag key.
		// The last char in kb.B must be tagSeparatorChar.
		// Just increment it in order to jump to the next tag key.
		kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixDateTagToMetricIDs)
		kb.B = encoding.MarshalUint64(kb.B, date)
		kb.B = marshalTagValue(kb.B, key)
		kb.B[len(kb.B)-1]++
		ts.Seek(kb.B)
	}
	if err := ts.Error(); err != nil {
		return fmt.Errorf("error during search for prefix %q: %w", prefix, err)
	}
	return nil
}
// SearchTagKeys returns all the tag keys.
func (db *indexDB) SearchTagKeys(maxTagKeys int, deadline uint64) ([]string, error) {
	tks := make(map[string]struct{})

	is := db.getIndexSearch(deadline)
	err := is.searchTagKeys(tks, maxTagKeys)
	db.putIndexSearch(is)
	if err != nil {
		return nil, err
	}

	ok := db.doExtDB(func(extDB *indexDB) {
		is := extDB.getIndexSearch(deadline)
		err = is.searchTagKeys(tks, maxTagKeys)
		extDB.putIndexSearch(is)
	})
	if ok && err != nil {
		return nil, err
	}

	keys := make([]string, 0, len(tks))
	for key := range tks {
		// Do not skip empty keys, since they are converted to __name__
		keys = append(keys, key)
	}
	// Do not sort keys, since they must be sorted by vmselect.
	return keys, nil
}
func (is *indexSearch) searchTagKeys(tks map[string]struct{}, maxTagKeys int) error {
	ts := &is.ts
	kb := &is.kb
	mp := &is.mp
	mp.Reset()
	dmis := is.db.s.getDeletedMetricIDs()
	loopsPaceLimiter := 0
	kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixTagToMetricIDs)
	prefix := kb.B
	ts.Seek(prefix)
	for len(tks) < maxTagKeys && ts.NextItem() {
		if loopsPaceLimiter&paceLimiterFastIterationsMask == 0 {
			if err := checkSearchDeadlineAndPace(is.deadline); err != nil {
				return err
			}
		}
		loopsPaceLimiter++
		item := ts.Item
		if !bytes.HasPrefix(item, prefix) {
			break
		}
		if err := mp.Init(item, nsPrefixTagToMetricIDs); err != nil {
			return err
		}
		if mp.IsDeletedTag(dmis) {
			continue
		}
		key := mp.Tag.Key
		if isArtificialTagKey(key) {
			// Skip artificially created tag keys.
			continue
		}
		// Store tag key.
		tks[string(key)] = struct{}{}

		// Search for the next tag key.
		// The last char in kb.B must be tagSeparatorChar.
		// Just increment it in order to jump to the next tag key.
		kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixTagToMetricIDs)
		kb.B = marshalTagValue(kb.B, key)
		kb.B[len(kb.B)-1]++
		ts.Seek(kb.B)
	}
	if err := ts.Error(); err != nil {
		return fmt.Errorf("error during search for prefix %q: %w", prefix, err)
	}
	return nil
}
// SearchTagValuesOnTimeRange returns all the tag values for the given tagKey on tr.
func (db *indexDB) SearchTagValuesOnTimeRange(tagKey []byte, tr TimeRange, maxTagValues int, deadline uint64) ([]string, error) {
	tvs := make(map[string]struct{})
	is := db.getIndexSearch(deadline)
	err := is.searchTagValuesOnTimeRange(tvs, tagKey, tr, maxTagValues)
	db.putIndexSearch(is)
	if err != nil {
		return nil, err
	}
	ok := db.doExtDB(func(extDB *indexDB) {
		is := extDB.getIndexSearch(deadline)
		err = is.searchTagValuesOnTimeRange(tvs, tagKey, tr, maxTagValues)
		extDB.putIndexSearch(is)
	})
	if ok && err != nil {
		return nil, err
	}

	tagValues := make([]string, 0, len(tvs))
	for tv := range tvs {
		if len(tv) == 0 {
			// Skip empty values, since they have no meaning.
			// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/600
			continue
		}
		tagValues = append(tagValues, tv)
	}
	// Do not sort tagValues, since they must be sorted by vmselect.
	return tagValues, nil
}
func (is *indexSearch) searchTagValuesOnTimeRange(tvs map[string]struct{}, tagKey []byte, tr TimeRange, maxTagValues int) error {
	minDate := uint64(tr.MinTimestamp) / msecPerDay
	maxDate := uint64(tr.MaxTimestamp) / msecPerDay
	if minDate > maxDate || maxDate-minDate > maxDaysForPerDaySearch {
		return is.searchTagValues(tvs, tagKey, maxTagValues)
	}

	var mu sync.Mutex
	var wg sync.WaitGroup
	var errGlobal error
	for date := minDate; date <= maxDate; date++ {
		wg.Add(1)
		go func(date uint64) {
			defer wg.Done()
			tvsLocal := make(map[string]struct{})
			isLocal := is.db.getIndexSearch(is.deadline)
			err := isLocal.searchTagValuesOnDate(tvsLocal, tagKey, date, maxTagValues)
			is.db.putIndexSearch(isLocal)
			mu.Lock()
			defer mu.Unlock()
			if errGlobal != nil {
				return
			}
			if err != nil {
				errGlobal = err
				return
			}
			if len(tvs) >= maxTagValues {
				return
			}
			for v := range tvsLocal {
				tvs[v] = struct{}{}
			}
		}(date)
	}
	wg.Wait()
	return errGlobal
}
func (is *indexSearch) searchTagValuesOnDate(tvs map[string]struct{}, tagKey []byte, date uint64, maxTagValues int) error {
	ts := &is.ts
	kb := &is.kb
	mp := &is.mp
	mp.Reset()
	dmis := is.db.s.getDeletedMetricIDs()
	loopsPaceLimiter := 0
	kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixDateTagToMetricIDs)
	kb.B = encoding.MarshalUint64(kb.B, date)
	kb.B = marshalTagValue(kb.B, tagKey)
	prefix := kb.B
	ts.Seek(prefix)
	for len(tvs) < maxTagValues && ts.NextItem() {
		if loopsPaceLimiter&paceLimiterFastIterationsMask == 0 {
			if err := checkSearchDeadlineAndPace(is.deadline); err != nil {
				return err
			}
		}
		loopsPaceLimiter++
		item := ts.Item
		if !bytes.HasPrefix(item, prefix) {
			break
		}
		if err := mp.Init(item, nsPrefixDateTagToMetricIDs); err != nil {
			return err
		}
		if mp.IsDeletedTag(dmis) {
			continue
		}

		// Store tag value
		tvs[string(mp.Tag.Value)] = struct{}{}

		if mp.MetricIDsLen() < maxMetricIDsPerRow/2 {
			// There is no need in searching for the next tag value,
			// since it is likely located in the next row,
			// because the current row contains an incomplete metricIDs set.
			continue
		}
		// Search for the next tag value.
		// The last char in kb.B must be tagSeparatorChar.
		// Just increment it in order to jump to the next tag value.
		kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixDateTagToMetricIDs)
		kb.B = encoding.MarshalUint64(kb.B, date)
		kb.B = marshalTagValue(kb.B, mp.Tag.Key)
		kb.B = marshalTagValue(kb.B, mp.Tag.Value)
		kb.B[len(kb.B)-1]++
		ts.Seek(kb.B)
	}
	if err := ts.Error(); err != nil {
		return fmt.Errorf("error when searching for tag name prefix %q: %w", prefix, err)
	}
	return nil
}
// SearchTagValues returns all the tag values for the given tagKey
func (db *indexDB) SearchTagValues(tagKey []byte, maxTagValues int, deadline uint64) ([]string, error) {
	tvs := make(map[string]struct{})
	is := db.getIndexSearch(deadline)
	err := is.searchTagValues(tvs, tagKey, maxTagValues)
	db.putIndexSearch(is)
	if err != nil {
		return nil, err
	}
	ok := db.doExtDB(func(extDB *indexDB) {
		is := extDB.getIndexSearch(deadline)
		err = is.searchTagValues(tvs, tagKey, maxTagValues)
		extDB.putIndexSearch(is)
	})
	if ok && err != nil {
		return nil, err
	}

	tagValues := make([]string, 0, len(tvs))
	for tv := range tvs {
		if len(tv) == 0 {
			// Skip empty values, since they have no meaning.
			// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/600
			continue
		}
		tagValues = append(tagValues, tv)
	}
	// Do not sort tagValues, since they must be sorted by vmselect.
	return tagValues, nil
}
func (is *indexSearch) searchTagValues(tvs map[string]struct{}, tagKey []byte, maxTagValues int) error {
	ts := &is.ts
	kb := &is.kb
	mp := &is.mp
	mp.Reset()
	dmis := is.db.s.getDeletedMetricIDs()
	loopsPaceLimiter := 0
	kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixTagToMetricIDs)
	kb.B = marshalTagValue(kb.B, tagKey)
	prefix := kb.B
	ts.Seek(prefix)
	for len(tvs) < maxTagValues && ts.NextItem() {
		if loopsPaceLimiter&paceLimiterFastIterationsMask == 0 {
			if err := checkSearchDeadlineAndPace(is.deadline); err != nil {
				return err
			}
		}
		loopsPaceLimiter++
		item := ts.Item
		if !bytes.HasPrefix(item, prefix) {
			break
		}
		if err := mp.Init(item, nsPrefixTagToMetricIDs); err != nil {
			return err
		}
		if mp.IsDeletedTag(dmis) {
			continue
		}

		// Store tag value
		tvs[string(mp.Tag.Value)] = struct{}{}

		if mp.MetricIDsLen() < maxMetricIDsPerRow/2 {
			// There is no need in searching for the next tag value,
			// since it is likely located in the next row,
			// because the current row contains an incomplete metricIDs set.
			continue
		}
		// Search for the next tag value.
		// The last char in kb.B must be tagSeparatorChar.
		// Just increment it in order to jump to the next tag value.
		kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixTagToMetricIDs)
		kb.B = marshalTagValue(kb.B, mp.Tag.Key)
		kb.B = marshalTagValue(kb.B, mp.Tag.Value)
		kb.B[len(kb.B)-1]++
		ts.Seek(kb.B)
	}
	if err := ts.Error(); err != nil {
		return fmt.Errorf("error when searching for tag name prefix %q: %w", prefix, err)
	}
	return nil
}
// SearchTagValueSuffixes returns all the tag value suffixes for the given tagKey and tagValuePrefix on the given tr.
//
// This allows implementing https://graphite-api.readthedocs.io/en/latest/api.html#metrics-find or similar APIs.
//
// If it returns maxTagValueSuffixes suffixes, then it is likely that more than maxTagValueSuffixes suffixes were found.
func (db *indexDB) SearchTagValueSuffixes(tr TimeRange, tagKey, tagValuePrefix []byte, delimiter byte, maxTagValueSuffixes int, deadline uint64) ([]string, error) {
	// TODO: cache results?

	tvss := make(map[string]struct{})
	is := db.getIndexSearch(deadline)
	err := is.searchTagValueSuffixesForTimeRange(tvss, tr, tagKey, tagValuePrefix, delimiter, maxTagValueSuffixes)
	db.putIndexSearch(is)
	if err != nil {
		return nil, err
	}
	if len(tvss) < maxTagValueSuffixes {
		ok := db.doExtDB(func(extDB *indexDB) {
			is := extDB.getIndexSearch(deadline)
			err = is.searchTagValueSuffixesForTimeRange(tvss, tr, tagKey, tagValuePrefix, delimiter, maxTagValueSuffixes)
			extDB.putIndexSearch(is)
		})
		if ok && err != nil {
			return nil, err
		}
	}

	suffixes := make([]string, 0, len(tvss))
	for suffix := range tvss {
		// Do not skip empty suffixes, since they may represent leaf tag values.
		suffixes = append(suffixes, suffix)
	}
	if len(suffixes) > maxTagValueSuffixes {
		suffixes = suffixes[:maxTagValueSuffixes]
	}
	// Do not sort suffixes, since they must be sorted by vmselect.
	return suffixes, nil
}
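
// A usage sketch for Graphite-style metric tree expansion (illustrative
// only; argument values are hypothetical): asking for the children of
// "foo." in names stored under the empty tag key (__name__):
//
//	suffixes, err := db.SearchTagValueSuffixes(tr, nil, []byte("foo."), '.', 100, deadline)
//	// A name "foo.bar.baz" contributes "bar." (non-leaf),
//	// while "foo.qux" contributes "qux" (leaf, no trailing delimiter).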
func (is *indexSearch) searchTagValueSuffixesForTimeRange(tvss map[string]struct{}, tr TimeRange, tagKey, tagValuePrefix []byte, delimiter byte, maxTagValueSuffixes int) error {
	minDate := uint64(tr.MinTimestamp) / msecPerDay
	maxDate := uint64(tr.MaxTimestamp) / msecPerDay
	if minDate > maxDate || maxDate-minDate > maxDaysForPerDaySearch {
		return is.searchTagValueSuffixesAll(tvss, tagKey, tagValuePrefix, delimiter, maxTagValueSuffixes)
	}

	// Query over multiple days in parallel.
	var wg sync.WaitGroup
	var errGlobal error
	var mu sync.Mutex // protects tvss + errGlobal from concurrent access below.
	for minDate <= maxDate {
		wg.Add(1)
		go func(date uint64) {
			defer wg.Done()
			tvssLocal := make(map[string]struct{})
			isLocal := is.db.getIndexSearch(is.deadline)
			err := isLocal.searchTagValueSuffixesForDate(tvssLocal, date, tagKey, tagValuePrefix, delimiter, maxTagValueSuffixes)
			is.db.putIndexSearch(isLocal)
			mu.Lock()
			defer mu.Unlock()
			if errGlobal != nil {
				return
			}
			if err != nil {
				errGlobal = err
				return
			}
			if len(tvss) > maxTagValueSuffixes {
				return
			}
			for k := range tvssLocal {
				tvss[k] = struct{}{}
			}
		}(minDate)
		minDate++
	}
	wg.Wait()
	return errGlobal
}
func (is *indexSearch) searchTagValueSuffixesAll(tvss map[string]struct{}, tagKey, tagValuePrefix []byte, delimiter byte, maxTagValueSuffixes int) error {
	kb := &is.kb
	nsPrefix := byte(nsPrefixTagToMetricIDs)
	kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefix)
	kb.B = marshalTagValue(kb.B, tagKey)
	kb.B = marshalTagValue(kb.B, tagValuePrefix)
	kb.B = kb.B[:len(kb.B)-1] // remove tagSeparatorChar from the end of kb.B
	prefix := append([]byte(nil), kb.B...)
	return is.searchTagValueSuffixesForPrefix(tvss, nsPrefix, prefix, len(tagValuePrefix), delimiter, maxTagValueSuffixes)
}

func (is *indexSearch) searchTagValueSuffixesForDate(tvss map[string]struct{}, date uint64, tagKey, tagValuePrefix []byte, delimiter byte, maxTagValueSuffixes int) error {
	nsPrefix := byte(nsPrefixDateTagToMetricIDs)
	kb := &is.kb
	kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefix)
	kb.B = encoding.MarshalUint64(kb.B, date)
	kb.B = marshalTagValue(kb.B, tagKey)
	kb.B = marshalTagValue(kb.B, tagValuePrefix)
	kb.B = kb.B[:len(kb.B)-1] // remove tagSeparatorChar from the end of kb.B
	prefix := append([]byte(nil), kb.B...)
	return is.searchTagValueSuffixesForPrefix(tvss, nsPrefix, prefix, len(tagValuePrefix), delimiter, maxTagValueSuffixes)
}
func (is *indexSearch) searchTagValueSuffixesForPrefix(tvss map[string]struct{}, nsPrefix byte, prefix []byte, tagValuePrefixLen int, delimiter byte, maxTagValueSuffixes int) error {
	kb := &is.kb
	ts := &is.ts
	mp := &is.mp
	mp.Reset()
	dmis := is.db.s.getDeletedMetricIDs()
	loopsPaceLimiter := 0
	ts.Seek(prefix)
	for len(tvss) < maxTagValueSuffixes && ts.NextItem() {
		if loopsPaceLimiter&paceLimiterFastIterationsMask == 0 {
			if err := checkSearchDeadlineAndPace(is.deadline); err != nil {
				return err
			}
		}
		loopsPaceLimiter++
		item := ts.Item
		if !bytes.HasPrefix(item, prefix) {
			break
		}
		if err := mp.Init(item, nsPrefix); err != nil {
			return err
		}
		if mp.IsDeletedTag(dmis) {
			continue
		}
		tagValue := mp.Tag.Value
		suffix := tagValue[tagValuePrefixLen:]
		n := bytes.IndexByte(suffix, delimiter)
		if n < 0 {
			// Found a leaf tag value that doesn't have delimiters after the given tagValuePrefix.
			tvss[string(suffix)] = struct{}{}
			continue
		}
		// Found a non-leaf tag value. Extract the suffix that ends with the given delimiter.
		suffix = suffix[:n+1]
		tvss[string(suffix)] = struct{}{}
		if suffix[len(suffix)-1] == 255 {
			continue
		}
		// Search for the next suffix
		suffix[len(suffix)-1]++
		kb.B = append(kb.B[:0], prefix...)
		kb.B = marshalTagValue(kb.B, suffix)
		kb.B = kb.B[:len(kb.B)-1] // remove tagSeparatorChar
		ts.Seek(kb.B)
	}
	if err := ts.Error(); err != nil {
		return fmt.Errorf("error when searching for tag value suffixes for prefix %q: %w", prefix, err)
	}
	return nil
}
// GetSeriesCount returns the approximate number of unique timeseries in the db.
//
// It includes the deleted series too and may count the same series
// up to two times - in db and extDB.
func (db *indexDB) GetSeriesCount(deadline uint64) (uint64, error) {
	is := db.getIndexSearch(deadline)
	n, err := is.getSeriesCount()
	db.putIndexSearch(is)
	if err != nil {
		return 0, err
	}

	var nExt uint64
	ok := db.doExtDB(func(extDB *indexDB) {
		is := extDB.getIndexSearch(deadline)
		nExt, err = is.getSeriesCount()
		extDB.putIndexSearch(is)
	})
	if ok && err != nil {
		return 0, fmt.Errorf("error when searching in extDB: %w", err)
	}
	return n + nExt, nil
}
func (is *indexSearch) getSeriesCount() (uint64, error) {
	ts := &is.ts
	kb := &is.kb
	mp := &is.mp
	loopsPaceLimiter := 0
	var metricIDsLen uint64
	// Extract the number of series from ((__name__=value): metricIDs) rows
	kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixTagToMetricIDs)
	kb.B = marshalTagValue(kb.B, nil)
	ts.Seek(kb.B)
	for ts.NextItem() {
		if loopsPaceLimiter&paceLimiterFastIterationsMask == 0 {
			if err := checkSearchDeadlineAndPace(is.deadline); err != nil {
				return 0, err
			}
		}
		loopsPaceLimiter++
		item := ts.Item
		if !bytes.HasPrefix(item, kb.B) {
			break
		}
		tail := item[len(kb.B):]
		n := bytes.IndexByte(tail, tagSeparatorChar)
		if n < 0 {
			return 0, fmt.Errorf("invalid tag->metricIDs line %q: cannot find tagSeparatorChar %d", item, tagSeparatorChar)
		}
		tail = tail[n+1:]
		if err := mp.InitOnlyTail(item, tail); err != nil {
			return 0, err
		}
		// Take into account deleted timeseries too.
		// It is OK if series can be counted multiple times in rare cases -
		// the returned number is an estimation.
		metricIDsLen += uint64(mp.MetricIDsLen())
	}
	if err := ts.Error(); err != nil {
		return 0, fmt.Errorf("error when counting unique timeseries: %w", err)
	}
	return metricIDsLen, nil
}
// GetTSDBStatusWithFiltersForDate returns topN entries for tsdb status for the given tfss and the given date.
func (db *indexDB) GetTSDBStatusWithFiltersForDate(tfss []*TagFilters, date uint64, topN int, deadline uint64) (*TSDBStatus, error) {
	is := db.getIndexSearch(deadline)
	status, err := is.getTSDBStatusWithFiltersForDate(tfss, date, topN)
	db.putIndexSearch(is)
	if err != nil {
		return nil, err
	}
	if status.hasEntries() {
		return status, nil
	}
	ok := db.doExtDB(func(extDB *indexDB) {
		is := extDB.getIndexSearch(deadline)
		status, err = is.getTSDBStatusWithFiltersForDate(tfss, date, topN)
		extDB.putIndexSearch(is)
	})
	if ok && err != nil {
		return nil, fmt.Errorf("error when obtaining TSDB status from extDB: %w", err)
	}
	return status, nil
}
// getTSDBStatusWithFiltersForDate returns topN entries for tsdb status for the given tfss and the given date.
func (is *indexSearch) getTSDBStatusWithFiltersForDate(tfss []*TagFilters, date uint64, topN int) (*TSDBStatus, error) {
	var filter *uint64set.Set
	if len(tfss) > 0 {
		tr := TimeRange{
			MinTimestamp: int64(date) * msecPerDay,
			MaxTimestamp: int64(date+1) * msecPerDay,
		}
		metricIDs, err := is.searchMetricIDsInternal(tfss, tr, 2e9)
		if err != nil {
			return nil, err
		}
		if metricIDs.Len() == 0 {
			// Nothing found.
			return &TSDBStatus{}, nil
		}
		filter = metricIDs
	}

	ts := &is.ts
	kb := &is.kb
	mp := &is.mp
	thLabelValueCountByLabelName := newTopHeap(topN)
	thSeriesCountByLabelValuePair := newTopHeap(topN)
	thSeriesCountByMetricName := newTopHeap(topN)
	var tmp, labelName, labelNameValue []byte
	var labelValueCountByLabelName, seriesCountByLabelValuePair uint64
	nameEqualBytes := []byte("__name__=")

	loopsPaceLimiter := 0
	kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixDateTagToMetricIDs)
	kb.B = encoding.MarshalUint64(kb.B, date)
	prefix := kb.B
	ts.Seek(prefix)
	for ts.NextItem() {
		if loopsPaceLimiter&paceLimiterFastIterationsMask == 0 {
			if err := checkSearchDeadlineAndPace(is.deadline); err != nil {
				return nil, err
			}
		}
		loopsPaceLimiter++
		item := ts.Item
		if !bytes.HasPrefix(item, prefix) {
			break
		}
		matchingSeriesCount := 0
		if filter != nil {
			if err := mp.Init(item, nsPrefixDateTagToMetricIDs); err != nil {
				return nil, err
			}
			mp.ParseMetricIDs()
			for _, metricID := range mp.MetricIDs {
				if filter.Has(metricID) {
					matchingSeriesCount++
				}
			}
			if matchingSeriesCount == 0 {
				// Skip rows without matching metricIDs.
				continue
			}
		}
		tail := item[len(prefix):]
		var err error
		tail, tmp, err = unmarshalTagValue(tmp[:0], tail)
		if err != nil {
			return nil, fmt.Errorf("cannot unmarshal tag key from line %q: %w", item, err)
		}
		if isArtificialTagKey(tmp) {
			// Skip artificially created tag keys.
			continue
		}
		if len(tmp) == 0 {
			tmp = append(tmp, "__name__"...)
		}
		if !bytes.Equal(tmp, labelName) {
			thLabelValueCountByLabelName.pushIfNonEmpty(labelName, labelValueCountByLabelName)
			labelValueCountByLabelName = 0
			labelName = append(labelName[:0], tmp...)
		}
		tmp = append(tmp, '=')
		tail, tmp, err = unmarshalTagValue(tmp, tail)
		if err != nil {
			return nil, fmt.Errorf("cannot unmarshal tag value from line %q: %w", item, err)
		}
		if !bytes.Equal(tmp, labelNameValue) {
			thSeriesCountByLabelValuePair.pushIfNonEmpty(labelNameValue, seriesCountByLabelValuePair)
			if bytes.HasPrefix(labelNameValue, nameEqualBytes) {
				thSeriesCountByMetricName.pushIfNonEmpty(labelNameValue[len(nameEqualBytes):], seriesCountByLabelValuePair)
			}
			seriesCountByLabelValuePair = 0
			labelValueCountByLabelName++
			labelNameValue = append(labelNameValue[:0], tmp...)
		}
		if filter == nil {
			if err := mp.InitOnlyTail(item, tail); err != nil {
				return nil, err
			}
			matchingSeriesCount = mp.MetricIDsLen()
		}
		// Take into account deleted timeseries too.
		// It is OK if series can be counted multiple times in rare cases -
		// the returned number is an estimation.
		seriesCountByLabelValuePair += uint64(matchingSeriesCount)
	}
	if err := ts.Error(); err != nil {
		return nil, fmt.Errorf("error when counting time series by metric names: %w", err)
	}
	thLabelValueCountByLabelName.pushIfNonEmpty(labelName, labelValueCountByLabelName)
	thSeriesCountByLabelValuePair.pushIfNonEmpty(labelNameValue, seriesCountByLabelValuePair)
	if bytes.HasPrefix(labelNameValue, nameEqualBytes) {
		thSeriesCountByMetricName.pushIfNonEmpty(labelNameValue[len(nameEqualBytes):], seriesCountByLabelValuePair)
	}
	status := &TSDBStatus{
		SeriesCountByMetricName:     thSeriesCountByMetricName.getSortedResult(),
		LabelValueCountByLabelName:  thLabelValueCountByLabelName.getSortedResult(),
		SeriesCountByLabelValuePair: thSeriesCountByLabelValuePair.getSortedResult(),
	}
	return status, nil
}
// TSDBStatus contains TSDB status data for /api/v1/status/tsdb.
//
// See https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats
type TSDBStatus struct {
	SeriesCountByMetricName     []TopHeapEntry
	LabelValueCountByLabelName  []TopHeapEntry
	SeriesCountByLabelValuePair []TopHeapEntry
}

func (status *TSDBStatus) hasEntries() bool {
	return len(status.SeriesCountByLabelValuePair) > 0
}
// topHeap maintains a heap of TopHeapEntries with the maximum TopHeapEntry.Count values.
type topHeap struct {
	topN int
	a    []TopHeapEntry
}

// newTopHeap returns topHeap for topN items.
func newTopHeap(topN int) *topHeap {
	return &topHeap{
		topN: topN,
	}
}

// TopHeapEntry represents an entry from `top heap` used in stats.
type TopHeapEntry struct {
	Name  string
	Count uint64
}

func (th *topHeap) pushIfNonEmpty(name []byte, count uint64) {
	if count == 0 {
		return
	}
	if len(th.a) < th.topN {
		th.a = append(th.a, TopHeapEntry{
			Name:  string(name),
			Count: count,
		})
		heap.Fix(th, len(th.a)-1)
		return
	}
	if count <= th.a[0].Count {
		return
	}
	th.a[0] = TopHeapEntry{
		Name:  string(name),
		Count: count,
	}
	heap.Fix(th, 0)
}

func (th *topHeap) getSortedResult() []TopHeapEntry {
	result := append([]TopHeapEntry{}, th.a...)
	sort.Slice(result, func(i, j int) bool {
		a, b := result[i], result[j]
		if a.Count != b.Count {
			return a.Count > b.Count
		}
		return a.Name < b.Name
	})
	return result
}
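
// topHeap is a classic bounded min-heap for top-N selection: the smallest
// retained Count sits at th.a[0], so a new candidate only displaces it when
// the candidate's count is larger. A usage sketch (illustrative only):
//
//	th := newTopHeap(3)
//	th.pushIfNonEmpty([]byte("a"), 5)
//	th.pushIfNonEmpty([]byte("b"), 1)
//	th.pushIfNonEmpty([]byte("c"), 7)
//	th.pushIfNonEmpty([]byte("d"), 4) // evicts "b", the current minimum
//	_ = th.getSortedResult()          // [{c 7} {a 5} {d 4}], sorted by Count desc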
// heap.Interface implementation for topHeap.
func (th *topHeap) Len() int {
	return len(th.a)
}

func (th *topHeap) Less(i, j int) bool {
	a := th.a
	return a[i].Count < a[j].Count
}

func (th *topHeap) Swap(i, j int) {
	a := th.a
	a[j], a[i] = a[i], a[j]
}

func (th *topHeap) Push(x interface{}) {
	panic(fmt.Errorf("BUG: Push shouldn't be called"))
}

func (th *topHeap) Pop() interface{} {
	panic(fmt.Errorf("BUG: Pop shouldn't be called"))
}
// searchMetricNameWithCache appends metric name for the given metricID to dst
// and returns the result.
func (db *indexDB) searchMetricNameWithCache(dst []byte, metricID uint64) ([]byte, error) {
	metricName := db.getMetricNameFromCache(dst, metricID)
	if len(metricName) > len(dst) {
		return metricName, nil
	}

	is := db.getIndexSearch(noDeadline)
	var err error
	dst, err = is.searchMetricName(dst, metricID)
	db.putIndexSearch(is)
	if err == nil {
		// There is no need in verifying whether the given metricID is deleted,
		// since the filtering must be performed before calling this func.
		db.putMetricNameToCache(metricID, dst)
		return dst, nil
	}
	if err != io.EOF {
		return dst, err
	}

	// Try searching in the external indexDB.
	if db.doExtDB(func(extDB *indexDB) {
		is := extDB.getIndexSearch(noDeadline)
		dst, err = is.searchMetricName(dst, metricID)
		extDB.putIndexSearch(is)
		if err == nil {
			// There is no need in verifying whether the given metricID is deleted,
			// since the filtering must be performed before calling this func.
			extDB.putMetricNameToCache(metricID, dst)
		}
	}) {
		return dst, err
	}

	// Cannot find MetricName for the given metricID. This may be the case
	// when indexDB contains an incomplete set of metricID -> metricName entries
	// after a snapshot or due to unflushed entries.
	atomic.AddUint64(&db.missingMetricNamesForMetricID, 1)

	// Mark the metricID as deleted, so it will be created again when a new data point
	// for the given time series arrives.
	if err := db.deleteMetricIDs([]uint64{metricID}); err != nil {
		return dst, fmt.Errorf("cannot delete metricID for missing metricID->metricName entry; metricID=%d; error: %w", metricID, err)
	}
	return dst, io.EOF
}

// DeleteTSIDs marks as deleted all the TSIDs matching the given tfss.
//
// The caller must reset all the caches which may contain the deleted TSIDs.
//
// Returns the number of metrics deleted.
func (db *indexDB) DeleteTSIDs(tfss []*TagFilters) (int, error) {
	if len(tfss) == 0 {
		return 0, nil
	}

	// Obtain metricIDs to delete.
	tr := TimeRange{
		MinTimestamp: 0,
		MaxTimestamp: (1 << 63) - 1,
	}
	is := db.getIndexSearch(noDeadline)
	metricIDs, err := is.searchMetricIDs(tfss, tr, 2e9)
	db.putIndexSearch(is)
	if err != nil {
		return 0, err
	}
	if err := db.deleteMetricIDs(metricIDs); err != nil {
		return 0, err
	}

	// Delete TSIDs in the extDB.
	deletedCount := len(metricIDs)
	if db.doExtDB(func(extDB *indexDB) {
		var n int
		n, err = extDB.DeleteTSIDs(tfss)
		deletedCount += n
	}) {
		if err != nil {
			return deletedCount, fmt.Errorf("cannot delete tsids in extDB: %w", err)
		}
	}
	return deletedCount, nil
}

func (db *indexDB) deleteMetricIDs(metricIDs []uint64) error {
	if len(metricIDs) == 0 {
		// Nothing to delete.
		return nil
	}

	// Atomically add deleted metricIDs to an in-memory map.
	dmis := &uint64set.Set{}
	dmis.AddMulti(metricIDs)
	db.s.updateDeletedMetricIDs(dmis)

	// Reset TagFilters -> TSIDS cache, since it may contain deleted TSIDs.
	invalidateTagFiltersCache()

	// Reset MetricName -> TSID cache, since it may contain deleted TSIDs.
	db.s.resetAndSaveTSIDCache()

	// Store the metricIDs as deleted.
	// Do this after updating the deletedMetricIDs and resetting caches
	// in order to exclude the possibility of the inconsistent state when the deleted metricIDs
	// remain available in the tsidCache after unclean shutdown.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1347
	items := getIndexItems()
	for _, metricID := range metricIDs {
		items.B = append(items.B, nsPrefixDeletedMetricID)
		items.B = encoding.MarshalUint64(items.B, metricID)
		items.Next()
	}
	err := db.tb.AddItems(items.Items)
	putIndexItems(items)
	return err
}

func (db *indexDB) loadDeletedMetricIDs() (*uint64set.Set, error) {
	is := db.getIndexSearch(noDeadline)
	dmis, err := is.loadDeletedMetricIDs()
	db.putIndexSearch(is)
	if err != nil {
		return nil, err
	}
	return dmis, nil
}

func (is *indexSearch) loadDeletedMetricIDs() (*uint64set.Set, error) {
	dmis := &uint64set.Set{}
	ts := &is.ts
	kb := &is.kb
	kb.B = append(kb.B[:0], nsPrefixDeletedMetricID)
	ts.Seek(kb.B)
	for ts.NextItem() {
		item := ts.Item
		if !bytes.HasPrefix(item, kb.B) {
			break
		}
		item = item[len(kb.B):]
		if len(item) != 8 {
			return nil, fmt.Errorf("unexpected item len; got %d bytes; want %d bytes", len(item), 8)
		}
		metricID := encoding.UnmarshalUint64(item)
		dmis.Add(metricID)
	}
	if err := ts.Error(); err != nil {
		return nil, err
	}
	return dmis, nil
}

// searchTSIDs returns sorted tsids matching the given tfss over the given tr.
func (db *indexDB) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) ([]TSID, error) {
	if len(tfss) == 0 {
		return nil, nil
	}
	if tr.MinTimestamp >= db.s.minTimestampForCompositeIndex {
		tfss = convertToCompositeTagFilterss(tfss)
	}

	tfKeyBuf := tagFiltersKeyBufPool.Get()
	defer tagFiltersKeyBufPool.Put(tfKeyBuf)

	tfKeyBuf.B = marshalTagFiltersKey(tfKeyBuf.B[:0], tfss, tr, true)
	tsids, ok := db.getFromTagFiltersCache(tfKeyBuf.B)
	if ok {
		// Fast path - tsids found in the cache.
		return tsids, nil
	}

	// Slow path - search for tsids in the db and extDB.
	is := db.getIndexSearch(deadline)
	localTSIDs, err := is.searchTSIDs(tfss, tr, maxMetrics)
	db.putIndexSearch(is)
	if err != nil {
		return nil, err
	}

	var extTSIDs []TSID
	if db.doExtDB(func(extDB *indexDB) {
		tfKeyExtBuf := tagFiltersKeyBufPool.Get()
		defer tagFiltersKeyBufPool.Put(tfKeyExtBuf)

		// Data in extDB cannot be changed, so use unversioned keys for tag cache.
		tfKeyExtBuf.B = marshalTagFiltersKey(tfKeyExtBuf.B[:0], tfss, tr, false)
		tsids, ok := extDB.getFromTagFiltersCache(tfKeyExtBuf.B)
		if ok {
			extTSIDs = tsids
			return
		}
		is := extDB.getIndexSearch(deadline)
		extTSIDs, err = is.searchTSIDs(tfss, tr, maxMetrics)
		extDB.putIndexSearch(is)

		sort.Slice(extTSIDs, func(i, j int) bool { return extTSIDs[i].Less(&extTSIDs[j]) })
		extDB.putToTagFiltersCache(extTSIDs, tfKeyExtBuf.B)
	}) {
		if err != nil {
			return nil, err
		}
	}

	// Merge localTSIDs with extTSIDs.
	tsids = mergeTSIDs(localTSIDs, extTSIDs)

	// Sort the found tsids, since they must be passed to TSID search
	// in the sorted order.
	sort.Slice(tsids, func(i, j int) bool { return tsids[i].Less(&tsids[j]) })

	// Store TSIDs in the cache.
	db.putToTagFiltersCache(tsids, tfKeyBuf.B)

	return tsids, err
}

var tagFiltersKeyBufPool bytesutil.ByteBufferPool

func (is *indexSearch) getTSIDByMetricName(dst *TSID, metricName []byte) error {
	dmis := is.db.s.getDeletedMetricIDs()
	ts := &is.ts
	kb := &is.kb
	kb.B = append(kb.B[:0], nsPrefixMetricNameToTSID)
	kb.B = append(kb.B, metricName...)
	kb.B = append(kb.B, kvSeparatorChar)
	ts.Seek(kb.B)
	for ts.NextItem() {
		if !bytes.HasPrefix(ts.Item, kb.B) {
			// Nothing found.
			return io.EOF
		}
		v := ts.Item[len(kb.B):]
		tail, err := dst.Unmarshal(v)
		if err != nil {
			return fmt.Errorf("cannot unmarshal TSID: %w", err)
		}
		if len(tail) > 0 {
			return fmt.Errorf("unexpected non-empty tail left after unmarshaling TSID: %X", tail)
		}
		if dmis.Len() > 0 {
			// Verify whether the dst is marked as deleted.
			if dmis.Has(dst.MetricID) {
				// The dst is deleted. Continue searching.
				continue
			}
		}
		// Found valid dst.
		return nil
	}
	if err := ts.Error(); err != nil {
		return fmt.Errorf("error when searching TSID by metricName; searchPrefix %q: %w", kb.B, err)
	}
	// Nothing found
	return io.EOF
}

func (is *indexSearch) searchMetricNameWithCache(dst []byte, metricID uint64) ([]byte, error) {
	metricName := is.db.getMetricNameFromCache(dst, metricID)
	if len(metricName) > len(dst) {
		return metricName, nil
	}
	var err error
	dst, err = is.searchMetricName(dst, metricID)
	if err == nil {
		// There is no need to verify whether the given metricID is deleted,
		// since the filtering must be performed before calling this func.
		is.db.putMetricNameToCache(metricID, dst)
		return dst, nil
	}
	return dst, err
}

func (is *indexSearch) searchMetricName(dst []byte, metricID uint64) ([]byte, error) {
	ts := &is.ts
	kb := &is.kb
	kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixMetricIDToMetricName)
	kb.B = encoding.MarshalUint64(kb.B, metricID)
	if err := ts.FirstItemWithPrefix(kb.B); err != nil {
		if err == io.EOF {
			return dst, err
		}
		return dst, fmt.Errorf("error when searching metricName by metricID; searchPrefix %q: %w", kb.B, err)
	}
	v := ts.Item[len(kb.B):]
	dst = append(dst, v...)
	return dst, nil
}
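
// mergeTSIDs merges a and b, deduplicating entries by MetricID.
// The result ordering is unspecified; callers are expected to sort it.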
func mergeTSIDs(a, b []TSID) []TSID {
	if len(b) > len(a) {
		a, b = b, a
	}
	if len(b) == 0 {
		return a
	}
	m := make(map[uint64]TSID, len(a))
	for i := range a {
		tsid := &a[i]
		m[tsid.MetricID] = *tsid
	}
	for i := range b {
		tsid := &b[i]
		m[tsid.MetricID] = *tsid
	}

	tsids := make([]TSID, 0, len(m))
	for _, tsid := range m {
		tsids = append(tsids, tsid)
	}
	return tsids
}

func (is *indexSearch) containsTimeRange(tr TimeRange) (bool, error) {
	ts := &is.ts
	kb := &is.kb

	// Verify whether the maximum date in `ts` covers tr.MinTimestamp.
	minDate := uint64(tr.MinTimestamp) / msecPerDay
	kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixDateToMetricID)
	prefix := kb.B
	kb.B = encoding.MarshalUint64(kb.B, minDate)
	ts.Seek(kb.B)
	if !ts.NextItem() {
		if err := ts.Error(); err != nil {
			return false, fmt.Errorf("error when searching for minDate=%d, prefix %q: %w", minDate, kb.B, err)
		}
		return false, nil
	}
	if !bytes.HasPrefix(ts.Item, prefix) {
		// minDate exceeds max date from ts.
		return false, nil
	}
	return true, nil
}

func (is *indexSearch) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int) ([]TSID, error) {
	ok, err := is.containsTimeRange(tr)
	if err != nil {
		return nil, err
	}
	if !ok {
		// Fast path - the index doesn't contain data for the given tr.
		return nil, nil
	}
	metricIDs, err := is.searchMetricIDs(tfss, tr, maxMetrics)
	if err != nil {
		return nil, err
	}
	if len(metricIDs) == 0 {
		// Nothing found.
		return nil, nil
	}

	// Obtain TSID values for the given metricIDs.
	tsids := make([]TSID, len(metricIDs))
	i := 0
	for loopsPaceLimiter, metricID := range metricIDs {
		if loopsPaceLimiter&paceLimiterSlowIterationsMask == 0 {
			if err := checkSearchDeadlineAndPace(is.deadline); err != nil {
				return nil, err
			}
		}
		// Try obtaining TSIDs from MetricID->TSID cache. This is much faster
		// than scanning the mergeset if it contains a lot of metricIDs.
		tsid := &tsids[i]
		err := is.db.getFromMetricIDCache(tsid, metricID)
		if err == nil {
			// Fast path - the tsid for metricID is found in cache.
			i++
			continue
		}
		if err != io.EOF {
			return nil, err
		}
		if err := is.getTSIDByMetricID(tsid, metricID); err != nil {
			if err == io.EOF {
				// Cannot find TSID for the given metricID.
				// This may be the case on incomplete indexDB
				// due to snapshot or due to unflushed entries.
				// Just increment errors counter and skip it.
				atomic.AddUint64(&is.db.missingTSIDsForMetricID, 1)
				continue
			}
			return nil, fmt.Errorf("cannot find tsid %d out of %d for metricID %d: %w", i, len(metricIDs), metricID, err)
		}
		is.db.putToMetricIDCache(metricID, tsid)
		i++
	}
	tsids = tsids[:i]

	// Do not sort the found tsids, since they will be sorted later.
	return tsids, nil
}

func (is *indexSearch) getTSIDByMetricID(dst *TSID, metricID uint64) error {
	// There is no need to check for deleted metricIDs here, since the caller
	// must perform that check.
	ts := &is.ts
	kb := &is.kb
	kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixMetricIDToTSID)
	kb.B = encoding.MarshalUint64(kb.B, metricID)
	if err := ts.FirstItemWithPrefix(kb.B); err != nil {
		if err == io.EOF {
			return err
		}
		return fmt.Errorf("error when searching TSID by metricID; searchPrefix %q: %w", kb.B, err)
	}
	v := ts.Item[len(kb.B):]
	tail, err := dst.Unmarshal(v)
	if err != nil {
		return fmt.Errorf("cannot unmarshal TSID=%X: %w", v, err)
	}
	if len(tail) > 0 {
		return fmt.Errorf("unexpected non-zero tail left after unmarshaling TSID: %X", tail)
	}
	return nil
}

// updateMetricIDsByMetricNameMatch matches metricName values for the given srcMetricIDs against tfs
// and adds matching metrics to metricIDs.
func (is *indexSearch) updateMetricIDsByMetricNameMatch(metricIDs, srcMetricIDs *uint64set.Set, tfs []*tagFilter) error {
	// Sort srcMetricIDs in order to speed up Seek below.
	sortedMetricIDs := srcMetricIDs.AppendTo(nil)

	kb := &is.kb
	kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixTagToMetricIDs)
	tfs = removeCompositeTagFilters(tfs, kb.B)

	metricName := kbPool.Get()
	defer kbPool.Put(metricName)
	mn := GetMetricName()
	defer PutMetricName(mn)
	for loopsPaceLimiter, metricID := range sortedMetricIDs {
		if loopsPaceLimiter&paceLimiterSlowIterationsMask == 0 {
			if err := checkSearchDeadlineAndPace(is.deadline); err != nil {
				return err
			}
		}
		var err error
		metricName.B, err = is.searchMetricNameWithCache(metricName.B[:0], metricID)
		if err != nil {
			if err == io.EOF {
				// It is likely the metricID->metricName entry didn't propagate to inverted index yet.
				// Skip this metricID for now.
				continue
			}
			return fmt.Errorf("cannot find metricName by metricID %d: %w", metricID, err)
		}
		if err := mn.Unmarshal(metricName.B); err != nil {
			return fmt.Errorf("cannot unmarshal metricName %q: %w", metricName.B, err)
		}
		// Match the mn against tfs.
		ok, err := matchTagFilters(mn, tfs, &is.kb)
		if err != nil {
			return fmt.Errorf("cannot match MetricName %s against tagFilters: %w", mn, err)
		}
		if !ok {
			continue
		}
		metricIDs.Add(metricID)
	}
	return nil
}

func removeCompositeTagFilters(tfs []*tagFilter, prefix []byte) []*tagFilter {
	if !hasCompositeTagFilters(tfs, prefix) {
		return tfs
	}
	var tagKey []byte
	var name []byte
	tfsNew := make([]*tagFilter, 0, len(tfs)+1)
	for _, tf := range tfs {
		if !bytes.HasPrefix(tf.prefix, prefix) {
			tfsNew = append(tfsNew, tf)
			continue
		}
		suffix := tf.prefix[len(prefix):]
		var err error
		_, tagKey, err = unmarshalTagValue(tagKey[:0], suffix)
		if err != nil {
			logger.Panicf("BUG: cannot unmarshal tag key from suffix=%q: %s", suffix, err)
		}
		if len(tagKey) == 0 || tagKey[0] != compositeTagKeyPrefix {
			tfsNew = append(tfsNew, tf)
			continue
		}
		tagKey = tagKey[1:]
		var nameLen uint64
		tagKey, nameLen, err = encoding.UnmarshalVarUint64(tagKey)
		if err != nil {
			logger.Panicf("BUG: cannot unmarshal nameLen from tagKey %q: %s", tagKey, err)
		}
		if nameLen == 0 {
			logger.Panicf("BUG: nameLen must be greater than 0")
		}
		if uint64(len(tagKey)) < nameLen {
			logger.Panicf("BUG: expecting at least %d bytes for name in tagKey=%q; got %d bytes", nameLen, tagKey, len(tagKey))
		}
		name = append(name[:0], tagKey[:nameLen]...)
		tagKey = tagKey[nameLen:]
		var tfNew tagFilter
		if err := tfNew.Init(prefix, tagKey, tf.value, tf.isNegative, tf.isRegexp); err != nil {
			logger.Panicf("BUG: cannot initialize {%s=%q} filter: %s", tagKey, tf.value, err)
		}
		tfsNew = append(tfsNew, &tfNew)
	}
	if len(name) > 0 {
		var tfNew tagFilter
		if err := tfNew.Init(prefix, nil, name, false, false); err != nil {
			logger.Panicf("BUG: unexpected error when initializing {__name__=%q} filter: %s", name, err)
		}
		tfsNew = append(tfsNew, &tfNew)
	}
	return tfsNew
}
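
// For example (illustrative only): given a composite filter built for
// {__name__="cpu", mode="idle"}, removeCompositeTagFilters splits it back into
// the plain {mode="idle"} filter plus an explicit {__name__="cpu"} filter, so
// the filters can be matched against unmarshaled MetricName values.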

func hasCompositeTagFilters(tfs []*tagFilter, prefix []byte) bool {
	var tagKey []byte
	for _, tf := range tfs {
		if !bytes.HasPrefix(tf.prefix, prefix) {
			continue
		}
		suffix := tf.prefix[len(prefix):]
		var err error
		_, tagKey, err = unmarshalTagValue(tagKey[:0], suffix)
		if err != nil {
			logger.Panicf("BUG: cannot unmarshal tag key from suffix=%q: %s", suffix, err)
		}
		if len(tagKey) > 0 && tagKey[0] == compositeTagKeyPrefix {
			return true
		}
	}
	return false
}

func matchTagFilters(mn *MetricName, tfs []*tagFilter, kb *bytesutil.ByteBuffer) (bool, error) {
	kb.B = marshalCommonPrefix(kb.B[:0], nsPrefixTagToMetricIDs)
	for i, tf := range tfs {
		if bytes.Equal(tf.key, graphiteReverseTagKey) {
			// Skip artificial tag filter for Graphite-like metric names with dots,
			// since mn doesn't contain the corresponding tag.
			continue
		}
		if len(tf.key) == 0 || string(tf.key) == "__graphite__" {
			// Match against mn.MetricGroup.
			b := marshalTagValue(kb.B, nil)
			b = marshalTagValue(b, mn.MetricGroup)
			kb.B = b[:len(kb.B)]
			ok, err := tf.match(b)
			if err != nil {
				return false, fmt.Errorf("cannot match MetricGroup %q with tagFilter %s: %w", mn.MetricGroup, tf, err)
			}
			if !ok {
				// Move failed tf to start.
				// This should reduce the amount of useless work for the next mn.
				if i > 0 {
					tfs[0], tfs[i] = tfs[i], tfs[0]
				}
				return false, nil
			}
			continue
		}

		// Search for matching tag name.
		tagMatched := false
		tagSeen := false
		for _, tag := range mn.Tags {
			if string(tag.Key) != string(tf.key) {
				continue
			}

			// Found the matching tag name. Match the value.
			tagSeen = true
			b := tag.Marshal(kb.B)
			kb.B = b[:len(kb.B)]
			ok, err := tf.match(b)
			if err != nil {
				return false, fmt.Errorf("cannot match tag %q with tagFilter %s: %w", tag, tf, err)
			}
			if !ok {
				// Move failed tf to start.
				// This should reduce the amount of useless work for the next mn.
				if i > 0 {
					tfs[0], tfs[i] = tfs[i], tfs[0]
				}
				return false, nil
			}
			tagMatched = true
			break
		}
		if !tagSeen && tf.isNegative && !tf.isEmptyMatch {
			// tf contains a negative filter for a non-existing tag key
			// and this filter doesn't match empty string, i.e. {non_existing_tag_key!="foobar"}.
			// Such a filter matches anything.
			//
			// Note that the filter `{non_existing_tag_key!~"|foobar"}` shouldn't match anything,
			// since it is expected that it matches non-empty `non_existing_tag_key`.
			// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/546 for details.
			continue
		}
		if tagMatched {
			// tf matches mn. Go to the next tf.
			continue
		}
		// Matching tag name wasn't found.
		// Move failed tf to start.
		// This should reduce the amount of useless work for the next mn.
		if i > 0 {
			tfs[0], tfs[i] = tfs[i], tfs[0]
		}
		return false, nil
	}
	return true, nil
}

func (is *indexSearch) searchMetricIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int) ([]uint64, error) {
	metricIDs, err := is.searchMetricIDsInternal(tfss, tr, maxMetrics)
	if err != nil {
		return nil, err
	}
	if metricIDs.Len() == 0 {
		// Nothing found
		return nil, nil
	}

	sortedMetricIDs := metricIDs.AppendTo(nil)

	// Filter out deleted metricIDs.
	dmis := is.db.s.getDeletedMetricIDs()
	if dmis.Len() > 0 {
		metricIDsFiltered := sortedMetricIDs[:0]
		for _, metricID := range sortedMetricIDs {
			if !dmis.Has(metricID) {
				metricIDsFiltered = append(metricIDsFiltered, metricID)
			}
		}
		sortedMetricIDs = metricIDsFiltered
	}

	return sortedMetricIDs, nil
}

func (is *indexSearch) searchMetricIDsInternal(tfss []*TagFilters, tr TimeRange, maxMetrics int) (*uint64set.Set, error) {
	metricIDs := &uint64set.Set{}
	for _, tfs := range tfss {
		if len(tfs.tfs) == 0 {
			// An empty filters list is equivalent to `{__name__!=""}`
			tfs = NewTagFilters()
			if err := tfs.Add(nil, nil, true, false); err != nil {
				logger.Panicf(`BUG: cannot add {__name__!=""} filter: %s`, err)
			}
		}
		if err := is.updateMetricIDsForTagFilters(metricIDs, tfs, tr, maxMetrics+1); err != nil {
			return nil, err
		}
		if metricIDs.Len() > maxMetrics {
			return nil, fmt.Errorf("the number of matching unique timeseries exceeds %d; either narrow down the search or increase -search.maxUniqueTimeseries", maxMetrics)
		}
	}
	return metricIDs, nil
}

func (is *indexSearch) updateMetricIDsForTagFilters(metricIDs *uint64set.Set, tfs *TagFilters, tr TimeRange, maxMetrics int) error {
	err := is.tryUpdatingMetricIDsForDateRange(metricIDs, tfs, tr, maxMetrics)
	if err == nil {
		// Fast path: found metricIDs by date range.
		return nil
	}
	if !errors.Is(err, errFallbackToGlobalSearch) {
		return err
	}

	// Slow path - fall back to search in the global inverted index.
	atomic.AddUint64(&is.db.globalSearchCalls, 1)
	m, err := is.getMetricIDsForDateAndFilters(0, tfs, maxMetrics)
	if err != nil {
		return err
	}
	metricIDs.UnionMayOwn(m)
	return nil
}

func (is *indexSearch) getMetricIDsForTagFilter(tf *tagFilter, maxMetrics int, maxLoopsCount int64) (*uint64set.Set, int64, error) {
	if tf.isNegative {
		logger.Panicf("BUG: isNegative must be false")
	}
	metricIDs := &uint64set.Set{}
	if len(tf.orSuffixes) > 0 {
		// Fast path for orSuffixes - seek for rows for each value from orSuffixes.
		loopsCount, err := is.updateMetricIDsForOrSuffixes(tf, metricIDs, maxMetrics, maxLoopsCount)
		if err != nil {
			return nil, loopsCount, fmt.Errorf("error when searching for metricIDs for tagFilter in fast path: %w; tagFilter=%s", err, tf)
		}
		return metricIDs, loopsCount, nil
	}

	// Slow path - scan for all the rows with the given prefix.
	loopsCount, err := is.getMetricIDsForTagFilterSlow(tf, metricIDs.Add, maxLoopsCount)
	if err != nil {
		return nil, loopsCount, fmt.Errorf("error when searching for metricIDs for tagFilter in slow path: %w; tagFilter=%s", err, tf)
	}
	return metricIDs, loopsCount, nil
}

var errTooManyLoops = fmt.Errorf("too many loops are needed for applying this filter")

func (is *indexSearch) getMetricIDsForTagFilterSlow(tf *tagFilter, f func(metricID uint64), maxLoopsCount int64) (int64, error) {
	if len(tf.orSuffixes) > 0 {
		logger.Panicf("BUG: the getMetricIDsForTagFilterSlow must be called only for empty tf.orSuffixes; got %s", tf.orSuffixes)
	}

	// Scan all the rows with tf.prefix and call f on every tf match.
	ts := &is.ts
	kb := &is.kb
	mp := &is.mp
	mp.Reset()
	var prevMatchingSuffix []byte
	var prevMatch bool
	var loopsCount int64
	loopsPaceLimiter := 0
	prefix := tf.prefix
	ts.Seek(prefix)
	for ts.NextItem() {
		if loopsPaceLimiter&paceLimiterMediumIterationsMask == 0 {
			if err := checkSearchDeadlineAndPace(is.deadline); err != nil {
				return loopsCount, err
			}
		}
		loopsPaceLimiter++
		item := ts.Item
		if !bytes.HasPrefix(item, prefix) {
			return loopsCount, nil
		}
		tail := item[len(prefix):]
		n := bytes.IndexByte(tail, tagSeparatorChar)
		if n < 0 {
			return loopsCount, fmt.Errorf("invalid tag->metricIDs line %q: cannot find tagSeparatorChar=%d", item, tagSeparatorChar)
		}
		suffix := tail[:n+1]
		tail = tail[n+1:]
		if err := mp.InitOnlyTail(item, tail); err != nil {
			return loopsCount, err
		}
		mp.ParseMetricIDs()
		loopsCount += int64(mp.MetricIDsLen())
		if loopsCount > maxLoopsCount {
			return loopsCount, errTooManyLoops
		}
		if prevMatch && string(suffix) == string(prevMatchingSuffix) {
			// Fast path: the same tag value found.
			// There is no need in checking it again with potentially
			// slow tf.matchSuffix, which may call regexp.
			for _, metricID := range mp.MetricIDs {
				f(metricID)
			}
			continue
		}

		// Slow path: need tf.matchSuffix call.
		ok, err := tf.matchSuffix(suffix)
		// Assume that tf.matchSuffix call needs 10x more time than a single metric scan iteration.
		loopsCount += 10 * int64(tf.matchCost)
		if err != nil {
			return loopsCount, fmt.Errorf("error when matching %s against suffix %q: %w", tf, suffix, err)
		}
		if !ok {
			prevMatch = false
			if mp.MetricIDsLen() < maxMetricIDsPerRow/2 {
				// If the current row contains non-full metricIDs list,
				// then it is likely the next row contains the next tag value.
				// So skip seeking for the next tag value, since it will be slower than just ts.NextItem call.
				continue
			}

			// Optimization: skip all the metricIDs for the given tag value.
			kb.B = append(kb.B[:0], item[:len(item)-len(tail)]...)
			// The last char in kb.B must be tagSeparatorChar. Just increment it
			// in order to jump to the next tag value.
			if len(kb.B) == 0 || kb.B[len(kb.B)-1] != tagSeparatorChar || tagSeparatorChar >= 0xff {
				return loopsCount, fmt.Errorf("data corruption: the last char in k=%X must be %X", kb.B, tagSeparatorChar)
			}
			kb.B[len(kb.B)-1]++
			ts.Seek(kb.B)
			// Assume that a seek cost is equivalent to 1000 ordinary loops.
			loopsCount += 1000
			continue
		}
		prevMatch = true
		prevMatchingSuffix = append(prevMatchingSuffix[:0], suffix...)
		for _, metricID := range mp.MetricIDs {
			f(metricID)
		}
	}
	if err := ts.Error(); err != nil {
		return loopsCount, fmt.Errorf("error when searching for tag filter prefix %q: %w", prefix, err)
	}
	return loopsCount, nil
}

func (is *indexSearch) updateMetricIDsForOrSuffixes(tf *tagFilter, metricIDs *uint64set.Set, maxMetrics int, maxLoopsCount int64) (int64, error) {
	if tf.isNegative {
		logger.Panicf("BUG: isNegative must be false")
	}
	kb := kbPool.Get()
	defer kbPool.Put(kb)
	var loopsCount int64
	for _, orSuffix := range tf.orSuffixes {
		kb.B = append(kb.B[:0], tf.prefix...)
		kb.B = append(kb.B, orSuffix...)
		kb.B = append(kb.B, tagSeparatorChar)
		lc, err := is.updateMetricIDsForOrSuffix(kb.B, metricIDs, maxMetrics, maxLoopsCount-loopsCount)
		loopsCount += lc
		if err != nil {
			return loopsCount, err
		}
		if metricIDs.Len() >= maxMetrics {
			return loopsCount, nil
		}
	}
	return loopsCount, nil
}

func (is *indexSearch) updateMetricIDsForOrSuffix(prefix []byte, metricIDs *uint64set.Set, maxMetrics int, maxLoopsCount int64) (int64, error) {
	ts := &is.ts
	mp := &is.mp
	mp.Reset()
	var loopsCount int64
	loopsPaceLimiter := 0
	ts.Seek(prefix)
	for metricIDs.Len() < maxMetrics && ts.NextItem() {
		if loopsPaceLimiter&paceLimiterFastIterationsMask == 0 {
			if err := checkSearchDeadlineAndPace(is.deadline); err != nil {
				return loopsCount, err
			}
		}
		loopsPaceLimiter++
		item := ts.Item
		if !bytes.HasPrefix(item, prefix) {
			return loopsCount, nil
		}
		if err := mp.InitOnlyTail(item, item[len(prefix):]); err != nil {
			return loopsCount, err
		}
		loopsCount += int64(mp.MetricIDsLen())
		if loopsCount > maxLoopsCount {
			return loopsCount, errTooManyLoops
		}
		mp.ParseMetricIDs()
		metricIDs.AddMulti(mp.MetricIDs)
	}
	if err := ts.Error(); err != nil {
		return loopsCount, fmt.Errorf("error when searching for tag filter prefix %q: %w", prefix, err)
	}
	return loopsCount, nil
}

var errFallbackToGlobalSearch = errors.New("fall back from per-day index search to global index search")

// The maximum number of days the per-day inverted index search may cover.
// Wider time ranges fall back to the global index search.
const maxDaysForPerDaySearch = 40

func (is *indexSearch) tryUpdatingMetricIDsForDateRange(metricIDs *uint64set.Set, tfs *TagFilters, tr TimeRange, maxMetrics int) error {
	atomic.AddUint64(&is.db.dateRangeSearchCalls, 1)
	minDate := uint64(tr.MinTimestamp) / msecPerDay
	maxDate := uint64(tr.MaxTimestamp) / msecPerDay
	if minDate > maxDate || maxDate-minDate > maxDaysForPerDaySearch {
		// Too many dates must be covered. Give up, since it may be slow.
		return errFallbackToGlobalSearch
	}
	if minDate == maxDate {
		// Fast path - query only a single date.
		m, err := is.getMetricIDsForDateAndFilters(minDate, tfs, maxMetrics)
		if err != nil {
			return err
		}
		metricIDs.UnionMayOwn(m)
		atomic.AddUint64(&is.db.dateRangeSearchHits, 1)
		return nil
	}

	// Slower path - search for metricIDs for each day in parallel.
	var wg sync.WaitGroup
	var errGlobal error
	var mu sync.Mutex // protects metricIDs + errGlobal vars from concurrent access below
	for minDate <= maxDate {
		wg.Add(1)
		go func(date uint64) {
			defer wg.Done()
			isLocal := is.db.getIndexSearch(is.deadline)
			m, err := isLocal.getMetricIDsForDateAndFilters(date, tfs, maxMetrics)
			is.db.putIndexSearch(isLocal)
			mu.Lock()
			defer mu.Unlock()
			if errGlobal != nil {
				return
			}
			if err != nil {
				dateStr := time.Unix(int64(date*24*3600), 0)
				errGlobal = fmt.Errorf("cannot search for metricIDs at %s: %w", dateStr, err)
				return
			}
			if metricIDs.Len() < maxMetrics {
				metricIDs.UnionMayOwn(m)
			}
		}(minDate)
		minDate++
	}
	wg.Wait()
	if errGlobal != nil {
		return errGlobal
	}
	atomic.AddUint64(&is.db.dateRangeSearchHits, 1)
	return nil
}

func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilters, maxMetrics int) (*uint64set.Set, error) {
	// Sort tfs by the loopsCount needed for performing each filter.
	// These stats are usually collected from the previous queries.
	// This way we limit the amount of work below by applying fast filters at first.
	type tagFilterWithWeight struct {
		tf               *tagFilter
		loopsCount       int64
		filterLoopsCount int64
	}
	tfws := make([]tagFilterWithWeight, len(tfs.tfs))
	currentTime := fasttime.UnixTimestamp()
	for i := range tfs.tfs {
		tf := &tfs.tfs[i]
		loopsCount, filterLoopsCount, timestamp := is.getLoopsCountAndTimestampForDateFilter(date, tf)
		if currentTime > timestamp+3600 {
			// Update stats once per hour for relatively fast tag filters.
			// There is no need in spending CPU resources on updating stats for heavy tag filters.
			if loopsCount <= 10e6 {
				loopsCount = 0
			}
			if filterLoopsCount <= 10e6 {
				filterLoopsCount = 0
			}
		}
		tfws[i] = tagFilterWithWeight{
			tf:               tf,
			loopsCount:       loopsCount,
			filterLoopsCount: filterLoopsCount,
		}
	}
	sort.Slice(tfws, func(i, j int) bool {
		a, b := &tfws[i], &tfws[j]
		if a.loopsCount != b.loopsCount {
			return a.loopsCount < b.loopsCount
		}
		return a.tf.Less(b.tf)
	})
	getFirstPositiveLoopsCount := func(tfws []tagFilterWithWeight) int64 {
		for i := range tfws {
			if n := tfws[i].loopsCount; n > 0 {
				return n
			}
		}
		return int64Max
	}
	storeLoopsCount := func(tfw *tagFilterWithWeight, loopsCount int64) {
		if loopsCount != tfw.loopsCount {
			tfw.loopsCount = loopsCount
			is.storeLoopsCountForDateFilter(date, tfw.tf, tfw.loopsCount, tfw.filterLoopsCount)
		}
	}

	// Populate metricIDs for the first non-negative filter with the cost smaller than maxLoopsCount.
	var metricIDs *uint64set.Set
	tfwsRemaining := tfws[:0]
	maxDateMetrics := intMax
	if maxMetrics < intMax/50 {
		maxDateMetrics = maxMetrics * 50
	}
	for i, tfw := range tfws {
		tf := tfw.tf
		if tf.isNegative || tf.isEmptyMatch {
			tfwsRemaining = append(tfwsRemaining, tfw)
			continue
		}
		maxLoopsCount := getFirstPositiveLoopsCount(tfws[i+1:])
		m, loopsCount, err := is.getMetricIDsForDateTagFilter(tf, date, tfs.commonPrefix, maxDateMetrics, maxLoopsCount)
		if err != nil {
			if errors.Is(err, errTooManyLoops) {
				// The tf took too many loops compared to the next filter. Postpone applying this filter.
				storeLoopsCount(&tfw, 2*loopsCount)
				tfwsRemaining = append(tfwsRemaining, tfw)
				continue
			}
			// Move failing filter to the end of filter list.
			storeLoopsCount(&tfw, int64Max)
			return nil, err
		}
		if m.Len() >= maxDateMetrics {
			// Too many time series found by a single tag filter. Move the filter to the end of list.
			storeLoopsCount(&tfw, int64Max-1)
			tfwsRemaining = append(tfwsRemaining, tfw)
			continue
		}
		storeLoopsCount(&tfw, loopsCount)
		metricIDs = m
		tfwsRemaining = append(tfwsRemaining, tfws[i+1:]...)
		break
	}
	tfws = tfwsRemaining

	if metricIDs == nil {
		// All the filters in tfs are negative or match too many time series.
		// Populate all the metricIDs for the given (date),
		// so later they can be filtered out with negative filters.
		m, err := is.getMetricIDsForDate(date, maxDateMetrics)
		if err != nil {
			return nil, fmt.Errorf("cannot obtain all the metricIDs: %w", err)
		}
		if m.Len() >= maxDateMetrics {
			// Too many time series found for the given (date). Fall back to global search.
			return nil, errFallbackToGlobalSearch
		}
		metricIDs = m
	}

	sort.Slice(tfws, func(i, j int) bool {
		a, b := &tfws[i], &tfws[j]
		if a.filterLoopsCount != b.filterLoopsCount {
			return a.filterLoopsCount < b.filterLoopsCount
		}
		return a.tf.Less(b.tf)
	})
	getFirstPositiveFilterLoopsCount := func(tfws []tagFilterWithWeight) int64 {
		for i := range tfws {
			if n := tfws[i].filterLoopsCount; n > 0 {
				return n
			}
		}
		return int64Max
	}
	storeFilterLoopsCount := func(tfw *tagFilterWithWeight, filterLoopsCount int64) {
		if filterLoopsCount != tfw.filterLoopsCount {
			is.storeLoopsCountForDateFilter(date, tfw.tf, tfw.loopsCount, filterLoopsCount)
		}
	}

	// Intersect metricIDs with the rest of filters.
	//
	// Do not run these tag filters in parallel, since this may result in CPU and RAM waste
	// when the initial tag filters significantly reduce the number of found metricIDs,
	// so the remaining filters could be performed via much faster metricName matching instead
	// of slow selecting of matching metricIDs.
	var tfsPostponed []*tagFilter
	for i, tfw := range tfws {
		tf := tfw.tf
		metricIDsLen := metricIDs.Len()
		if metricIDsLen == 0 {
			// There is no need in applying the remaining filters to an empty set.
			break
		}
		if tfw.filterLoopsCount > int64(metricIDsLen)*loopsCountPerMetricNameMatch {
			// It should be faster performing metricName match on the remaining filters
			// instead of scanning big number of entries in the inverted index for these filters.
			for _, tfw := range tfws[i:] {
				tfsPostponed = append(tfsPostponed, tfw.tf)
			}
			break
		}
		maxLoopsCount := getFirstPositiveFilterLoopsCount(tfws[i+1:])
		if maxLoopsCount == int64Max {
			maxLoopsCount = int64(metricIDsLen) * loopsCountPerMetricNameMatch
		}
		m, filterLoopsCount, err := is.getMetricIDsForDateTagFilter(tf, date, tfs.commonPrefix, intMax, maxLoopsCount)
		if err != nil {
			if errors.Is(err, errTooManyLoops) {
				// Postpone tf, since it took more loops than the next filter may need.
				storeFilterLoopsCount(&tfw, 2*filterLoopsCount)
				tfsPostponed = append(tfsPostponed, tf)
				continue
			}
			// Move failing tf to the end of filter list.
			storeFilterLoopsCount(&tfw, int64Max)
			return nil, err
		}
		storeFilterLoopsCount(&tfw, filterLoopsCount)
		if tf.isNegative || tf.isEmptyMatch {
			metricIDs.Subtract(m)
		} else {
			metricIDs.Intersect(m)
		}
	}
	if metricIDs.Len() == 0 {
		// There is no need in applying tfsPostponed, since the result is empty.
		return nil, nil
	}
	if len(tfsPostponed) > 0 {
		// Apply the postponed filters via metricName match.
		var m uint64set.Set
		if err := is.updateMetricIDsByMetricNameMatch(&m, metricIDs, tfsPostponed); err != nil {
			return nil, err
		}
		return &m, nil
	}
	return metricIDs, nil
}

const (
	intMax   = int((^uint(0)) >> 1)
	int64Max = int64((1 << 63) - 1)
)

func (is *indexSearch) storeDateMetricID(date, metricID uint64, mn *MetricName) error {
	ii := getIndexItems()
	defer putIndexItems(ii)

	ii.B = is.marshalCommonPrefix(ii.B, nsPrefixDateToMetricID)
	ii.B = encoding.MarshalUint64(ii.B, date)
	ii.B = encoding.MarshalUint64(ii.B, metricID)
	ii.Next()

	// Create per-day inverted index entries for metricID.
	kb := kbPool.Get()
	defer kbPool.Put(kb)
	kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixDateTagToMetricIDs)
	kb.B = encoding.MarshalUint64(kb.B, date)
	ii.registerTagIndexes(kb.B, mn, metricID)
	if err := is.db.tb.AddItems(ii.Items); err != nil {
		return fmt.Errorf("cannot add per-day entries for metricID %d: %w", metricID, err)
	}
	return nil
}

func (ii *indexItems) registerTagIndexes(prefix []byte, mn *MetricName, metricID uint64) {
	// Add index entry for MetricGroup -> MetricID
	ii.B = append(ii.B, prefix...)
	ii.B = marshalTagValue(ii.B, nil)
	ii.B = marshalTagValue(ii.B, mn.MetricGroup)
	ii.B = encoding.MarshalUint64(ii.B, metricID)
	ii.Next()
	ii.addReverseMetricGroupIfNeeded(prefix, mn, metricID)

	// Add index entries for tags: tag -> MetricID
	for _, tag := range mn.Tags {
		ii.B = append(ii.B, prefix...)
		ii.B = tag.Marshal(ii.B)
		ii.B = encoding.MarshalUint64(ii.B, metricID)
		ii.Next()
	}

	// Add index entries for composite tags: MetricGroup+tag -> MetricID
	compositeKey := kbPool.Get()
	for _, tag := range mn.Tags {
		compositeKey.B = marshalCompositeTagKey(compositeKey.B[:0], mn.MetricGroup, tag.Key)
		ii.B = append(ii.B, prefix...)
		ii.B = marshalTagValue(ii.B, compositeKey.B)
		ii.B = marshalTagValue(ii.B, tag.Value)
		ii.B = encoding.MarshalUint64(ii.B, metricID)
		ii.Next()
	}
	kbPool.Put(compositeKey)
}
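
// Illustration (not part of the original code; pseudo-notation): for a metric
// cpu{mode="idle"} with metricID=123, registerTagIndexes above emits rows of
// three kinds, all sharing the given prefix:
//
//	prefix | tagValue("") | tagValue("cpu") | 123             -> MetricGroup index
//	prefix | tag("mode","idle") | 123                         -> per-tag index
//	prefix | tagValue(compositeKey) | tagValue("idle") | 123  -> (name, tag) composite index
//
// plus a reversed-name row for Graphite-like names containing dots.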

func (ii *indexItems) addReverseMetricGroupIfNeeded(prefix []byte, mn *MetricName, metricID uint64) {
	if bytes.IndexByte(mn.MetricGroup, '.') < 0 {
		// The reverse metric group is needed only for Graphite-like metrics with dots.
		return
	}
	// This is most likely a Graphite metric like 'foo.bar.baz'.
	// Store reverse metric name 'zab.rab.oof' in order to speed up search for '*.bar.baz'
	// when the Graphite wildcard has a suffix matching small number of time series.
	ii.B = append(ii.B, prefix...)
	ii.B = marshalTagValue(ii.B, graphiteReverseTagKey)
	revBuf := kbPool.Get()
	revBuf.B = reverseBytes(revBuf.B[:0], mn.MetricGroup)
	ii.B = marshalTagValue(ii.B, revBuf.B)
	kbPool.Put(revBuf)
	ii.B = encoding.MarshalUint64(ii.B, metricID)
	ii.Next()
}

func isArtificialTagKey(key []byte) bool {
	if bytes.Equal(key, graphiteReverseTagKey) {
		return true
	}
	if len(key) > 0 && key[0] == compositeTagKeyPrefix {
		return true
	}
	return false
}

// The tag key for reverse metric name used for speeding up searching
// for Graphite wildcards with suffix matching small number of time series,
// i.e. '*.bar.baz'.
//
// It is expected that the given key isn't used by users.
var graphiteReverseTagKey = []byte("\xff")

// The prefix for composite tag, which is used for speeding up searching
// for composite filters, which contain `{__name__="<metric_name>"}` filter.
//
// It is expected that the given prefix isn't used by users.
const compositeTagKeyPrefix = '\xfe'

func marshalCompositeTagKey(dst, name, key []byte) []byte {
	dst = append(dst, compositeTagKeyPrefix)
	dst = encoding.MarshalVarUint64(dst, uint64(len(name)))
	dst = append(dst, name...)
	dst = append(dst, key...)
	return dst
}
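
// The sketch below illustrates the composite tag key layout produced by
// marshalCompositeTagKey. It is illustrative only and isn't part of the
// original code; the metric name and tag key are hypothetical.
func exampleCompositeTagKey() []byte {
	// The result is compositeTagKeyPrefix + varuint(len("cpu_usage")) + "cpu_usage" + "mode",
	// i.e. []byte{0xfe, 9, 'c', 'p', 'u', '_', 'u', 's', 'a', 'g', 'e', 'm', 'o', 'd', 'e'}.
	return marshalCompositeTagKey(nil, []byte("cpu_usage"), []byte("mode"))
}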

func reverseBytes(dst, src []byte) []byte {
	for i := len(src) - 1; i >= 0; i-- {
		dst = append(dst, src[i])
	}
	return dst
}
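
// For instance, reverseBytes(nil, []byte("foo.bar.baz")) returns []byte("zab.rab.oof"),
// which is the reversed form stored under graphiteReverseTagKey above.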

func (is *indexSearch) hasDateMetricID(date, metricID uint64) (bool, error) {
	ts := &is.ts
	kb := &is.kb
	kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixDateToMetricID)
	kb.B = encoding.MarshalUint64(kb.B, date)
	kb.B = encoding.MarshalUint64(kb.B, metricID)
	if err := ts.FirstItemWithPrefix(kb.B); err != nil {
		if err == io.EOF {
			return false, nil
		}
		return false, fmt.Errorf("error when searching for (date=%d, metricID=%d) entry: %w", date, metricID, err)
	}
	if string(ts.Item) != string(kb.B) {
		return false, fmt.Errorf("unexpected entry for (date=%d, metricID=%d); got %q; want %q", date, metricID, ts.Item, kb.B)
	}
	return true, nil
}

func (is *indexSearch) getMetricIDsForDateTagFilter(tf *tagFilter, date uint64, commonPrefix []byte, maxMetrics int, maxLoopsCount int64) (*uint64set.Set, int64, error) {
	if !bytes.HasPrefix(tf.prefix, commonPrefix) {
		logger.Panicf("BUG: unexpected tf.prefix %q; must start with commonPrefix %q", tf.prefix, commonPrefix)
	}
	kb := kbPool.Get()
	defer kbPool.Put(kb)
	if date != 0 {
		// Use per-date search.
		kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixDateTagToMetricIDs)
		kb.B = encoding.MarshalUint64(kb.B, date)
	} else {
		// Use global search if date isn't set.
		kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixTagToMetricIDs)
	}
	kb.B = append(kb.B, tf.prefix[len(commonPrefix):]...)
	tfNew := *tf
	tfNew.isNegative = false // isNegative for the original tf is handled by the caller.
	tfNew.prefix = kb.B
	metricIDs, loopsCount, err := is.getMetricIDsForTagFilter(&tfNew, maxMetrics, maxLoopsCount)
	if err != nil {
		return nil, loopsCount, err
	}
	if tf.isNegative || !tf.isEmptyMatch {
		return metricIDs, loopsCount, nil
	}
	// The tag filter matches empty label values, e.g. {foo=~"bar|"}.
	// Convert it to a negative filter, which matches {foo=~".+",foo!~"bar|"}.
	// This fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1601
	// See also https://github.com/VictoriaMetrics/VictoriaMetrics/issues/395
	maxLoopsCount -= loopsCount
	if err := tfNew.Init(kb.B, tf.key, []byte(".+"), false, true); err != nil {
		logger.Panicf(`BUG: cannot init tag filter: {%q=~".+"}: %s`, tf.key, err)
	}
	m, lc, err := is.getMetricIDsForTagFilter(&tfNew, maxMetrics, maxLoopsCount)
	loopsCount += lc
	if err != nil {
		return nil, loopsCount, err
	}
	m.Subtract(metricIDs)
	return m, loopsCount, nil
}

func (is *indexSearch) getLoopsCountAndTimestampForDateFilter(date uint64, tf *tagFilter) (int64, int64, uint64) {
	is.kb.B = appendDateTagFilterCacheKey(is.kb.B[:0], is.db.name, date, tf)
	kb := kbPool.Get()
	defer kbPool.Put(kb)
	kb.B = is.db.loopsPerDateTagFilterCache.Get(kb.B[:0], is.kb.B)
	if len(kb.B) != 3*8 {
		return 0, 0, 0
	}
	loopsCount := encoding.UnmarshalInt64(kb.B)
	filterLoopsCount := encoding.UnmarshalInt64(kb.B[8:])
	timestamp := encoding.UnmarshalUint64(kb.B[16:])
	return loopsCount, filterLoopsCount, timestamp
}

func (is *indexSearch) storeLoopsCountForDateFilter(date uint64, tf *tagFilter, loopsCount, filterLoopsCount int64) {
	currentTimestamp := fasttime.UnixTimestamp()
	is.kb.B = appendDateTagFilterCacheKey(is.kb.B[:0], is.db.name, date, tf)
	kb := kbPool.Get()
	kb.B = encoding.MarshalInt64(kb.B[:0], loopsCount)
	kb.B = encoding.MarshalInt64(kb.B, filterLoopsCount)
	kb.B = encoding.MarshalUint64(kb.B, currentTimestamp)
	is.db.loopsPerDateTagFilterCache.Set(is.kb.B, kb.B)
	kbPool.Put(kb)
}
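
// Illustrative sketch (not part of the original code) of the 24-byte cache
// value layout shared by the two functions above: 8 bytes loopsCount (int64),
// 8 bytes filterLoopsCount (int64) and 8 bytes timestamp (uint64), matching
// the `len(kb.B) != 3*8` check in getLoopsCountAndTimestampForDateFilter.
func exampleLoopsCountCacheValue(loopsCount, filterLoopsCount int64, timestamp uint64) []byte {
	b := encoding.MarshalInt64(nil, loopsCount)
	b = encoding.MarshalInt64(b, filterLoopsCount)
	b = encoding.MarshalUint64(b, timestamp)
	return b // len(b) == 24
}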

func appendDateTagFilterCacheKey(dst []byte, indexDBName string, date uint64, tf *tagFilter) []byte {
	dst = append(dst, indexDBName...)
	dst = encoding.MarshalUint64(dst, date)
	dst = tf.Marshal(dst)
	return dst
}

func (is *indexSearch) getMetricIDsForDate(date uint64, maxMetrics int) (*uint64set.Set, error) {
	// Extract all the metricIDs from (date, __name__=value)->metricIDs entries.
	kb := kbPool.Get()
	defer kbPool.Put(kb)
	if date != 0 {
		// Use per-date search.
		kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixDateTagToMetricIDs)
		kb.B = encoding.MarshalUint64(kb.B, date)
	} else {
		// Use global search.
		kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixTagToMetricIDs)
	}
	kb.B = marshalTagValue(kb.B, nil)
	var metricIDs uint64set.Set
	if err := is.updateMetricIDsForPrefix(kb.B, &metricIDs, maxMetrics); err != nil {
		return nil, err
	}
	return &metricIDs, nil
}

func (is *indexSearch) updateMetricIDsForPrefix(prefix []byte, metricIDs *uint64set.Set, maxMetrics int) error {
	ts := &is.ts
	mp := &is.mp
	loopsPaceLimiter := 0
	ts.Seek(prefix)
	for ts.NextItem() {
		if loopsPaceLimiter&paceLimiterFastIterationsMask == 0 {
			if err := checkSearchDeadlineAndPace(is.deadline); err != nil {
				return err
			}
		}
		loopsPaceLimiter++
		item := ts.Item
		if !bytes.HasPrefix(item, prefix) {
			return nil
		}
		tail := item[len(prefix):]
		n := bytes.IndexByte(tail, tagSeparatorChar)
		if n < 0 {
			return fmt.Errorf("invalid tag->metricIDs line %q: cannot find tagSeparatorChar %d", item, tagSeparatorChar)
		}
		tail = tail[n+1:]
		if err := mp.InitOnlyTail(item, tail); err != nil {
			return err
		}
		mp.ParseMetricIDs()
		metricIDs.AddMulti(mp.MetricIDs)
		if metricIDs.Len() >= maxMetrics {
			return nil
		}
	}
	if err := ts.Error(); err != nil {
		return fmt.Errorf("error when searching for all metricIDs by prefix %q: %w", prefix, err)
	}
	return nil
}
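
// Illustrative sketch (not part of the original code; identifiers other than
// paceLimiterFastIterationsMask and checkSearchDeadlineAndPace are
// hypothetical): the pace-limiter pattern above checks the search deadline
// only once every (paceLimiterFastIterationsMask+1) iterations, so the
// bookkeeping cost stays negligible on hot scan loops:
//
//	for i := 0; it.Next(); i++ {
//		if i&paceLimiterFastIterationsMask == 0 {
//			if err := checkSearchDeadlineAndPace(deadline); err != nil {
//				return err
//			}
//		}
//	}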

// The estimated number of index scan loops a single loop in updateMetricIDsByMetricNameMatch takes.
const loopsCountPerMetricNameMatch = 150

var kbPool bytesutil.ByteBufferPool

// Returns local unique MetricID.
func generateUniqueMetricID() uint64 {
	// It is expected that metricIDs returned from this function are dense.
	// If they are sparse, this may hurt metricIDs intersection
	// performance with uint64set.Set.
	return atomic.AddUint64(&nextUniqueMetricID, 1)
}

// This number mustn't go backwards on restarts, otherwise metricID
// collisions are possible. So don't change time on the server
// between VictoriaMetrics restarts.
var nextUniqueMetricID = uint64(time.Now().UnixNano())
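
// Illustrative sketch (not part of the original code): atomic.AddUint64 makes
// generateUniqueMetricID safe for concurrent use, and seeding it with the
// wall-clock nanosecond timestamp keeps IDs increasing across restarts as long
// as fewer than one metricID per nanosecond is created on average:
//
//	a := generateUniqueMetricID()
//	b := generateUniqueMetricID()
//	// b > a holds even when other goroutines allocate IDs in between.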

func marshalCommonPrefix(dst []byte, nsPrefix byte) []byte {
	dst = append(dst, nsPrefix)
	return dst
}

// This function is needed only for minimizing the difference between code for single-node and cluster versions.
func (is *indexSearch) marshalCommonPrefix(dst []byte, nsPrefix byte) []byte {
	return marshalCommonPrefix(dst, nsPrefix)
}

func unmarshalCommonPrefix(src []byte) ([]byte, byte, error) {
	if len(src) < commonPrefixLen {
		return nil, 0, fmt.Errorf("cannot unmarshal common prefix from %d bytes; need at least %d bytes; data=%X", len(src), commonPrefixLen, src)
	}
	prefix := src[0]
	return src[commonPrefixLen:], prefix, nil
}

// 1 byte for prefix
const commonPrefixLen = 1

type tagToMetricIDsRowParser struct {
	// NSPrefix contains the first byte parsed from the row after Init call.
	// This is either nsPrefixTagToMetricIDs or nsPrefixDateTagToMetricIDs.
	NSPrefix byte

	// Date contains parsed date for nsPrefixDateTagToMetricIDs rows after Init call.
	Date uint64

	// MetricIDs contains parsed MetricIDs after ParseMetricIDs call.
	MetricIDs []uint64

	// Tag contains parsed tag after Init call.
	Tag Tag

	// tail contains the remaining unparsed metricIDs.
	tail []byte
}

func (mp *tagToMetricIDsRowParser) Reset() {
	mp.NSPrefix = 0
	mp.Date = 0
	mp.MetricIDs = mp.MetricIDs[:0]
	mp.Tag.Reset()
	mp.tail = nil
}

// Init initializes mp from b, which should contain encoded tag->metricIDs row.
//
// b cannot be re-used until Reset call.
func (mp *tagToMetricIDsRowParser) Init(b []byte, nsPrefixExpected byte) error {
	tail, nsPrefix, err := unmarshalCommonPrefix(b)
	if err != nil {
		return fmt.Errorf("invalid tag->metricIDs row %q: %w", b, err)
	}
	if nsPrefix != nsPrefixExpected {
		return fmt.Errorf("invalid prefix for tag->metricIDs row %q; got %d; want %d", b, nsPrefix, nsPrefixExpected)
	}
	if nsPrefix == nsPrefixDateTagToMetricIDs {
		// unmarshal date.
		if len(tail) < 8 {
			return fmt.Errorf("cannot unmarshal date from (date, tag)->metricIDs row %q from %d bytes; want at least 8 bytes", b, len(tail))
		}
		mp.Date = encoding.UnmarshalUint64(tail)
		tail = tail[8:]
	}
	mp.NSPrefix = nsPrefix
	tail, err = mp.Tag.Unmarshal(tail)
	if err != nil {
		return fmt.Errorf("cannot unmarshal tag from tag->metricIDs row %q: %w", b, err)
	}
	return mp.InitOnlyTail(b, tail)
}

// MarshalPrefix marshals row prefix without tail to dst.
func (mp *tagToMetricIDsRowParser) MarshalPrefix(dst []byte) []byte {
	dst = marshalCommonPrefix(dst, mp.NSPrefix)
	if mp.NSPrefix == nsPrefixDateTagToMetricIDs {
		dst = encoding.MarshalUint64(dst, mp.Date)
	}
	dst = mp.Tag.Marshal(dst)
	return dst
}
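
// Illustrative sketch (not part of the original code): a full row handled by
// tagToMetricIDsRowParser therefore has the layout
//
//	nsPrefix (1 byte) | date (8 bytes, nsPrefixDateTagToMetricIDs rows only) | marshaled Tag | metricIDs tail (N*8 bytes)
//
// i.e. the prefix produced by MarshalPrefix followed by the tail kept by InitOnlyTail.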

// InitOnlyTail initializes mp.tail from tail.
//
// b must contain tag->metricIDs row.
// b cannot be re-used until Reset call.
func (mp *tagToMetricIDsRowParser) InitOnlyTail(b, tail []byte) error {
	if len(tail) == 0 {
		return fmt.Errorf("missing metricID in the tag->metricIDs row %q", b)
	}
	if len(tail)%8 != 0 {
		return fmt.Errorf("invalid tail length in the tag->metricIDs row; got %d bytes; must be multiple of 8 bytes", len(tail))
	}
	mp.tail = tail
	return nil
}

// EqualPrefix returns true if prefixes for mp and x are equal.
//
// Prefix contains (NSPrefix, Date, Tag).
func (mp *tagToMetricIDsRowParser) EqualPrefix(x *tagToMetricIDsRowParser) bool {
	if !mp.Tag.Equal(&x.Tag) {
		return false
	}
	return mp.Date == x.Date && mp.NSPrefix == x.NSPrefix
}

// MetricIDsLen returns the number of MetricIDs in the mp.tail.
func (mp *tagToMetricIDsRowParser) MetricIDsLen() int {
	return len(mp.tail) / 8
}

// ParseMetricIDs parses MetricIDs from mp.tail into mp.MetricIDs.
func (mp *tagToMetricIDsRowParser) ParseMetricIDs() {
	tail := mp.tail
	mp.MetricIDs = mp.MetricIDs[:0]
	n := len(tail) / 8
	if n <= cap(mp.MetricIDs) {
		mp.MetricIDs = mp.MetricIDs[:n]
	} else {
		mp.MetricIDs = append(mp.MetricIDs[:cap(mp.MetricIDs)], make([]uint64, n-cap(mp.MetricIDs))...)
	}
	metricIDs := mp.MetricIDs
	// Hint the compiler that metricIDs holds at least n elements,
	// so it can eliminate bounds checks in the loop below.
	_ = metricIDs[n-1]
	for i := 0; i < n; i++ {
		if len(tail) < 8 {
			logger.Panicf("BUG: tail cannot be smaller than 8 bytes; got %d bytes; tail=%X", len(tail), tail)
			return
		}
		metricID := encoding.UnmarshalUint64(tail)
		metricIDs[i] = metricID
		tail = tail[8:]
	}
}

// IsDeletedTag verifies whether the tag from mp is deleted according to dmis.
//
// dmis must contain deleted MetricIDs.
// The tag is considered deleted only if all the metricIDs in the row are deleted.
func (mp *tagToMetricIDsRowParser) IsDeletedTag(dmis *uint64set.Set) bool {
	if dmis.Len() == 0 {
		return false
	}
	mp.ParseMetricIDs()
	for _, metricID := range mp.MetricIDs {
		if !dmis.Has(metricID) {
			return false
		}
	}
	return true
}

func mergeTagToMetricIDsRows(data []byte, items []mergeset.Item) ([]byte, []mergeset.Item) {
	data, items = mergeTagToMetricIDsRowsInternal(data, items, nsPrefixTagToMetricIDs)
	data, items = mergeTagToMetricIDsRowsInternal(data, items, nsPrefixDateTagToMetricIDs)
	return data, items
}

func mergeTagToMetricIDsRowsInternal(data []byte, items []mergeset.Item, nsPrefix byte) ([]byte, []mergeset.Item) {
	// Perform quick checks whether items contain rows starting from nsPrefix
	// based on the fact that items are sorted.
	if len(items) <= 2 {
		// The first and the last row must remain unchanged.
		return data, items
	}
	firstItem := items[0].Bytes(data)
	if len(firstItem) > 0 && firstItem[0] > nsPrefix {
		return data, items
	}
	lastItem := items[len(items)-1].Bytes(data)
	if len(lastItem) > 0 && lastItem[0] < nsPrefix {
		return data, items
	}

	// items contain at least one row starting from nsPrefix. Merge rows with common tag.
	tmm := getTagToMetricIDsRowsMerger()
	tmm.dataCopy = append(tmm.dataCopy[:0], data...)
	tmm.itemsCopy = append(tmm.itemsCopy[:0], items...)
	mp := &tmm.mp
	mpPrev := &tmm.mpPrev
	dstData := data[:0]
	dstItems := items[:0]
	for i, it := range items {
		item := it.Bytes(data)
		if len(item) == 0 || item[0] != nsPrefix || i == 0 || i == len(items)-1 {
			// Write rows not starting with nsPrefix as-is.
			// Additionally write the first and the last row as-is in order to preserve
			// sort order for adjacent blocks.
			dstData, dstItems = tmm.flushPendingMetricIDs(dstData, dstItems, mpPrev)
			dstData = append(dstData, item...)
			dstItems = append(dstItems, mergeset.Item{
				Start: uint32(len(dstData) - len(item)),
				End:   uint32(len(dstData)),
			})
			continue
		}
		if err := mp.Init(item, nsPrefix); err != nil {
			logger.Panicf("FATAL: cannot parse row starting with nsPrefix %d during merge: %s", nsPrefix, err)
		}
		if mp.MetricIDsLen() >= maxMetricIDsPerRow {
			dstData, dstItems = tmm.flushPendingMetricIDs(dstData, dstItems, mpPrev)
			dstData = append(dstData, item...)
			dstItems = append(dstItems, mergeset.Item{
				Start: uint32(len(dstData) - len(item)),
				End:   uint32(len(dstData)),
			})
			continue
		}
		if !mp.EqualPrefix(mpPrev) {
			dstData, dstItems = tmm.flushPendingMetricIDs(dstData, dstItems, mpPrev)
		}
		mp.ParseMetricIDs()
		tmm.pendingMetricIDs = append(tmm.pendingMetricIDs, mp.MetricIDs...)
		mpPrev, mp = mp, mpPrev
		if len(tmm.pendingMetricIDs) >= maxMetricIDsPerRow {
			dstData, dstItems = tmm.flushPendingMetricIDs(dstData, dstItems, mpPrev)
		}
	}
	if len(tmm.pendingMetricIDs) > 0 {
		logger.Panicf("BUG: tmm.pendingMetricIDs must be empty at this point; got %d items: %d", len(tmm.pendingMetricIDs), tmm.pendingMetricIDs)
	}
	if !checkItemsSorted(dstData, dstItems) {
		// Items could become unsorted if initial items contain duplicate metricIDs:
		//
		//   item1: 1, 1, 5
		//   item2: 1, 4
		//
		// Items could become the following after the merge:
		//
		//   item1: 1, 5
		//   item2: 1, 4
		//
		// i.e. item1 > item2
		//
		// Leave the original items unmerged, so they can be merged next time.
		// This case should be quite rare - if multiple data points are simultaneously inserted
		// into the same new time series from multiple concurrent goroutines.
		atomic.AddUint64(&indexBlocksWithMetricIDsIncorrectOrder, 1)
		dstData = append(dstData[:0], tmm.dataCopy...)
		dstItems = append(dstItems[:0], tmm.itemsCopy...)
		if !checkItemsSorted(dstData, dstItems) {
			logger.Panicf("BUG: the original items weren't sorted; items=%q", dstItems)
		}
	}
	putTagToMetricIDsRowsMerger(tmm)
	atomic.AddUint64(&indexBlocksWithMetricIDsProcessed, 1)
	return dstData, dstItems
}
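
// Illustrative sketch (not part of the original code): the merge above collapses
// consecutive rows sharing a (nsPrefix, date, tag) prefix into a single row whose
// tail is the sorted, deduplicated union of their metricIDs:
//
//	row{tag: "foo=bar", metricIDs: [3 1]}
//	row{tag: "foo=bar", metricIDs: [2 3]}
//
// become
//
//	row{tag: "foo=bar", metricIDs: [1 2 3]}
//
// Rows that already hold maxMetricIDsPerRow or more metricIDs are written as-is.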

var indexBlocksWithMetricIDsIncorrectOrder uint64
var indexBlocksWithMetricIDsProcessed uint64

func checkItemsSorted(data []byte, items []mergeset.Item) bool {
	if len(items) == 0 {
		return true
	}
	prevItem := items[0].String(data)
	for _, it := range items[1:] {
		currItem := it.String(data)
		if prevItem > currItem {
			return false
		}
		prevItem = currItem
	}
	return true
}

// maxMetricIDsPerRow limits the number of metricIDs in tag->metricIDs row.
//
// This reduces overhead on index and metaindex in lib/mergeset.
const maxMetricIDsPerRow = 64

type uint64Sorter []uint64

func (s uint64Sorter) Len() int { return len(s) }
func (s uint64Sorter) Less(i, j int) bool {
	return s[i] < s[j]
}
func (s uint64Sorter) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
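
// Illustrative sketch (not part of the original code): uint64Sorter implements
// sort.Interface, so pendingMetricIDs can be sorted via sort.Sort, which avoids
// the per-call closure allocation of sort.Slice (see flushPendingMetricIDs):
//
//	ids := uint64Sorter{5, 1, 3}
//	sort.Sort(ids)
//	// ids is now [1 3 5]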

type tagToMetricIDsRowsMerger struct {
	pendingMetricIDs uint64Sorter
	mp               tagToMetricIDsRowParser
	mpPrev           tagToMetricIDsRowParser

	itemsCopy []mergeset.Item
	dataCopy  []byte
}

func (tmm *tagToMetricIDsRowsMerger) Reset() {
	tmm.pendingMetricIDs = tmm.pendingMetricIDs[:0]
	tmm.mp.Reset()
	tmm.mpPrev.Reset()
	tmm.itemsCopy = tmm.itemsCopy[:0]
	tmm.dataCopy = tmm.dataCopy[:0]
}

func (tmm *tagToMetricIDsRowsMerger) flushPendingMetricIDs(dstData []byte, dstItems []mergeset.Item, mp *tagToMetricIDsRowParser) ([]byte, []mergeset.Item) {
	if len(tmm.pendingMetricIDs) == 0 {
		// Nothing to flush.
		return dstData, dstItems
	}
	// Use sort.Sort instead of sort.Slice in order to reduce memory allocations.
	sort.Sort(&tmm.pendingMetricIDs)
	tmm.pendingMetricIDs = removeDuplicateMetricIDs(tmm.pendingMetricIDs)

	// Marshal pendingMetricIDs.
	dstDataLen := len(dstData)
	dstData = mp.MarshalPrefix(dstData)
	for _, metricID := range tmm.pendingMetricIDs {
		dstData = encoding.MarshalUint64(dstData, metricID)
	}
	dstItems = append(dstItems, mergeset.Item{
		Start: uint32(dstDataLen),
		End:   uint32(len(dstData)),
	})
	tmm.pendingMetricIDs = tmm.pendingMetricIDs[:0]
	return dstData, dstItems
}

func removeDuplicateMetricIDs(sortedMetricIDs []uint64) []uint64 {
	if len(sortedMetricIDs) < 2 {
		return sortedMetricIDs
	}
	// Fast path: scan for the first duplicate without modifying the slice.
	prevMetricID := sortedMetricIDs[0]
	hasDuplicates := false
	for _, metricID := range sortedMetricIDs[1:] {
		if prevMetricID == metricID {
			hasDuplicates = true
			break
		}
		prevMetricID = metricID
	}
	if !hasDuplicates {
		return sortedMetricIDs
	}
	// Slow path: deduplicate in place, re-using the backing array.
	dstMetricIDs := sortedMetricIDs[:1]
	prevMetricID = sortedMetricIDs[0]
	for _, metricID := range sortedMetricIDs[1:] {
		if prevMetricID == metricID {
			continue
		}
		dstMetricIDs = append(dstMetricIDs, metricID)
		prevMetricID = metricID
	}
	return dstMetricIDs
}
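
// Illustrative sketch (not part of the original code): removeDuplicateMetricIDs
// expects sorted input and deduplicates in place, sharing the backing array with
// its argument:
//
//	ids := []uint64{1, 1, 2, 3, 3}
//	ids = removeDuplicateMetricIDs(ids)
//	// ids is now [1 2 3]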

func getTagToMetricIDsRowsMerger() *tagToMetricIDsRowsMerger {
	v := tmmPool.Get()
	if v == nil {
		return &tagToMetricIDsRowsMerger{}
	}
	return v.(*tagToMetricIDsRowsMerger)
}

func putTagToMetricIDsRowsMerger(tmm *tagToMetricIDsRowsMerger) {
	tmm.Reset()
	tmmPool.Put(tmm)
}

var tmmPool sync.Pool