package logstorage

import (
	"fmt"
	"slices"
	"strconv"
	"strings"
	"sync/atomic"
	"unsafe"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
)

type pipe interface {
	// String returns the string representation of the pipe.
	String() string

	// newPipeProcessor must return a new pipeProcessor for the given ppBase.
	//
	// workersCount is the number of worker goroutines that will call the writeBlock() method.
	//
	// If stopCh is closed, the returned pipeProcessor must stop performing CPU-intensive tasks which take more than a few milliseconds.
	// It is OK to continue processing pipeProcessor calls if they take less than a few milliseconds.
	//
	// The returned pipeProcessor may call cancel() at any time in order to notify worker goroutines to stop sending new data to the pipeProcessor.
	newPipeProcessor(workersCount int, stopCh <-chan struct{}, cancel func(), ppBase pipeProcessor) pipeProcessor
}
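
// Illustrative sketch (not part of the package API): a query tail such as
//
//	| fields host, path | stats by (host) count() as rows | head 10
//
// is parsed into a []pipe; every pipe then creates its pipeProcessor via
// newPipeProcessor(), with each processor writing its output to the next
// processor in the chain via ppBase.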

// pipeProcessor must process a single pipe.
type pipeProcessor interface {
	// writeBlock must write the given block of data to the given pipeProcessor.
	//
	// writeBlock is called concurrently from worker goroutines.
	// The workerID is the id of the worker goroutine that calls writeBlock.
	// It is in the range 0 ... workersCount-1.
	//
	// It is forbidden to hold references to columns after returning from writeBlock, since the caller re-uses columns.
	//
	// If any error occurs at writeBlock, then cancel() must be called by the pipeProcessor in order to notify worker goroutines
	// to stop sending new data. The error must then be returned from flush().
	//
	// cancel() may also be called when the pipeProcessor decides to stop accepting new data, even if no error has occurred.
	writeBlock(workerID uint, timestamps []int64, columns []BlockColumn)

	// flush must flush all the data accumulated in the pipeProcessor to the base pipeProcessor.
	//
	// flush is called after all the worker goroutines are stopped.
	//
	// It is guaranteed that flush() is called for every pipeProcessor returned from pipe.newPipeProcessor().
	flush() error
}
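
// Lifecycle sketch (assumed from the contracts above; no additional API):
//
//	pp := p.newPipeProcessor(workersCount, stopCh, cancel, ppBase)
//	// workers 0..workersCount-1 call pp.writeBlock(...) concurrently
//	// ... all the workers stop ...
//	err := pp.flush() // called exactly once, after the workers are stopped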

type defaultPipeProcessor func(workerID uint, timestamps []int64, columns []BlockColumn)

func newDefaultPipeProcessor(writeBlock func(workerID uint, timestamps []int64, columns []BlockColumn)) pipeProcessor {
	return defaultPipeProcessor(writeBlock)
}

func (dpp defaultPipeProcessor) writeBlock(workerID uint, timestamps []int64, columns []BlockColumn) {
	dpp(workerID, timestamps, columns)
}

func (dpp defaultPipeProcessor) flush() error {
	return nil
}
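
// Minimal usage sketch: adapt a plain callback into a pipeProcessor, e.g.
// as the final ppBase in a chain that collects query results:
//
//	pp := newDefaultPipeProcessor(func(workerID uint, timestamps []int64, columns []BlockColumn) {
//		// consume the block here; do not retain columns after returning
//	})
//	_ = pp.flush() // always returns nil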

func parsePipes(lex *lexer) ([]pipe, error) {
	var pipes []pipe
	for !lex.isKeyword(")", "") {
		if !lex.isKeyword("|") {
			return nil, fmt.Errorf("expecting '|'")
		}
		if !lex.mustNextToken() {
			return nil, fmt.Errorf("missing token after '|'")
		}
		switch {
		case lex.isKeyword("fields"):
			fp, err := parseFieldsPipe(lex)
			if err != nil {
				return nil, fmt.Errorf("cannot parse 'fields' pipe: %w", err)
			}
			pipes = append(pipes, fp)
		case lex.isKeyword("stats"):
			sp, err := parseStatsPipe(lex)
			if err != nil {
				return nil, fmt.Errorf("cannot parse 'stats' pipe: %w", err)
			}
			pipes = append(pipes, sp)
		case lex.isKeyword("head"):
			hp, err := parseHeadPipe(lex)
			if err != nil {
				return nil, fmt.Errorf("cannot parse 'head' pipe: %w", err)
			}
			pipes = append(pipes, hp)
		case lex.isKeyword("skip"):
			sp, err := parseSkipPipe(lex)
			if err != nil {
				return nil, fmt.Errorf("cannot parse 'skip' pipe: %w", err)
			}
			pipes = append(pipes, sp)
		default:
			return nil, fmt.Errorf("unexpected pipe %q", lex.token)
		}
	}
	return pipes, nil
}
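
// For example, with the lexer positioned at the leading '|' of
//
//	| fields foo, bar | stats by (foo) count() as rows | skip 10 | head 5
//
// parsePipes returns the four corresponding pipes; parsing stops at ')' or at
// the end of the input (sketch; the caller is responsible for positioning the lexer).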

type fieldsPipe struct {
	// fields contains the list of fields to fetch
	fields []string
}

func (fp *fieldsPipe) String() string {
	if len(fp.fields) == 0 {
		logger.Panicf("BUG: fieldsPipe must contain at least a single field")
	}
	return "fields " + fieldNamesString(fp.fields)
}

func (fp *fieldsPipe) newPipeProcessor(_ int, _ <-chan struct{}, _ func(), ppBase pipeProcessor) pipeProcessor {
	return &fieldsPipeProcessor{
		fp:     fp,
		ppBase: ppBase,
	}
}

type fieldsPipeProcessor struct {
	fp     *fieldsPipe
	ppBase pipeProcessor
}

func (fpp *fieldsPipeProcessor) writeBlock(workerID uint, timestamps []int64, columns []BlockColumn) {
	if slices.Contains(fpp.fp.fields, "*") || areSameBlockColumns(columns, fpp.fp.fields) {
		// Fast path - there is no need for additional transformations before writing the block to ppBase.
		fpp.ppBase.writeBlock(workerID, timestamps, columns)
		return
	}

	// Slow path - construct columns for fpp.fp.fields before writing them to ppBase.
	brs := getBlockRows()
	cs := brs.cs
	for _, f := range fpp.fp.fields {
		values := getValuesForBlockColumn(columns, f, len(timestamps))
		cs = append(cs, BlockColumn{
			Name:   f,
			Values: values,
		})
	}
	fpp.ppBase.writeBlock(workerID, timestamps, cs)
	brs.cs = cs
	putBlockRows(brs)
}
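
// Behavior sketch: for an input block with columns [a, b, c] and the pipe
// 'fields b, d', the output block keeps column b as-is and adds a column d
// (presumably filled with empty values by getValuesForBlockColumn() when the
// column is missing from the block).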

func (fpp *fieldsPipeProcessor) flush() error {
	return nil
}

func parseFieldsPipe(lex *lexer) (*fieldsPipe, error) {
	var fields []string
	for {
		if !lex.mustNextToken() {
			return nil, fmt.Errorf("missing field name")
		}
		if lex.isKeyword(",") {
			return nil, fmt.Errorf("unexpected ','; expecting field name")
		}
		field, err := parseFieldName(lex)
		if err != nil {
			return nil, fmt.Errorf("cannot parse field name: %w", err)
		}
		fields = append(fields, field)
		switch {
		case lex.isKeyword("|", ")", ""):
			fp := &fieldsPipe{
				fields: fields,
			}
			return fp, nil
		case lex.isKeyword(","):
		default:
			return nil, fmt.Errorf("unexpected token: %q; expecting ',', '|' or ')'", lex.token)
		}
	}
}

type statsPipe struct {
	byFields []string
	funcs    []statsFunc
}

type statsFunc interface {
	// String returns the string representation of the statsFunc.
	String() string

	// neededFields returns the fields needed for calculating the given stats.
	neededFields() []string

	// newStatsFuncProcessor must create a new statsFuncProcessor for calculating stats for the given statsFunc.
	//
	// It must also return the size in bytes of the returned statsFuncProcessor.
	newStatsFuncProcessor() (statsFuncProcessor, int)
}

// statsFuncProcessor must process stats for some statsFunc.
//
// All the statsFuncProcessor methods are called from a single goroutine at a time,
// so no internal synchronization is needed.
type statsFuncProcessor interface {
	// updateStatsForAllRows must update the statsFuncProcessor stats from all the rows.
	//
	// It must return the increase of the internal state size in bytes for the statsFuncProcessor.
	updateStatsForAllRows(timestamps []int64, columns []BlockColumn) int

	// updateStatsForRow must update the statsFuncProcessor stats from the row at rowIndex.
	//
	// It must return the increase of the internal state size in bytes for the statsFuncProcessor.
	updateStatsForRow(timestamps []int64, columns []BlockColumn, rowIndex int) int

	// mergeState must merge the sfp state into the statsFuncProcessor state.
	mergeState(sfp statsFuncProcessor)

	// finalizeStats must return the collected stats from the statsFuncProcessor.
	finalizeStats() (name, value string)
}

func (sp *statsPipe) String() string {
	s := "stats "
	if len(sp.byFields) > 0 {
		s += "by (" + fieldNamesString(sp.byFields) + ") "
	}

	if len(sp.funcs) == 0 {
		logger.Panicf("BUG: statsPipe must contain at least a single statsFunc")
	}
	a := make([]string, len(sp.funcs))
	for i, f := range sp.funcs {
		a[i] = f.String()
	}
	s += strings.Join(a, ", ")
	return s
}
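
// For example, String() for a statsPipe with byFields=[host, path] and
// count/uniq funcs yields
//
//	stats by (host, path) count() as rows, uniq(ip) as ips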

const stateSizeBudgetChunk = 1 << 20

func (sp *statsPipe) newPipeProcessor(workersCount int, stopCh <-chan struct{}, cancel func(), ppBase pipeProcessor) pipeProcessor {
	maxStateSize := int64(float64(memory.Allowed()) * 0.3)

	shards := make([]statsPipeProcessorShard, workersCount)
	for i := range shards {
		shard := &shards[i]
		shard.m = make(map[string]*statsPipeGroup)
		shard.funcs = sp.funcs
		shard.stateSizeBudget = stateSizeBudgetChunk
		maxStateSize -= stateSizeBudgetChunk
	}

	spp := &statsPipeProcessor{
		sp:     sp,
		stopCh: stopCh,
		cancel: cancel,
		ppBase: ppBase,

		shards: shards,

		maxStateSize: maxStateSize,
	}
	spp.stateSizeBudget.Store(maxStateSize)

	return spp
}
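
// Budget arithmetic sketch (the 4GiB figure is purely illustrative): with
// memory.Allowed() = 4GiB, maxStateSize starts at ~1.2GiB; every shard is
// pre-charged a single 1MiB chunk (stateSizeBudgetChunk), and the remainder
// is stored in the shared atomic stateSizeBudget, from which writeBlock()
// below pulls additional chunks on demand.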

type statsPipeProcessor struct {
	sp     *statsPipe
	stopCh <-chan struct{}
	cancel func()
	ppBase pipeProcessor

	shards []statsPipeProcessorShard

	maxStateSize    int64
	stateSizeBudget atomic.Int64
}

type statsPipeProcessorShard struct {
	statsPipeProcessorShardNopad

	// The padding prevents false sharing on widespread platforms with 128 mod (cache line size) = 0.
	_ [128 - unsafe.Sizeof(statsPipeProcessorShardNopad{})%128]byte
}

type statsPipeProcessorShardNopad struct {
	m     map[string]*statsPipeGroup
	funcs []statsFunc

	columnIdxs []int
	keyBuf     []byte

	stateSizeBudget int
}

func (shard *statsPipeProcessorShard) getStatsPipeGroup(key []byte) *statsPipeGroup {
	spg := shard.m[string(key)]
	if spg == nil {
		sfps := make([]statsFuncProcessor, len(shard.funcs))
		for i, f := range shard.funcs {
			sfp, stateSize := f.newStatsFuncProcessor()
			sfps[i] = sfp
			shard.stateSizeBudget -= stateSize
		}
		spg = &statsPipeGroup{
			sfps: sfps,
		}
		shard.m[string(key)] = spg
		shard.stateSizeBudget -= len(key) + int(unsafe.Sizeof("")+unsafe.Sizeof(spg)+unsafe.Sizeof(sfps[0])*uintptr(len(sfps)))
	}
	return spg
}

type statsPipeGroup struct {
	sfps []statsFuncProcessor
}

func (spp *statsPipeProcessor) writeBlock(workerID uint, timestamps []int64, columns []BlockColumn) {
	shard := &spp.shards[workerID]

	for shard.stateSizeBudget < 0 {
		// steal some budget for the state size from the global budget.
		remaining := spp.stateSizeBudget.Add(-stateSizeBudgetChunk)
		if remaining < 0 {
			// The state size is too big. Stop processing data in order to avoid OOM crash.
			if remaining+stateSizeBudgetChunk >= 0 {
				// Notify worker goroutines to stop calling writeBlock() in order to save CPU time.
				spp.cancel()
			}
			return
		}
		shard.stateSizeBudget += stateSizeBudgetChunk
	}

	if len(spp.sp.byFields) == 0 {
		// Fast path - pass all the rows to a single group
		spg := shard.getStatsPipeGroup(nil)
		for _, sfp := range spg.sfps {
			shard.stateSizeBudget -= sfp.updateStatsForAllRows(timestamps, columns)
		}
		return
	}

	// Slow path - update per-row stats

	// Pre-calculate column indexes for byFields in order to speed up building the group key in the loop below.
	shard.columnIdxs = appendBlockColumnIndexes(shard.columnIdxs[:0], columns, spp.sp.byFields)
	columnIdxs := shard.columnIdxs

	keyBuf := shard.keyBuf
	var spg *statsPipeGroup
	for i := range timestamps {
		// Check whether the key for the 'by (...)' fields equals the previous key
		sameValue := spg != nil
		for _, idx := range columnIdxs {
			if idx < 0 {
				continue
			}
			values := columns[idx].Values
			if i <= 0 || values[i-1] != values[i] {
				sameValue = false
				break
			}
		}
		if !sameValue {
			// Construct a new key for the 'by (...)' fields
			keyBuf = keyBuf[:0]
			for _, idx := range columnIdxs {
				v := ""
				if idx >= 0 {
					v = columns[idx].Values[i]
				}
				keyBuf = encoding.MarshalBytes(keyBuf, bytesutil.ToUnsafeBytes(v))
			}
			spg = shard.getStatsPipeGroup(keyBuf)
		}
		for _, sfp := range spg.sfps {
			shard.stateSizeBudget -= sfp.updateStatsForRow(timestamps, columns, i)
		}
	}
	shard.keyBuf = keyBuf
}
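
// Key encoding sketch: for 'by (host, path)' and a row with values
// ("web-1", "/api"), the group key is the encoding.MarshalBytes encoding of
// "web-1" followed by that of "/api"; flush() below decodes the key back into
// per-field values via encoding.UnmarshalBytes.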

func (spp *statsPipeProcessor) flush() error {
	if n := spp.stateSizeBudget.Load(); n <= 0 {
		return fmt.Errorf("cannot calculate [%s], since it requires more than %dMB of memory", spp.sp.String(), spp.maxStateSize/(1<<20))
	}

	// Merge states across shards
	shards := spp.shards
	m := shards[0].m
	shards = shards[1:]
	for i := range shards {
		shard := &shards[i]
		for key, spg := range shard.m {
			// shard.m may be quite big, so this loop can take a lot of time and CPU.
			// Stop processing data as soon as stopCh is closed without wasting CPU time.
			select {
			case <-spp.stopCh:
				return nil
			default:
			}

			spgBase := m[key]
			if spgBase == nil {
				m[key] = spg
			} else {
				for i, sfp := range spgBase.sfps {
					sfp.mergeState(spg.sfps[i])
				}
			}
		}
	}

	// Write per-group states to ppBase
	byFields := spp.sp.byFields
	if len(byFields) == 0 && len(m) == 0 {
		// Special case - zero matching rows.
		// Use spp.shards[0] here, since the local shards slice has been advanced past it above
		// (and may even be empty when workersCount is 1).
		_ = spp.shards[0].getStatsPipeGroup(nil)
		m = spp.shards[0].m
	}

	var values []string
	var columns []BlockColumn
	for key, spg := range m {
		// m may be quite big, so this loop can take a lot of time and CPU.
		// Stop processing data as soon as stopCh is closed without wasting CPU time.
		select {
		case <-spp.stopCh:
			return nil
		default:
		}

		// Unmarshal values for byFields from key.
		values = values[:0]
		keyBuf := bytesutil.ToUnsafeBytes(key)
		for len(keyBuf) > 0 {
			tail, v, err := encoding.UnmarshalBytes(keyBuf)
			if err != nil {
				logger.Panicf("BUG: cannot unmarshal value from keyBuf=%q: %s", keyBuf, err)
			}
			values = append(values, bytesutil.ToUnsafeString(v))
			keyBuf = tail
		}
		if len(values) != len(byFields) {
			logger.Panicf("BUG: unexpected number of values decoded from keyBuf; got %d; want %d", len(values), len(byFields))
		}

		// construct columns for byFields
		columns = columns[:0]
		for i, f := range byFields {
			columns = append(columns, BlockColumn{
				Name:   f,
				Values: values[i : i+1],
			})
		}

		// construct columns for stats functions
		for _, sfp := range spg.sfps {
			name, value := sfp.finalizeStats()
			columns = append(columns, BlockColumn{
				Name:   name,
				Values: []string{value},
			})
		}
		spp.ppBase.writeBlock(0, []int64{0}, columns)
	}

	return nil
}

func (sp *statsPipe) neededFields() []string {
	var neededFields []string
	m := make(map[string]struct{})
	updateNeededFields := func(fields []string) {
		for _, field := range fields {
			if _, ok := m[field]; !ok {
				m[field] = struct{}{}
				neededFields = append(neededFields, field)
			}
		}
	}

	updateNeededFields(sp.byFields)

	for _, f := range sp.funcs {
		fields := f.neededFields()
		updateNeededFields(fields)
	}

	return neededFields
}
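
// For example, for the pipe 'stats by (host) count(x, y) as n, uniq(ip) as u'
// neededFields returns [host, x, y, ip]: byFields come first, followed by the
// deduplicated fields required by every statsFunc.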

func parseStatsPipe(lex *lexer) (*statsPipe, error) {
	if !lex.mustNextToken() {
		return nil, fmt.Errorf("missing stats config")
	}

	var sp statsPipe
	if lex.isKeyword("by") {
		lex.nextToken()
		fields, err := parseFieldNamesInParens(lex)
		if err != nil {
			return nil, fmt.Errorf("cannot parse 'by': %w", err)
		}
		sp.byFields = fields
	}

	var funcs []statsFunc
	for {
		sf, err := parseStatsFunc(lex)
		if err != nil {
			return nil, err
		}
		funcs = append(funcs, sf)
		if lex.isKeyword("|", ")", "") {
			sp.funcs = funcs
			return &sp, nil
		}
		if !lex.isKeyword(",") {
			return nil, fmt.Errorf("unexpected token %q; want ',', '|' or ')'", lex.token)
		}
		lex.nextToken()
	}
}

func parseStatsFunc(lex *lexer) (statsFunc, error) {
	switch {
	case lex.isKeyword("count"):
		sfc, err := parseStatsFuncCount(lex)
		if err != nil {
			return nil, fmt.Errorf("cannot parse 'count' func: %w", err)
		}
		return sfc, nil
	case lex.isKeyword("uniq"):
		sfu, err := parseStatsFuncUniq(lex)
		if err != nil {
			return nil, fmt.Errorf("cannot parse 'uniq' func: %w", err)
		}
		return sfu, nil
	default:
		return nil, fmt.Errorf("unknown stats func %q", lex.token)
	}
}

type statsFuncCount struct {
	fields     []string
	resultName string
}

func (sfc *statsFuncCount) String() string {
	return "count(" + fieldNamesString(sfc.fields) + ") as " + quoteTokenIfNeeded(sfc.resultName)
}

func (sfc *statsFuncCount) neededFields() []string {
	return getFieldsIgnoreStar(sfc.fields)
}

func (sfc *statsFuncCount) newStatsFuncProcessor() (statsFuncProcessor, int) {
	sfcp := &statsFuncCountProcessor{
		sfc: sfc,
	}
	return sfcp, int(unsafe.Sizeof(*sfcp))
}

type statsFuncCountProcessor struct {
	sfc *statsFuncCount

	rowsCount uint64
}

func (sfcp *statsFuncCountProcessor) updateStatsForAllRows(timestamps []int64, columns []BlockColumn) int {
	fields := sfcp.sfc.fields
	if len(fields) == 0 || slices.Contains(fields, "*") {
		// Fast path - count all the rows.
		sfcp.rowsCount += uint64(len(timestamps))
		return 0
	}

	// Slow path - count rows containing at least a single non-empty value for the fields enumerated inside count().
	bm := getFilterBitmap(len(timestamps))
	bm.setBits()
	for _, f := range fields {
		if idx := getBlockColumnIndex(columns, f); idx >= 0 {
			values := columns[idx].Values
			// Keep the bit set only for rows where the value is empty.
			bm.forEachSetBit(func(i int) bool {
				return values[i] == ""
			})
		}
	}

	// The remaining set bits mark rows where all the requested fields are empty.
	emptyValues := 0
	bm.forEachSetBit(func(i int) bool {
		emptyValues++
		return true
	})

	sfcp.rowsCount += uint64(len(timestamps) - emptyValues)
	return 0
}
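
// Semantics sketch: count() and count(*) count every row, while count(x, y)
// counts only the rows where at least one of x, y is non-empty.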

func (sfcp *statsFuncCountProcessor) updateStatsForRow(_ []int64, columns []BlockColumn, rowIdx int) int {
	fields := sfcp.sfc.fields
	if len(fields) == 0 || slices.Contains(fields, "*") {
		// Fast path - count the given row unconditionally.
		sfcp.rowsCount++
		return 0
	}

	// Slow path - count the row at rowIdx if at least a single field enumerated inside count() is non-empty
	for _, f := range fields {
		if idx := getBlockColumnIndex(columns, f); idx >= 0 && columns[idx].Values[rowIdx] != "" {
			sfcp.rowsCount++
			return 0
		}
	}
	return 0
}

func (sfcp *statsFuncCountProcessor) mergeState(sfp statsFuncProcessor) {
	src := sfp.(*statsFuncCountProcessor)
	sfcp.rowsCount += src.rowsCount
}

func (sfcp *statsFuncCountProcessor) finalizeStats() (string, string) {
	value := strconv.FormatUint(sfcp.rowsCount, 10)
	return sfcp.sfc.resultName, value
}

type statsFuncUniq struct {
	fields     []string
	resultName string
}

func (sfu *statsFuncUniq) String() string {
	return "uniq(" + fieldNamesString(sfu.fields) + ") as " + quoteTokenIfNeeded(sfu.resultName)
}

func (sfu *statsFuncUniq) neededFields() []string {
	return sfu.fields
}

func (sfu *statsFuncUniq) newStatsFuncProcessor() (statsFuncProcessor, int) {
	sfup := &statsFuncUniqProcessor{
		sfu: sfu,

		m: make(map[string]struct{}),
	}
	return sfup, int(unsafe.Sizeof(*sfup))
}

type statsFuncUniqProcessor struct {
	sfu *statsFuncUniq

	m map[string]struct{}

	columnIdxs []int
	keyBuf     []byte
}

func (sfup *statsFuncUniqProcessor) updateStatsForAllRows(timestamps []int64, columns []BlockColumn) int {
	fields := sfup.sfu.fields
	m := sfup.m

	stateSizeIncrease := 0
	if len(fields) == 1 {
		// Fast path for a single column
		if idx := getBlockColumnIndex(columns, fields[0]); idx >= 0 {
			for _, v := range columns[idx].Values {
				if v == "" {
					// Do not count empty values
					continue
				}
				if _, ok := m[v]; !ok {
					vCopy := strings.Clone(v)
					m[vCopy] = struct{}{}
					stateSizeIncrease += len(vCopy) + int(unsafe.Sizeof(vCopy))
				}
			}
		}
		return stateSizeIncrease
	}

	// Slow path for multiple columns.

	// Pre-calculate column indexes for fields in order to speed up building the group key in the loop below.
	sfup.columnIdxs = appendBlockColumnIndexes(sfup.columnIdxs[:0], columns, fields)
	columnIdxs := sfup.columnIdxs

	keyBuf := sfup.keyBuf
	for i := range timestamps {
		allEmptyValues := true
		keyBuf = keyBuf[:0]
		for _, idx := range columnIdxs {
			v := ""
			if idx >= 0 {
				v = columns[idx].Values[i]
			}
			if v != "" {
				allEmptyValues = false
			}
			keyBuf = encoding.MarshalBytes(keyBuf, bytesutil.ToUnsafeBytes(v))
		}
		if allEmptyValues {
			// Do not count empty values
			continue
		}
		if _, ok := m[string(keyBuf)]; !ok {
			m[string(keyBuf)] = struct{}{}
			stateSizeIncrease += len(keyBuf) + int(unsafe.Sizeof(""))
		}
	}
	sfup.keyBuf = keyBuf
	return stateSizeIncrease
}

func (sfup *statsFuncUniqProcessor) updateStatsForRow(timestamps []int64, columns []BlockColumn, rowIdx int) int {
	fields := sfup.sfu.fields
	m := sfup.m

	stateSizeIncrease := 0
	if len(fields) == 1 {
		// Fast path for a single column
		if idx := getBlockColumnIndex(columns, fields[0]); idx >= 0 {
			v := columns[idx].Values[rowIdx]
			if v == "" {
				// Do not count empty values
				return stateSizeIncrease
			}
			if _, ok := m[v]; !ok {
				vCopy := strings.Clone(v)
				m[vCopy] = struct{}{}
				stateSizeIncrease += len(vCopy) + int(unsafe.Sizeof(vCopy))
			}
		}
		return stateSizeIncrease
	}

	// Slow path for multiple columns.
	allEmptyValues := true
	keyBuf := sfup.keyBuf[:0]
	for _, f := range fields {
		v := ""
		if idx := getBlockColumnIndex(columns, f); idx >= 0 {
			v = columns[idx].Values[rowIdx]
		}
		if v != "" {
			allEmptyValues = false
		}
		keyBuf = encoding.MarshalBytes(keyBuf, bytesutil.ToUnsafeBytes(v))
	}
	sfup.keyBuf = keyBuf

	if allEmptyValues {
		// Do not count empty values
		return stateSizeIncrease
	}
	if _, ok := m[string(keyBuf)]; !ok {
		m[string(keyBuf)] = struct{}{}
		stateSizeIncrease += len(keyBuf) + int(unsafe.Sizeof(""))
	}
	return stateSizeIncrease
}

func (sfup *statsFuncUniqProcessor) mergeState(sfp statsFuncProcessor) {
	src := sfp.(*statsFuncUniqProcessor)
	m := sfup.m
	for k := range src.m {
		m[k] = struct{}{}
	}
}

func (sfup *statsFuncUniqProcessor) finalizeStats() (string, string) {
	n := uint64(len(sfup.m))
	value := strconv.FormatUint(n, 10)
	return sfup.sfu.resultName, value
}
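
// Semantics sketch: uniq(ip) counts distinct non-empty ip values, while
// uniq(host, path) counts distinct (host, path) tuples, skipping rows where
// every listed field is empty.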

func parseStatsFuncUniq(lex *lexer) (*statsFuncUniq, error) {
	lex.nextToken()
	fields, err := parseFieldNamesInParens(lex)
	if err != nil {
		return nil, fmt.Errorf("cannot parse 'uniq' args: %w", err)
	}
	if len(fields) == 0 {
		return nil, fmt.Errorf("'uniq' must contain at least a single arg")
	}
	resultName, err := parseResultName(lex)
	if err != nil {
		return nil, fmt.Errorf("cannot parse result name: %w", err)
	}
	sfu := &statsFuncUniq{
		fields:     fields,
		resultName: resultName,
	}
	return sfu, nil
}

func parseStatsFuncCount(lex *lexer) (*statsFuncCount, error) {
	lex.nextToken()
	fields, err := parseFieldNamesInParens(lex)
	if err != nil {
		return nil, fmt.Errorf("cannot parse 'count' args: %w", err)
	}
	resultName, err := parseResultName(lex)
	if err != nil {
		return nil, fmt.Errorf("cannot parse result name: %w", err)
	}
	sfc := &statsFuncCount{
		fields:     fields,
		resultName: resultName,
	}
	return sfc, nil
}

func parseResultName(lex *lexer) (string, error) {
	if lex.isKeyword("as") {
		if !lex.mustNextToken() {
			return "", fmt.Errorf("missing token after 'as' keyword")
		}
	}
	resultName, err := parseFieldName(lex)
	if err != nil {
		return "", fmt.Errorf("cannot parse 'as' field name: %w", err)
	}
	return resultName, nil
}

type headPipe struct {
	n uint64
}

func (hp *headPipe) String() string {
	return fmt.Sprintf("head %d", hp.n)
}

func (hp *headPipe) newPipeProcessor(_ int, _ <-chan struct{}, cancel func(), ppBase pipeProcessor) pipeProcessor {
	if hp.n == 0 {
		// Special case - notify the caller to stop writing data to the returned headPipeProcessor
		cancel()
	}
	return &headPipeProcessor{
		hp:     hp,
		cancel: cancel,
		ppBase: ppBase,
	}
}

type headPipeProcessor struct {
	hp     *headPipe
	cancel func()
	ppBase pipeProcessor

	rowsProcessed atomic.Uint64
}

func (hpp *headPipeProcessor) writeBlock(workerID uint, timestamps []int64, columns []BlockColumn) {
	rowsProcessed := hpp.rowsProcessed.Add(uint64(len(timestamps)))
	if rowsProcessed <= hpp.hp.n {
		// Fast path - write all the rows to ppBase.
		hpp.ppBase.writeBlock(workerID, timestamps, columns)
		return
	}

	// Slow path - overflow. Write the remaining rows if needed.
	rowsProcessed -= uint64(len(timestamps))
	if rowsProcessed >= hpp.hp.n {
		// Nothing to write. There is no need for a cancel() call, since it has been called by another goroutine.
		return
	}

	// Write remaining rows.
	rowsRemaining := hpp.hp.n - rowsProcessed
	cs := make([]BlockColumn, len(columns))
	for i, c := range columns {
		cDst := &cs[i]
		cDst.Name = c.Name
		cDst.Values = c.Values[:rowsRemaining]
	}
	timestamps = timestamps[:rowsRemaining]
	hpp.ppBase.writeBlock(workerID, timestamps, cs)

	// Notify the caller that it should stop passing more data to writeBlock().
	hpp.cancel()
}
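
// Worked example: for 'head 10' and two 6-row blocks, the first block passes
// through unchanged (rowsProcessed=6 <= 10); for the second block
// rowsProcessed=12 > 10, so rowsRemaining = 10-6 = 4 leading rows are written
// to ppBase and cancel() is called.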

func (hpp *headPipeProcessor) flush() error {
	return nil
}

func parseHeadPipe(lex *lexer) (*headPipe, error) {
	if !lex.mustNextToken() {
		return nil, fmt.Errorf("missing the number of head rows to return")
	}
	n, err := parseUint(lex.token)
	if err != nil {
		return nil, fmt.Errorf("cannot parse the number of head rows to return %q: %w", lex.token, err)
	}
	lex.nextToken()
	hp := &headPipe{
		n: n,
	}
	return hp, nil
}

type skipPipe struct {
	n uint64
}

func (sp *skipPipe) String() string {
	return fmt.Sprintf("skip %d", sp.n)
}

func (sp *skipPipe) newPipeProcessor(workersCount int, _ <-chan struct{}, _ func(), ppBase pipeProcessor) pipeProcessor {
	return &skipPipeProcessor{
		sp:     sp,
		ppBase: ppBase,
	}
}

type skipPipeProcessor struct {
	sp     *skipPipe
	ppBase pipeProcessor

	rowsProcessed atomic.Uint64
}

func (spp *skipPipeProcessor) writeBlock(workerID uint, timestamps []int64, columns []BlockColumn) {
	rowsProcessed := spp.rowsProcessed.Add(uint64(len(timestamps)))
	if rowsProcessed <= spp.sp.n {
		// Fast path - drop the whole block, since it is fully covered by the rows to skip.
		return
	}

	rowsProcessed -= uint64(len(timestamps))
	if rowsProcessed >= spp.sp.n {
		// Fast path - all the rows to skip have already been dropped, so pass the block as-is.
		spp.ppBase.writeBlock(workerID, timestamps, columns)
		return
	}

	// Slow path - drop the first rowsRemaining rows of the block and pass the rest to ppBase.
	rowsRemaining := spp.sp.n - rowsProcessed
	cs := make([]BlockColumn, len(columns))
	for i, c := range columns {
		cDst := &cs[i]
		cDst.Name = c.Name
		cDst.Values = c.Values[rowsRemaining:]
	}
	timestamps = timestamps[rowsRemaining:]
	spp.ppBase.writeBlock(workerID, timestamps, cs)
}
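
// Worked example: for 'skip 10' and two 6-row blocks, the first block is
// dropped entirely (rowsProcessed=6 <= 10); for the second block
// rowsProcessed=12 > 10 and rowsRemaining = 10-6 = 4, so the first 4 rows of
// the block are dropped and the remaining 2 rows are passed to ppBase.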

func (spp *skipPipeProcessor) flush() error {
	return nil
}

func parseSkipPipe(lex *lexer) (*skipPipe, error) {
	if !lex.mustNextToken() {
		return nil, fmt.Errorf("missing the number of rows to skip")
	}
	n, err := parseUint(lex.token)
	if err != nil {
		return nil, fmt.Errorf("cannot parse the number of rows to skip %q: %w", lex.token, err)
	}
	lex.nextToken()
	sp := &skipPipe{
		n: n,
	}
	return sp, nil
}

func parseFieldNamesInParens(lex *lexer) ([]string, error) {
	if !lex.isKeyword("(") {
		return nil, fmt.Errorf("missing `(`")
	}
	var fields []string
	for {
		if !lex.mustNextToken() {
			return nil, fmt.Errorf("missing field name or ')'")
		}
		if lex.isKeyword(")") {
			lex.nextToken()
			return fields, nil
		}
		if lex.isKeyword(",") {
			return nil, fmt.Errorf("unexpected `,`")
		}
		field, err := parseFieldName(lex)
		if err != nil {
			return nil, fmt.Errorf("cannot parse field name: %w", err)
		}
		fields = append(fields, field)
		switch {
		case lex.isKeyword(")"):
			lex.nextToken()
			return fields, nil
		case lex.isKeyword(","):
		default:
			return nil, fmt.Errorf("unexpected token: %q; expecting ',' or ')'", lex.token)
		}
	}
}
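
// For example, '(foo, bar)' parses into [foo, bar] and '()' parses into an
// empty list; a leading comma as in '(, foo)' is rejected.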

func parseFieldName(lex *lexer) (string, error) {
	if lex.isKeyword(",", "(", ")", "[", "]", "|", "") {
		return "", fmt.Errorf("unexpected token: %q", lex.token)
	}
	token := getCompoundToken(lex)
	return token, nil
}

func fieldNamesString(fields []string) string {
	a := make([]string, len(fields))
	for i, f := range fields {
		if f != "*" {
			f = quoteTokenIfNeeded(f)
		}
		a[i] = f
	}
	return strings.Join(a, ", ")
}

func getFieldsIgnoreStar(fields []string) []string {
	var result []string
	for _, f := range fields {
		if f != "*" {
			result = append(result, f)
		}
	}
	return result
}

func appendBlockColumnIndexes(dst []int, columns []BlockColumn, fields []string) []int {
	for _, f := range fields {
		idx := getBlockColumnIndex(columns, f)
		dst = append(dst, idx)
	}
	return dst
}
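
// For example, for columns [a, b] and fields [b, missing], the appended
// indexes are [1, -1] (presumably; getBlockColumnIndex returns a negative
// index for a missing column, as the idx < 0 checks above assume), and
// callers treat a negative index as an absent column - see the 'by (...)'
// key construction in statsPipeProcessor.writeBlock.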