Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git (synced 2024-12-31 15:06:26 +00:00)

Commit 75914210ec (parent f9d0b21bb9): wip

4 changed files with 54 additions and 34 deletions

The commit threads error returns through the LogsQL pipe pipeline: Storage.RunQuery and pipeProcessor.flush() now return an error, and the cancel()/ppBase.flush() duties move out of the individual pipe processors into RunQuery itself.
@@ -64,10 +64,15 @@ func ProcessQueryRequest(ctx context.Context, w http.ResponseWriter, r *http.Req
 		blockResultPool.Put(bb)
 	}

-	vlstorage.RunQuery(ctxWithCancel, tenantIDs, q, writeBlock)
+	err = vlstorage.RunQuery(ctxWithCancel, tenantIDs, q, writeBlock)

 	sw.FinalFlush()
 	putSortWriter(sw)
+
+	if err != nil {
+		httpserver.Errorf(w, r, "cannot execute query: %s", err)
+	}
 }

 var blockResultPool bytesutil.ByteBufferPool
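The handler change follows a run, flush, then report pattern: the query error is captured, buffered output is flushed via sw.FinalFlush(), and only afterwards is the error surfaced to the client. Note that in general, if rows were already streamed, the HTTP status line has been sent, so the error report can only be appended to the body. A self-contained sketch of the same pattern using only net/http (handleQuery, runQuery and flushOutput are illustrative names, not from the repo):

package queryhandler

import (
	"context"
	"fmt"
	"net/http"
)

// handleQuery mirrors the diff's flow: run the streaming query, flush
// whatever output was produced, then report the error (if any).
func handleQuery(w http.ResponseWriter, r *http.Request) {
	err := runQuery(r.Context(), w) // stand-in for vlstorage.RunQuery
	flushOutput(w)                  // stand-in for sw.FinalFlush()
	if err != nil {
		// Stand-in for httpserver.Errorf(w, r, "cannot execute query: %s", err).
		http.Error(w, fmt.Sprintf("cannot execute query: %s", err), http.StatusInternalServerError)
	}
}

// runQuery streams one example row unless the request was canceled.
func runQuery(ctx context.Context, w http.ResponseWriter) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}
	_, err := fmt.Fprintln(w, `{"_msg":"example row"}`)
	return err
}

// flushOutput forces buffered response data onto the wire when supported.
func flushOutput(w http.ResponseWriter) {
	if f, ok := w.(http.Flusher); ok {
		f.Flush()
	}
}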
@@ -101,8 +101,8 @@ func MustAddRows(lr *logstorage.LogRows) {
 }

 // RunQuery runs the given q and calls writeBlock for the returned data blocks
-func RunQuery(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, writeBlock func(workerID uint, timestamps []int64, columns []logstorage.BlockColumn)) {
-	strg.RunQuery(ctx, tenantIDs, q, writeBlock)
+func RunQuery(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, writeBlock func(workerID uint, timestamps []int64, columns []logstorage.BlockColumn)) error {
+	return strg.RunQuery(ctx, tenantIDs, q, writeBlock)
 }

 func initStorageMetrics(strg *logstorage.Storage) *metrics.Set {
@@ -24,7 +24,7 @@ type pipe interface {
 	// If stopCh is closed, the returned pipeProcessor must stop performing CPU-intensive tasks which take more than a few milliseconds.
 	// It is OK to continue processing pipeProcessor calls if they take less than a few milliseconds.
 	//
-	// The returned pipeProcessor may call cancel() at any time in order to notify writeBlock callers that it doesn't accept more data.
+	// The returned pipeProcessor may call cancel() at any time in order to notify worker goroutines to stop sending new data to pipeProcessor.
 	newPipeProcessor(workersCount int, stopCh <-chan struct{}, cancel func(), ppBase pipeProcessor) pipeProcessor
 }

@@ -32,16 +32,24 @@ type pipe interface {
 type pipeProcessor interface {
 	// writeBlock must write the given block of data to the given pipeProcessor.
 	//
-	// The workerID is the id of the worker goroutine, which called the writeBlock.
+	// writeBlock is called concurrently from worker goroutines.
+	// The workerID is the id of the worker goroutine, which calls the writeBlock.
 	// It is in the range 0 ... workersCount-1 .
 	//
 	// It is forbidden to hold references to columns after returning from writeBlock, since the caller re-uses columns.
+	//
+	// If any error occurs at writeBlock, then cancel() must be called by pipeProcessor in order to notify worker goroutines
+	// to stop sending new data. The error must then be returned from flush().
+	//
+	// cancel() may also be called when the pipeProcessor decides to stop accepting new data, even if no error has occurred.
 	writeBlock(workerID uint, timestamps []int64, columns []BlockColumn)

 	// flush must flush all the data accumulated in the pipeProcessor to the base pipeProcessor.
 	//
-	// The pipeProcessor must call cancel() and ppBase.flush(), which has been passed to newPipeProcessor, before returning from the flush.
-	flush()
+	// flush is called after all the worker goroutines are stopped.
+	//
+	// It is guaranteed that flush() is called for every pipeProcessor returned from pipe.newPipeProcessor().
+	flush() error
 }

 type defaultPipeProcessor func(workerID uint, timestamps []int64, columns []BlockColumn)
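The reworked contract is easiest to see in a minimal conforming implementation. The sketch below is hypothetical (not part of this commit) and assumes it lives in lib/logstorage next to the interface: it records the first error seen in writeBlock, calls cancel() so the workers stop sending, and hands the error back from flush(). Per the new contract it does not call cancel() or ppBase.flush() inside flush(), since RunQuery now drives both.

// errCheckPipeProcessor is a hypothetical pipeProcessor that validates blocks
// before forwarding them to ppBase, illustrating the new error flow.
// It needs "fmt" and "sync" in addition to the package's own types.
type errCheckPipeProcessor struct {
	cancel func()
	ppBase pipeProcessor

	errMu sync.Mutex
	err   error // first writeBlock error; returned from flush()
}

func (ecp *errCheckPipeProcessor) writeBlock(workerID uint, timestamps []int64, columns []BlockColumn) {
	if len(timestamps) == 0 {
		// Treat an empty block as an error purely for illustration.
		ecp.errMu.Lock()
		if ecp.err == nil {
			ecp.err = fmt.Errorf("worker %d: empty block", workerID)
		}
		ecp.errMu.Unlock()
		ecp.cancel() // notify worker goroutines to stop sending new data
		return
	}
	ecp.ppBase.writeBlock(workerID, timestamps, columns)
}

func (ecp *errCheckPipeProcessor) flush() error {
	// Called once all worker goroutines have stopped; just report the error.
	ecp.errMu.Lock()
	defer ecp.errMu.Unlock()
	return ecp.err
}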
@@ -54,8 +62,8 @@ func (dpp defaultPipeProcessor) writeBlock(workerID uint, timestamps []int64, co
 	dpp(workerID, timestamps, columns)
 }

-func (dpp defaultPipeProcessor) flush() {
-	// Nothing to do
+func (dpp defaultPipeProcessor) flush() error {
+	return nil
 }

 func parsePipes(lex *lexer) ([]pipe, error) {
@@ -111,17 +119,15 @@ func (fp *fieldsPipe) String() string {
 	return "fields " + fieldNamesString(fp.fields)
 }

-func (fp *fieldsPipe) newPipeProcessor(_ int, _ <-chan struct{}, cancel func(), ppBase pipeProcessor) pipeProcessor {
+func (fp *fieldsPipe) newPipeProcessor(_ int, _ <-chan struct{}, _ func(), ppBase pipeProcessor) pipeProcessor {
 	return &fieldsPipeProcessor{
 		fp:     fp,
-		cancel: cancel,
 		ppBase: ppBase,
 	}
 }

 type fieldsPipeProcessor struct {
 	fp     *fieldsPipe
-	cancel func()
 	ppBase pipeProcessor
 }

@@ -147,9 +153,8 @@ func (fpp *fieldsPipeProcessor) writeBlock(workerID uint, timestamps []int64, co
 	putBlockRows(brs)
 }

-func (fpp *fieldsPipeProcessor) flush() {
-	fpp.cancel()
-	fpp.ppBase.flush()
+func (fpp *fieldsPipeProcessor) flush() error {
+	return nil
 }

 func parseFieldsPipe(lex *lexer) (*fieldsPipe, error) {
@@ -330,12 +335,7 @@ func (spp *statsPipeProcessor) writeBlock(workerID uint, timestamps []int64, col
 	shard.keyBuf = keyBuf
 }

-func (spp *statsPipeProcessor) flush() {
-	defer func() {
-		spp.cancel()
-		spp.ppBase.flush()
-	}()
-
+func (spp *statsPipeProcessor) flush() error {
 	// Merge states across shards
 	shards := spp.shards
 	m := shards[0].m
@@ -347,7 +347,7 @@ func (spp *statsPipeProcessor) flush() {
 		// Stop processing data as soon as stopCh is closed without wasting CPU time.
 		select {
 		case <-spp.stopCh:
-			return
+			return nil
 		default:
 		}

@@ -377,7 +377,7 @@ func (spp *statsPipeProcessor) flush() {
 		// Stop processing data as soon as stopCh is closed without wasting CPU time.
 		select {
 		case <-spp.stopCh:
-			return
+			return nil
 		default:
 		}

@@ -415,6 +415,8 @@ func (spp *statsPipeProcessor) flush() {
 		}
 		spp.ppBase.writeBlock(0, []int64{0}, columns)
 	}
+
+	return nil
 }

 func (sp *statsPipe) neededFields() []string {
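The repeated select in flush() is Go's standard non-blocking check of a done channel; in this commit a stopped flush simply returns nil, so query cancellation is not reported as a flush error. The idiom, as a self-contained helper (hypothetical name, not from the repo):

// stopped reports whether stopCh has been closed, without blocking.
// statsPipeProcessor.flush() inlines this check at the top of its merge
// loops so a canceled query stops consuming CPU promptly.
func stopped(stopCh <-chan struct{}) bool {
	select {
	case <-stopCh:
		return true
	default:
		return false
	}
}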
@@ -820,9 +822,8 @@ func (hpp *headPipeProcessor) writeBlock(workerID uint, timestamps []int64, colu
 	hpp.cancel()
 }

-func (hpp *headPipeProcessor) flush() {
-	hpp.cancel()
-	hpp.ppBase.flush()
+func (hpp *headPipeProcessor) flush() error {
+	return nil
 }

 func parseHeadPipe(lex *lexer) (*headPipe, error) {
@@ -848,17 +849,15 @@ func (sp *skipPipe) String() string {
 	return fmt.Sprintf("skip %d", sp.n)
 }

-func (sp *skipPipe) newPipeProcessor(workersCount int, _ <-chan struct{}, cancel func(), ppBase pipeProcessor) pipeProcessor {
+func (sp *skipPipe) newPipeProcessor(workersCount int, _ <-chan struct{}, _ func(), ppBase pipeProcessor) pipeProcessor {
 	return &skipPipeProcessor{
 		sp:     sp,
-		cancel: cancel,
 		ppBase: ppBase,
 	}
 }

 type skipPipeProcessor struct {
 	sp     *skipPipe
-	cancel func()
 	ppBase pipeProcessor

 	rowsSkipped atomic.Uint64
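skipPipeProcessor shares one row counter across all worker goroutines, which is why rowsSkipped is an atomic.Uint64 rather than a plain field. A simplified standalone sketch of the counting logic (hypothetical skipper type, not the actual VictoriaLogs code; with concurrent workers, which rows land in the skipped prefix depends on scheduling order):

package skipdemo

import "sync/atomic"

// skipper drops the first n rows observed across concurrent writers and
// forwards the rest, mirroring the counter-based approach suggested by
// skipPipeProcessor.rowsSkipped.
type skipper struct {
	n           uint64
	rowsSkipped atomic.Uint64
	emit        func(timestamps []int64) // downstream stand-in for ppBase
}

func (s *skipper) writeBlock(timestamps []int64) {
	rows := uint64(len(timestamps))
	total := s.rowsSkipped.Add(rows)
	seenBefore := total - rows
	switch {
	case total <= s.n:
		// The whole block falls inside the skipped prefix: drop it.
	case seenBefore >= s.n:
		// The whole block lies past the skipped prefix: forward it as-is.
		s.emit(timestamps)
	default:
		// Boundary block: drop only the first (n - seenBefore) rows.
		s.emit(timestamps[s.n-seenBefore:])
	}
}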
@@ -887,9 +886,8 @@ func (spp *skipPipeProcessor) writeBlock(workerID uint, timestamps []int64, colu
 	spp.ppBase.writeBlock(workerID, timestamps, cs)
 }

-func (spp *skipPipeProcessor) flush() {
-	spp.cancel()
-	spp.ppBase.flush()
+func (spp *skipPipeProcessor) flush() error {
+	return nil
 }

 func parseSkipPipe(lex *lexer) (*skipPipe, error) {
@@ -45,7 +45,7 @@ type searchOptions struct {
 }

 // RunQuery runs the given q and calls writeBlock for results.
-func (s *Storage) RunQuery(ctx context.Context, tenantIDs []TenantID, q *Query, writeBlock func(workerID uint, timestamps []int64, columns []BlockColumn)) {
+func (s *Storage) RunQuery(ctx context.Context, tenantIDs []TenantID, q *Query, writeBlock func(workerID uint, timestamps []int64, columns []BlockColumn)) error {
 	resultColumnNames := q.getResultColumnNames()
 	so := &genericSearchOptions{
 		tenantIDs: tenantIDs,
@@ -56,13 +56,19 @@ func (s *Storage) RunQuery(ctx context.Context, tenantIDs []TenantID, q *Query,
 	workersCount := cgroup.AvailableCPUs()

 	pp := newDefaultPipeProcessor(writeBlock)
+	ppMain := pp
 	stopCh := ctx.Done()
+	cancels := make([]func(), len(q.pipes))
+	pps := make([]pipeProcessor, len(q.pipes))
 	for i := len(q.pipes) - 1; i >= 0; i-- {
 		p := q.pipes[i]
 		ctxChild, cancel := context.WithCancel(ctx)
 		stopCh = ctxChild.Done()
 		pp = p.newPipeProcessor(workersCount, stopCh, cancel, pp)
 		ctx = ctxChild
+
+		cancels[i] = cancel
+		pps[i] = pp
 	}

 	s.search(workersCount, so, stopCh, func(workerID uint, br *blockResult) {
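The construction loop walks the pipes back to front: pp starts as the terminal writeBlock processor, each pipe wraps the processor after it, and each pipe receives the Done channel of its own child context. The net effect is that calling cancels[i] stops pipe i and everything upstream of it (earlier pipes and the search workers) while downstream processors keep draining. A runnable demonstration of just the context wiring, for a hypothetical three-stage pipeline p0 | p1 | p2:

package main

import (
	"context"
	"fmt"
)

func main() {
	// Mirror of the RunQuery loop: contexts are chained back to front.
	ctx := context.Background()
	names := []string{"p0", "p1", "p2"} // hypothetical pipes
	cancels := make([]context.CancelFunc, len(names))
	dones := make([]<-chan struct{}, len(names))
	for i := len(names) - 1; i >= 0; i-- {
		ctxChild, cancel := context.WithCancel(ctx)
		cancels[i] = cancel
		dones[i] = ctxChild.Done()
		ctx = ctxChild
	}
	searchStop := ctx.Done() // the search workers observe the innermost context

	cancels[1]() // e.g. p1 decides it needs no more input

	for i, done := range dones {
		fmt.Printf("%s stopped: %v\n", names[i], isClosed(done))
	}
	fmt.Println("search stopped:", isClosed(searchStop))
	// Prints: p0 true, p1 true, p2 false, search true — cancelling a stage
	// stops it and everything feeding it, while later stages keep running.
}

// isClosed reports whether ch has been closed, without blocking.
func isClosed(ch <-chan struct{}) bool {
	select {
	case <-ch:
		return true
	default:
		return false
	}
}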
@@ -81,7 +87,18 @@ func (s *Storage) RunQuery(ctx context.Context, tenantIDs []TenantID, q *Query,
 		putBlockRows(brs)
 	})

-	pp.flush()
+	var errFlush error
+	for i, pp := range pps {
+		if err := pp.flush(); err != nil && errFlush == nil {
+			errFlush = err
+		}
+		cancel := cancels[i]
+		cancel()
+	}
+	if err := ppMain.flush(); err != nil && errFlush == nil {
+		errFlush = err
+	}
+	return errFlush
 }

 type blockRows struct {
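After the search completes, RunQuery drains the chain front to back: each pipe's processor is flushed (pushing its buffered rows downstream), its cancel function is released, the terminal processor is flushed last, and the first flush error wins. From a caller's perspective the whole change collapses into a single error check. A hedged usage sketch (runAndCheck is an illustrative name; the import path matches the repo layout, and strg, tenantIDs and q are assumed to be initialized elsewhere):

package logsqlexample

import (
	"context"
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

// runAndCheck shows how callers consume the error-returning RunQuery.
func runAndCheck(ctx context.Context, strg *logstorage.Storage, tenantIDs []logstorage.TenantID, q *logstorage.Query) error {
	err := strg.RunQuery(ctx, tenantIDs, q, func(workerID uint, timestamps []int64, columns []logstorage.BlockColumn) {
		// Consume one block of results. Per the pipeProcessor contract,
		// references to columns must not be retained after returning.
	})
	if err != nil {
		return fmt.Errorf("cannot execute query: %w", err)
	}
	return nil
}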