package logstorage

import (
	"fmt"
	"sync/atomic"
)

// pipeLimit implements '| limit ...' pipe.
//
// See https://docs.victoriametrics.com/victorialogs/logsql/#limit-pipe
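//
// For example, the following query (shown for illustration only) returns
// up to 10 log entries containing the word "error":
//
//	error | limit 10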
type pipeLimit struct {
	limit uint64
}

func (pl *pipeLimit) String() string {
	return fmt.Sprintf("limit %d", pl.limit)
}
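
// updateNeededFields is a no-op, since '| limit ...' doesn't add or remove any fields on its own.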
func (pl *pipeLimit) updateNeededFields(_, _ fieldsSet) {
	// nothing to do
}

func (pl *pipeLimit) optimize() {
	// nothing to do
}
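
// hasFilterInWithQuery returns false, since the limit pipe cannot contain in(...) filters with subqueries.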
func (pl *pipeLimit) hasFilterInWithQuery() bool {
	return false
}

func (pl *pipeLimit) initFilterInValues(_ map[string][]string, _ getFieldValuesFunc) (pipe, error) {
	return pl, nil
}
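
// newPipeProcessor returns a processor, which passes at most pl.limit rows to ppNext.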
func (pl *pipeLimit) newPipeProcessor(_ int, _ <-chan struct{}, cancel func(), ppNext pipeProcessor) pipeProcessor {
	if pl.limit == 0 {
		// Special case - notify the caller to stop writing data to the returned pipeLimitProcessor
		cancel()
	}
	return &pipeLimitProcessor{
		pl:     pl,
		cancel: cancel,
		ppNext: ppNext,
	}
}
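
// pipeLimitProcessor processes '| limit ...' pipe.
// It passes at most pl.limit rows to ppNext across all the workers and then calls cancel().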
type pipeLimitProcessor struct {
	pl     *pipeLimit
	cancel func()
	ppNext pipeProcessor

	rowsProcessed atomic.Uint64
}
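
// writeBlock may be called concurrently by multiple workers,
// so the total number of processed rows is tracked via the shared atomic rowsProcessed counter.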
func (plp *pipeLimitProcessor) writeBlock(workerID uint, br *blockResult) {
	if len(br.timestamps) == 0 {
		return
	}

	rowsProcessed := plp.rowsProcessed.Add(uint64(len(br.timestamps)))
	limit := plp.pl.limit
	if rowsProcessed <= limit {
		// Fast path - write all the rows to ppNext.
		plp.ppNext.writeBlock(workerID, br)
		if rowsProcessed == limit {
			plp.cancel()
		}
		return
	}

	// Slow path - overflow. Write the remaining rows if needed.
	rowsProcessed -= uint64(len(br.timestamps))
	if rowsProcessed >= limit {
		// Nothing to write. There is no need to call cancel(), since it has already been called by another goroutine.
		return
	}

	// Write remaining rows.
	keepRows := limit - rowsProcessed
	br.truncateRows(int(keepRows))
	plp.ppNext.writeBlock(workerID, br)

	// Notify the caller that it should stop passing more data to writeBlock().
	plp.cancel()
}
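
// flush is a no-op, since writeBlock passes rows to ppNext as soon as they arrive.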
func (plp *pipeLimitProcessor) flush() error {
	return nil
}
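
// parsePipeLimit parses '| limit N' and '| head N' pipes.
// If N is omitted, the limit defaults to 10.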
func parsePipeLimit(lex *lexer) (*pipeLimit, error) {
	if !lex.isKeyword("limit", "head") {
		return nil, fmt.Errorf("expecting 'limit' or 'head'; got %q", lex.token)
	}
	lex.nextToken()

	limit := uint64(10)
	if !lex.isKeyword("|", ")", "") {
		n, err := parseUint(lex.token)
		if err != nil {
			return nil, fmt.Errorf("cannot parse rows limit from %q: %w", lex.token, err)
		}
		lex.nextToken()
		limit = n
	}

	pl := &pipeLimit{
		limit: limit,
	}
	return pl, nil
}