mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2024-11-21 14:44:00 +00:00
lib/logstorage: read timestamps column when it is really needed during query execution
Previously, the timestamps column was read unconditionally on every query. This could significantly slow down queries that do not need to read this column, as in https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7070 .
This commit is contained in:
parent
f934f71708
commit
4599429f51
44 changed files with 299 additions and 213 deletions
|
@ -16,6 +16,7 @@ according to [these docs](https://docs.victoriametrics.com/victorialogs/quicksta
|
||||||
## tip
|
## tip
|
||||||
|
|
||||||
* FEATURE: [web UI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui): add button for enabling auto refresh, similarly to VictoriaMetrics vmui. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7017).
|
* FEATURE: [web UI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui): add button for enabling auto refresh, similarly to VictoriaMetrics vmui. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7017).
|
||||||
|
* FEATURE: improve performance of analytical queries, which do not need reading the `_time` field. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7070).
|
||||||
|
|
||||||
* BUGFIX: properly return logs without [`_msg`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field) field when `*` query is passed to [`/select/logsql/query` endpoint](https://docs.victoriametrics.com/victorialogs/querying/#querying-logs) together with positive `limit` arg. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6785). Thanks to @jiekun for identifying the root cause of the issue.
|
* BUGFIX: properly return logs without [`_msg`](https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field) field when `*` query is passed to [`/select/logsql/query` endpoint](https://docs.victoriametrics.com/victorialogs/querying/#querying-logs) together with positive `limit` arg. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6785). Thanks to @jiekun for identifying the root cause of the issue.
|
||||||
|
|
||||||
|
|
|
@ -21,14 +21,29 @@ import (
|
||||||
//
|
//
|
||||||
// It is expected that its contents is accessed only from a single goroutine at a time.
|
// It is expected that its contents is accessed only from a single goroutine at a time.
|
||||||
type blockResult struct {
|
type blockResult struct {
|
||||||
|
// rowsLen is the number of rows in the given blockResult.
|
||||||
|
rowsLen int
|
||||||
|
|
||||||
|
// bs is the associated blockSearch for the given blockResult.
|
||||||
|
//
|
||||||
|
// bs is nil for the blockResult constructed by pipes.
|
||||||
|
bs *blockSearch
|
||||||
|
|
||||||
|
// bm is the associated bitmap for the given blockResult.
|
||||||
|
//
|
||||||
|
// bm is nil for the blockResult constructed by pipes.
|
||||||
|
bm *bitmap
|
||||||
|
|
||||||
// a holds all the bytes behind the requested column values in the block.
|
// a holds all the bytes behind the requested column values in the block.
|
||||||
a arena
|
a arena
|
||||||
|
|
||||||
// values holds all the requested column values in the block.
|
// valuesBuf holds all the requested column values in the block.
|
||||||
valuesBuf []string
|
valuesBuf []string
|
||||||
|
|
||||||
// timestamps contain timestamps for the selected log entries in the block.
|
// timestampsBuf contains cached timestamps for the selected log entries in the block.
|
||||||
timestamps []int64
|
//
|
||||||
|
// timestamps must be obtained via blockResult.getTimestamps() call.
|
||||||
|
timestampsBuf []int64
|
||||||
|
|
||||||
// csBuf contains requested columns.
|
// csBuf contains requested columns.
|
||||||
csBuf []blockResultColumn
|
csBuf []blockResultColumn
|
||||||
|
@ -47,12 +62,17 @@ type blockResult struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (br *blockResult) reset() {
|
func (br *blockResult) reset() {
|
||||||
|
br.rowsLen = 0
|
||||||
|
|
||||||
|
br.cs = nil
|
||||||
|
br.bm = nil
|
||||||
|
|
||||||
br.a.reset()
|
br.a.reset()
|
||||||
|
|
||||||
clear(br.valuesBuf)
|
clear(br.valuesBuf)
|
||||||
br.valuesBuf = br.valuesBuf[:0]
|
br.valuesBuf = br.valuesBuf[:0]
|
||||||
|
|
||||||
br.timestamps = br.timestamps[:0]
|
br.timestampsBuf = br.timestampsBuf[:0]
|
||||||
|
|
||||||
clear(br.csBuf)
|
clear(br.csBuf)
|
||||||
br.csBuf = br.csBuf[:0]
|
br.csBuf = br.csBuf[:0]
|
||||||
|
@ -76,6 +96,11 @@ func (br *blockResult) reset() {
|
||||||
func (br *blockResult) clone() *blockResult {
|
func (br *blockResult) clone() *blockResult {
|
||||||
brNew := &blockResult{}
|
brNew := &blockResult{}
|
||||||
|
|
||||||
|
brNew.rowsLen = br.rowsLen
|
||||||
|
|
||||||
|
// do not clone br.cs, since it may be updated at any time.
|
||||||
|
// do not clone br.bm, since it may be updated at any time.
|
||||||
|
|
||||||
cs := br.getColumns()
|
cs := br.getColumns()
|
||||||
|
|
||||||
// Pre-populate values in every column in order to properly calculate the needed backing buffer size below.
|
// Pre-populate values in every column in order to properly calculate the needed backing buffer size below.
|
||||||
|
@ -96,8 +121,10 @@ func (br *blockResult) clone() *blockResult {
|
||||||
}
|
}
|
||||||
brNew.valuesBuf = make([]string, 0, valuesBufLen)
|
brNew.valuesBuf = make([]string, 0, valuesBufLen)
|
||||||
|
|
||||||
brNew.timestamps = make([]int64, len(br.timestamps))
|
srcTimestamps := br.getTimestamps()
|
||||||
copy(brNew.timestamps, br.timestamps)
|
brNew.timestampsBuf = make([]int64, len(srcTimestamps))
|
||||||
|
copy(brNew.timestampsBuf, srcTimestamps)
|
||||||
|
brNew.checkTimestampsLen()
|
||||||
|
|
||||||
csNew := make([]blockResultColumn, len(cs))
|
csNew := make([]blockResultColumn, len(cs))
|
||||||
for i, c := range cs {
|
for i, c := range cs {
|
||||||
|
@ -112,18 +139,19 @@ func (br *blockResult) clone() *blockResult {
|
||||||
return brNew
|
return brNew
|
||||||
}
|
}
|
||||||
|
|
||||||
// initFromFilterAllColumns initializes br from brSrc by copying rows identified by set bets at bm.
|
// initFromFilterAllColumns initializes br from brSrc by copying rows identified by set bits at bm.
|
||||||
//
|
//
|
||||||
// The br is valid until brSrc or bm is updated.
|
// The br is valid until brSrc or bm is updated.
|
||||||
func (br *blockResult) initFromFilterAllColumns(brSrc *blockResult, bm *bitmap) {
|
func (br *blockResult) initFromFilterAllColumns(brSrc *blockResult, bm *bitmap) {
|
||||||
br.reset()
|
br.reset()
|
||||||
|
|
||||||
srcTimestamps := brSrc.timestamps
|
srcTimestamps := brSrc.getTimestamps()
|
||||||
dstTimestamps := br.timestamps[:0]
|
dstTimestamps := br.timestampsBuf[:0]
|
||||||
bm.forEachSetBitReadonly(func(idx int) {
|
bm.forEachSetBitReadonly(func(idx int) {
|
||||||
dstTimestamps = append(dstTimestamps, srcTimestamps[idx])
|
dstTimestamps = append(dstTimestamps, srcTimestamps[idx])
|
||||||
})
|
})
|
||||||
br.timestamps = dstTimestamps
|
br.timestampsBuf = dstTimestamps
|
||||||
|
br.rowsLen = len(br.timestampsBuf)
|
||||||
|
|
||||||
for _, cSrc := range brSrc.getColumns() {
|
for _, cSrc := range brSrc.getColumns() {
|
||||||
br.appendFilteredColumn(brSrc, cSrc, bm)
|
br.appendFilteredColumn(brSrc, cSrc, bm)
|
||||||
|
@ -134,7 +162,7 @@ func (br *blockResult) initFromFilterAllColumns(brSrc *blockResult, bm *bitmap)
|
||||||
//
|
//
|
||||||
// the br is valid until brSrc, cSrc or bm is updated.
|
// the br is valid until brSrc, cSrc or bm is updated.
|
||||||
func (br *blockResult) appendFilteredColumn(brSrc *blockResult, cSrc *blockResultColumn, bm *bitmap) {
|
func (br *blockResult) appendFilteredColumn(brSrc *blockResult, cSrc *blockResultColumn, bm *bitmap) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
cDst := blockResultColumn{
|
cDst := blockResultColumn{
|
||||||
|
@ -211,7 +239,7 @@ func (br *blockResult) sizeBytes() int {
|
||||||
|
|
||||||
n += br.a.sizeBytes()
|
n += br.a.sizeBytes()
|
||||||
n += cap(br.valuesBuf) * int(unsafe.Sizeof(br.valuesBuf[0]))
|
n += cap(br.valuesBuf) * int(unsafe.Sizeof(br.valuesBuf[0]))
|
||||||
n += cap(br.timestamps) * int(unsafe.Sizeof(br.timestamps[0]))
|
n += cap(br.timestampsBuf) * int(unsafe.Sizeof(br.timestampsBuf[0]))
|
||||||
n += cap(br.csBuf) * int(unsafe.Sizeof(br.csBuf[0]))
|
n += cap(br.csBuf) * int(unsafe.Sizeof(br.csBuf[0]))
|
||||||
n += cap(br.cs) * int(unsafe.Sizeof(br.cs[0]))
|
n += cap(br.cs) * int(unsafe.Sizeof(br.cs[0]))
|
||||||
|
|
||||||
|
@ -221,10 +249,10 @@ func (br *blockResult) sizeBytes() int {
|
||||||
// setResultColumns sets the given rcs as br columns.
|
// setResultColumns sets the given rcs as br columns.
|
||||||
//
|
//
|
||||||
// The br is valid only until rcs are modified.
|
// The br is valid only until rcs are modified.
|
||||||
func (br *blockResult) setResultColumns(rcs []resultColumn, rowsCount int) {
|
func (br *blockResult) setResultColumns(rcs []resultColumn, rowsLen int) {
|
||||||
br.reset()
|
br.reset()
|
||||||
|
|
||||||
br.timestamps = fastnum.AppendInt64Zeros(br.timestamps[:0], rowsCount)
|
br.rowsLen = rowsLen
|
||||||
|
|
||||||
for i := range rcs {
|
for i := range rcs {
|
||||||
br.addResultColumn(&rcs[i])
|
br.addResultColumn(&rcs[i])
|
||||||
|
@ -232,8 +260,8 @@ func (br *blockResult) setResultColumns(rcs []resultColumn, rowsCount int) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (br *blockResult) addResultColumn(rc *resultColumn) {
|
func (br *blockResult) addResultColumn(rc *resultColumn) {
|
||||||
if len(rc.values) != len(br.timestamps) {
|
if len(rc.values) != br.rowsLen {
|
||||||
logger.Panicf("BUG: column %q must contain %d rows, but it contains %d rows", rc.name, len(br.timestamps), len(rc.values))
|
logger.Panicf("BUG: column %q must contain %d rows, but it contains %d rows", rc.name, br.rowsLen, len(rc.values))
|
||||||
}
|
}
|
||||||
if areConstValues(rc.values) {
|
if areConstValues(rc.values) {
|
||||||
// This optimization allows reducing memory usage after br cloning
|
// This optimization allows reducing memory usage after br cloning
|
||||||
|
@ -252,11 +280,9 @@ func (br *blockResult) addResultColumn(rc *resultColumn) {
|
||||||
br.csInitialized = false
|
br.csInitialized = false
|
||||||
}
|
}
|
||||||
|
|
||||||
// initAllColumns initializes all the columns in br according to bs and bm.
|
// initAllColumns initializes all the columns in br.
|
||||||
//
|
func (br *blockResult) initAllColumns() {
|
||||||
// The initialized columns are valid until bs and bm are changed.
|
unneededColumnNames := br.bs.bsw.so.unneededColumnNames
|
||||||
func (br *blockResult) initAllColumns(bs *blockSearch, bm *bitmap) {
|
|
||||||
unneededColumnNames := bs.bsw.so.unneededColumnNames
|
|
||||||
|
|
||||||
if !slices.Contains(unneededColumnNames, "_time") {
|
if !slices.Contains(unneededColumnNames, "_time") {
|
||||||
// Add _time column
|
// Add _time column
|
||||||
|
@ -265,12 +291,12 @@ func (br *blockResult) initAllColumns(bs *blockSearch, bm *bitmap) {
|
||||||
|
|
||||||
if !slices.Contains(unneededColumnNames, "_stream_id") {
|
if !slices.Contains(unneededColumnNames, "_stream_id") {
|
||||||
// Add _stream_id column
|
// Add _stream_id column
|
||||||
br.addStreamIDColumn(bs)
|
br.addStreamIDColumn()
|
||||||
}
|
}
|
||||||
|
|
||||||
if !slices.Contains(unneededColumnNames, "_stream") {
|
if !slices.Contains(unneededColumnNames, "_stream") {
|
||||||
// Add _stream column
|
// Add _stream column
|
||||||
if !br.addStreamColumn(bs) {
|
if !br.addStreamColumn() {
|
||||||
// Skip the current block, since the associated stream tags are missing
|
// Skip the current block, since the associated stream tags are missing
|
||||||
br.reset()
|
br.reset()
|
||||||
return
|
return
|
||||||
|
@ -279,18 +305,18 @@ func (br *blockResult) initAllColumns(bs *blockSearch, bm *bitmap) {
|
||||||
|
|
||||||
if !slices.Contains(unneededColumnNames, "_msg") {
|
if !slices.Contains(unneededColumnNames, "_msg") {
|
||||||
// Add _msg column
|
// Add _msg column
|
||||||
v := bs.csh.getConstColumnValue("_msg")
|
v := br.bs.csh.getConstColumnValue("_msg")
|
||||||
if v != "" {
|
if v != "" {
|
||||||
br.addConstColumn("_msg", v)
|
br.addConstColumn("_msg", v)
|
||||||
} else if ch := bs.csh.getColumnHeader("_msg"); ch != nil {
|
} else if ch := br.bs.csh.getColumnHeader("_msg"); ch != nil {
|
||||||
br.addColumn(bs, bm, ch)
|
br.addColumn(ch)
|
||||||
} else {
|
} else {
|
||||||
br.addConstColumn("_msg", "")
|
br.addConstColumn("_msg", "")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add other const columns
|
// Add other const columns
|
||||||
for _, cc := range bs.csh.constColumns {
|
for _, cc := range br.bs.csh.constColumns {
|
||||||
if isMsgFieldName(cc.Name) {
|
if isMsgFieldName(cc.Name) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
@ -300,30 +326,28 @@ func (br *blockResult) initAllColumns(bs *blockSearch, bm *bitmap) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add other non-const columns
|
// Add other non-const columns
|
||||||
chs := bs.csh.columnHeaders
|
chs := br.bs.csh.columnHeaders
|
||||||
for i := range chs {
|
for i := range chs {
|
||||||
ch := &chs[i]
|
ch := &chs[i]
|
||||||
if isMsgFieldName(ch.name) {
|
if isMsgFieldName(ch.name) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if !slices.Contains(unneededColumnNames, ch.name) {
|
if !slices.Contains(unneededColumnNames, ch.name) {
|
||||||
br.addColumn(bs, bm, ch)
|
br.addColumn(ch)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
br.csInitFast()
|
br.csInitFast()
|
||||||
}
|
}
|
||||||
|
|
||||||
// initRequestedColumns initialized only requested columns in br according to bs and bm.
|
// initRequestedColumns initialized only requested columns in br.
|
||||||
//
|
func (br *blockResult) initRequestedColumns() {
|
||||||
// The initialized columns are valid until bs and bm are changed.
|
for _, columnName := range br.bs.bsw.so.neededColumnNames {
|
||||||
func (br *blockResult) initRequestedColumns(bs *blockSearch, bm *bitmap) {
|
|
||||||
for _, columnName := range bs.bsw.so.neededColumnNames {
|
|
||||||
switch columnName {
|
switch columnName {
|
||||||
case "_stream_id":
|
case "_stream_id":
|
||||||
br.addStreamIDColumn(bs)
|
br.addStreamIDColumn()
|
||||||
case "_stream":
|
case "_stream":
|
||||||
if !br.addStreamColumn(bs) {
|
if !br.addStreamColumn() {
|
||||||
// Skip the current block, since the associated stream tags are missing.
|
// Skip the current block, since the associated stream tags are missing.
|
||||||
br.reset()
|
br.reset()
|
||||||
return
|
return
|
||||||
|
@ -331,11 +355,11 @@ func (br *blockResult) initRequestedColumns(bs *blockSearch, bm *bitmap) {
|
||||||
case "_time":
|
case "_time":
|
||||||
br.addTimeColumn()
|
br.addTimeColumn()
|
||||||
default:
|
default:
|
||||||
v := bs.csh.getConstColumnValue(columnName)
|
v := br.bs.csh.getConstColumnValue(columnName)
|
||||||
if v != "" {
|
if v != "" {
|
||||||
br.addConstColumn(columnName, v)
|
br.addConstColumn(columnName, v)
|
||||||
} else if ch := bs.csh.getColumnHeader(columnName); ch != nil {
|
} else if ch := br.bs.csh.getColumnHeader(columnName); ch != nil {
|
||||||
br.addColumn(bs, bm, ch)
|
br.addColumn(ch)
|
||||||
} else {
|
} else {
|
||||||
br.addConstColumn(columnName, "")
|
br.addConstColumn(columnName, "")
|
||||||
}
|
}
|
||||||
|
@ -345,38 +369,92 @@ func (br *blockResult) initRequestedColumns(bs *blockSearch, bm *bitmap) {
|
||||||
br.csInitFast()
|
br.csInitFast()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// mustInit initializes br with the given bs and bm.
|
||||||
|
//
|
||||||
|
// br is valid until bs or bm changes.
|
||||||
func (br *blockResult) mustInit(bs *blockSearch, bm *bitmap) {
|
func (br *blockResult) mustInit(bs *blockSearch, bm *bitmap) {
|
||||||
br.reset()
|
br.reset()
|
||||||
|
|
||||||
if bm.isZero() {
|
br.rowsLen = bm.onesCount()
|
||||||
// Nothing to initialize for zero matching log entries in the block.
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Initialize timestamps, since they are required for all the further work with br.
|
br.bs = bs
|
||||||
so := bs.bsw.so
|
br.bm = bm
|
||||||
if !so.needAllColumns && !slices.Contains(so.neededColumnNames, "_time") || so.needAllColumns && slices.Contains(so.unneededColumnNames, "_time") {
|
}
|
||||||
// The fastest path - _time column wasn't requested, so it is enough to initialize br.timestamps with zeroes.
|
|
||||||
rowsLen := bm.onesCount()
|
func (br *blockResult) getMinTimestamp() int64 {
|
||||||
br.timestamps = fastnum.AppendInt64Zeros(br.timestamps[:0], rowsLen)
|
if br.bm != nil && br.bm.bitsLen == br.rowsLen {
|
||||||
|
return br.bs.bsw.bh.timestampsHeader.minTimestamp
|
||||||
|
}
|
||||||
|
|
||||||
|
timestamps := br.getTimestamps()
|
||||||
|
if len(timestamps) == 0 {
|
||||||
|
return -1 << 63
|
||||||
|
}
|
||||||
|
minTimestamp := timestamps[0]
|
||||||
|
for i := 1; i < len(timestamps); i++ {
|
||||||
|
if timestamps[i] < minTimestamp {
|
||||||
|
minTimestamp = timestamps[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return minTimestamp
|
||||||
|
}
|
||||||
|
|
||||||
|
func (br *blockResult) getMaxTimestamp() int64 {
|
||||||
|
if br.bm != nil && br.bm.bitsLen == br.rowsLen {
|
||||||
|
return br.bs.bsw.bh.timestampsHeader.maxTimestamp
|
||||||
|
}
|
||||||
|
|
||||||
|
timestamps := br.getTimestamps()
|
||||||
|
if len(timestamps) == 0 {
|
||||||
|
return (1 << 63) - 1
|
||||||
|
}
|
||||||
|
maxTimestamp := timestamps[len(timestamps)-1]
|
||||||
|
for i := len(timestamps) - 2; i >= 0; i-- {
|
||||||
|
if timestamps[i] > maxTimestamp {
|
||||||
|
maxTimestamp = timestamps[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return maxTimestamp
|
||||||
|
}
|
||||||
|
|
||||||
|
func (br *blockResult) getTimestamps() []int64 {
|
||||||
|
if br.rowsLen > 0 && len(br.timestampsBuf) == 0 {
|
||||||
|
br.initTimestamps()
|
||||||
|
}
|
||||||
|
return br.timestampsBuf
|
||||||
|
}
|
||||||
|
|
||||||
|
func (br *blockResult) initTimestamps() {
|
||||||
|
if br.bs == nil {
|
||||||
|
br.timestampsBuf = fastnum.AppendInt64Zeros(br.timestampsBuf[:0], br.rowsLen)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Slow path - the _time column is requested, so we need to initialize br.timestamps with real timestamps.
|
srcTimestamps := br.bs.getTimestamps()
|
||||||
srcTimestamps := bs.getTimestamps()
|
if br.bm.areAllBitsSet() {
|
||||||
if bm.areAllBitsSet() {
|
|
||||||
// Fast path - all the rows in the block are selected, so copy all the timestamps without any filtering.
|
// Fast path - all the rows in the block are selected, so copy all the timestamps without any filtering.
|
||||||
br.timestamps = append(br.timestamps[:0], srcTimestamps...)
|
br.timestampsBuf = append(br.timestampsBuf[:0], srcTimestamps...)
|
||||||
|
br.checkTimestampsLen()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Slow path - copy only the needed timestamps to br according to filter results.
|
// Slow path - copy only the needed timestamps to br according to filter results.
|
||||||
dstTimestamps := br.timestamps[:0]
|
dstTimestamps := br.timestampsBuf[:0]
|
||||||
bm.forEachSetBitReadonly(func(idx int) {
|
br.bm.forEachSetBitReadonly(func(idx int) {
|
||||||
ts := srcTimestamps[idx]
|
ts := srcTimestamps[idx]
|
||||||
dstTimestamps = append(dstTimestamps, ts)
|
dstTimestamps = append(dstTimestamps, ts)
|
||||||
})
|
})
|
||||||
br.timestamps = dstTimestamps
|
br.timestampsBuf = dstTimestamps
|
||||||
|
br.checkTimestampsLen()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (br *blockResult) checkTimestampsLen() {
|
||||||
|
if len(br.timestampsBuf) != br.rowsLen {
|
||||||
|
logger.Panicf("BUG: unexpected number of timestamps; got %d; want %d", len(br.timestampsBuf), br.rowsLen)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (br *blockResult) newValuesEncodedFromColumnHeader(bs *blockSearch, bm *bitmap, ch *columnHeader) []string {
|
func (br *blockResult) newValuesEncodedFromColumnHeader(bs *blockSearch, bm *bitmap, ch *columnHeader) []string {
|
||||||
|
@ -454,8 +532,8 @@ func (br *blockResult) newValuesEncodedFromColumnHeader(bs *blockSearch, bm *bit
|
||||||
|
|
||||||
// addColumn adds column for the given ch to br.
|
// addColumn adds column for the given ch to br.
|
||||||
//
|
//
|
||||||
// The added column is valid until bs, bm or ch is changed.
|
// The added column is valid until ch is changed.
|
||||||
func (br *blockResult) addColumn(bs *blockSearch, bm *bitmap, ch *columnHeader) {
|
func (br *blockResult) addColumn(ch *columnHeader) {
|
||||||
br.csBuf = append(br.csBuf, blockResultColumn{
|
br.csBuf = append(br.csBuf, blockResultColumn{
|
||||||
name: getCanonicalColumnName(ch.name),
|
name: getCanonicalColumnName(ch.name),
|
||||||
valueType: ch.valueType,
|
valueType: ch.valueType,
|
||||||
|
@ -466,8 +544,8 @@ func (br *blockResult) addColumn(bs *blockSearch, bm *bitmap, ch *columnHeader)
|
||||||
c := &br.csBuf[len(br.csBuf)-1]
|
c := &br.csBuf[len(br.csBuf)-1]
|
||||||
|
|
||||||
br.svecs = append(br.svecs, searchValuesEncodedCreator{
|
br.svecs = append(br.svecs, searchValuesEncodedCreator{
|
||||||
bs: bs,
|
bs: br.bs,
|
||||||
bm: bm,
|
bm: br.bm,
|
||||||
ch: ch,
|
ch: ch,
|
||||||
})
|
})
|
||||||
c.valuesEncodedCreator = &br.svecs[len(br.svecs)-1]
|
c.valuesEncodedCreator = &br.svecs[len(br.svecs)-1]
|
||||||
|
@ -492,15 +570,15 @@ func (br *blockResult) addTimeColumn() {
|
||||||
br.csInitialized = false
|
br.csInitialized = false
|
||||||
}
|
}
|
||||||
|
|
||||||
func (br *blockResult) addStreamIDColumn(bs *blockSearch) {
|
func (br *blockResult) addStreamIDColumn() {
|
||||||
bb := bbPool.Get()
|
bb := bbPool.Get()
|
||||||
bb.B = bs.bsw.bh.streamID.marshalString(bb.B)
|
bb.B = br.bs.bsw.bh.streamID.marshalString(bb.B)
|
||||||
br.addConstColumn("_stream_id", bytesutil.ToUnsafeString(bb.B))
|
br.addConstColumn("_stream_id", bytesutil.ToUnsafeString(bb.B))
|
||||||
bbPool.Put(bb)
|
bbPool.Put(bb)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (br *blockResult) addStreamColumn(bs *blockSearch) bool {
|
func (br *blockResult) addStreamColumn() bool {
|
||||||
streamStr := bs.getStreamStr()
|
streamStr := br.bs.getStreamStr()
|
||||||
if streamStr == "" {
|
if streamStr == "" {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
@ -562,16 +640,16 @@ func (br *blockResult) newValuesBucketedForColumn(c *blockResultColumn, bf *bySt
|
||||||
func (br *blockResult) getBucketedConstValues(v string, bf *byStatsField) []string {
|
func (br *blockResult) getBucketedConstValues(v string, bf *byStatsField) []string {
|
||||||
if v == "" {
|
if v == "" {
|
||||||
// Fast path - return a slice of empty strings without constructing the slice.
|
// Fast path - return a slice of empty strings without constructing the slice.
|
||||||
return getEmptyStrings(len(br.timestamps))
|
return getEmptyStrings(br.rowsLen)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Slower path - construct slice of identical values with the len(br.timestamps)
|
// Slower path - construct slice of identical values with the length equal to br.rowsLen
|
||||||
|
|
||||||
valuesBuf := br.valuesBuf
|
valuesBuf := br.valuesBuf
|
||||||
valuesBufLen := len(valuesBuf)
|
valuesBufLen := len(valuesBuf)
|
||||||
|
|
||||||
v = br.getBucketedValue(v, bf)
|
v = br.getBucketedValue(v, bf)
|
||||||
for range br.timestamps {
|
for i := 0; i < br.rowsLen; i++ {
|
||||||
valuesBuf = append(valuesBuf, v)
|
valuesBuf = append(valuesBuf, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -585,7 +663,7 @@ func (br *blockResult) getBucketedTimestampValues(bf *byStatsField) []string {
|
||||||
valuesBuf := br.valuesBuf
|
valuesBuf := br.valuesBuf
|
||||||
valuesBufLen := len(valuesBuf)
|
valuesBufLen := len(valuesBuf)
|
||||||
|
|
||||||
timestamps := br.timestamps
|
timestamps := br.getTimestamps()
|
||||||
var s string
|
var s string
|
||||||
|
|
||||||
if !bf.hasBucketConfig() {
|
if !bf.hasBucketConfig() {
|
||||||
|
@ -1401,7 +1479,11 @@ func getBlockResultColumnIdxByName(cs []*blockResultColumn, name string) int {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (br *blockResult) skipRows(skipRows int) {
|
func (br *blockResult) skipRows(skipRows int) {
|
||||||
br.timestamps = append(br.timestamps[:0], br.timestamps[skipRows:]...)
|
timestamps := br.getTimestamps()
|
||||||
|
br.timestampsBuf = append(br.timestampsBuf[:0], timestamps[skipRows:]...)
|
||||||
|
br.rowsLen -= skipRows
|
||||||
|
br.checkTimestampsLen()
|
||||||
|
|
||||||
for _, c := range br.getColumns() {
|
for _, c := range br.getColumns() {
|
||||||
if c.values != nil {
|
if c.values != nil {
|
||||||
c.values = append(c.values[:0], c.values[skipRows:]...)
|
c.values = append(c.values[:0], c.values[skipRows:]...)
|
||||||
|
@ -1421,7 +1503,11 @@ func (br *blockResult) skipRows(skipRows int) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (br *blockResult) truncateRows(keepRows int) {
|
func (br *blockResult) truncateRows(keepRows int) {
|
||||||
br.timestamps = br.timestamps[:keepRows]
|
timestamps := br.getTimestamps()
|
||||||
|
br.timestampsBuf = append(br.timestampsBuf[:0], timestamps[:keepRows]...)
|
||||||
|
br.rowsLen = keepRows
|
||||||
|
br.checkTimestampsLen()
|
||||||
|
|
||||||
for _, c := range br.getColumns() {
|
for _, c := range br.getColumns() {
|
||||||
if c.values != nil {
|
if c.values != nil {
|
||||||
c.values = c.values[:keepRows]
|
c.values = c.values[:keepRows]
|
||||||
|
@ -1676,10 +1762,10 @@ func (c *blockResultColumn) getFloatValueAtRow(br *blockResult, rowIdx int) (flo
|
||||||
func (c *blockResultColumn) sumLenValues(br *blockResult) uint64 {
|
func (c *blockResultColumn) sumLenValues(br *blockResult) uint64 {
|
||||||
if c.isConst {
|
if c.isConst {
|
||||||
v := c.valuesEncoded[0]
|
v := c.valuesEncoded[0]
|
||||||
return uint64(len(v)) * uint64(len(br.timestamps))
|
return uint64(len(v)) * uint64(br.rowsLen)
|
||||||
}
|
}
|
||||||
if c.isTime {
|
if c.isTime {
|
||||||
return uint64(len(time.RFC3339Nano)) * uint64(len(br.timestamps))
|
return uint64(len(time.RFC3339Nano)) * uint64(br.rowsLen)
|
||||||
}
|
}
|
||||||
|
|
||||||
switch c.valueType {
|
switch c.valueType {
|
||||||
|
@ -1707,7 +1793,7 @@ func (c *blockResultColumn) sumLenValues(br *blockResult) uint64 {
|
||||||
case valueTypeIPv4:
|
case valueTypeIPv4:
|
||||||
return c.sumLenStringValues(br)
|
return c.sumLenStringValues(br)
|
||||||
case valueTypeTimestampISO8601:
|
case valueTypeTimestampISO8601:
|
||||||
return uint64(len(iso8601Timestamp)) * uint64(len(br.timestamps))
|
return uint64(len(iso8601Timestamp)) * uint64(br.rowsLen)
|
||||||
default:
|
default:
|
||||||
logger.Panicf("BUG: unknown valueType=%d", c.valueType)
|
logger.Panicf("BUG: unknown valueType=%d", c.valueType)
|
||||||
return 0
|
return 0
|
||||||
|
@ -1729,7 +1815,7 @@ func (c *blockResultColumn) sumValues(br *blockResult) (float64, int) {
|
||||||
if !ok {
|
if !ok {
|
||||||
return 0, 0
|
return 0, 0
|
||||||
}
|
}
|
||||||
return f * float64(len(br.timestamps)), len(br.timestamps)
|
return f * float64(br.rowsLen), br.rowsLen
|
||||||
}
|
}
|
||||||
if c.isTime {
|
if c.isTime {
|
||||||
return 0, 0
|
return 0, 0
|
||||||
|
@ -1780,25 +1866,25 @@ func (c *blockResultColumn) sumValues(br *blockResult) (float64, int) {
|
||||||
for _, v := range c.getValuesEncoded(br) {
|
for _, v := range c.getValuesEncoded(br) {
|
||||||
sum += uint64(unmarshalUint8(v))
|
sum += uint64(unmarshalUint8(v))
|
||||||
}
|
}
|
||||||
return float64(sum), len(br.timestamps)
|
return float64(sum), br.rowsLen
|
||||||
case valueTypeUint16:
|
case valueTypeUint16:
|
||||||
sum := uint64(0)
|
sum := uint64(0)
|
||||||
for _, v := range c.getValuesEncoded(br) {
|
for _, v := range c.getValuesEncoded(br) {
|
||||||
sum += uint64(unmarshalUint16(v))
|
sum += uint64(unmarshalUint16(v))
|
||||||
}
|
}
|
||||||
return float64(sum), len(br.timestamps)
|
return float64(sum), br.rowsLen
|
||||||
case valueTypeUint32:
|
case valueTypeUint32:
|
||||||
sum := uint64(0)
|
sum := uint64(0)
|
||||||
for _, v := range c.getValuesEncoded(br) {
|
for _, v := range c.getValuesEncoded(br) {
|
||||||
sum += uint64(unmarshalUint32(v))
|
sum += uint64(unmarshalUint32(v))
|
||||||
}
|
}
|
||||||
return float64(sum), len(br.timestamps)
|
return float64(sum), br.rowsLen
|
||||||
case valueTypeUint64:
|
case valueTypeUint64:
|
||||||
sum := float64(0)
|
sum := float64(0)
|
||||||
for _, v := range c.getValuesEncoded(br) {
|
for _, v := range c.getValuesEncoded(br) {
|
||||||
sum += float64(unmarshalUint64(v))
|
sum += float64(unmarshalUint64(v))
|
||||||
}
|
}
|
||||||
return sum, len(br.timestamps)
|
return sum, br.rowsLen
|
||||||
case valueTypeFloat64:
|
case valueTypeFloat64:
|
||||||
sum := float64(0)
|
sum := float64(0)
|
||||||
for _, v := range c.getValuesEncoded(br) {
|
for _, v := range c.getValuesEncoded(br) {
|
||||||
|
@ -1807,7 +1893,7 @@ func (c *blockResultColumn) sumValues(br *blockResult) (float64, int) {
|
||||||
sum += f
|
sum += f
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return sum, len(br.timestamps)
|
return sum, br.rowsLen
|
||||||
case valueTypeIPv4:
|
case valueTypeIPv4:
|
||||||
return 0, 0
|
return 0, 0
|
||||||
case valueTypeTimestampISO8601:
|
case valueTypeTimestampISO8601:
|
||||||
|
@ -1864,15 +1950,15 @@ func truncateTimestampToYear(timestamp int64) int64 {
|
||||||
return time.Date(t.Year(), time.January, 1, 0, 0, 0, 0, time.UTC).UnixNano()
|
return time.Date(t.Year(), time.January, 1, 0, 0, 0, 0, time.UTC).UnixNano()
|
||||||
}
|
}
|
||||||
|
|
||||||
func getEmptyStrings(rowsCount int) []string {
|
func getEmptyStrings(rowsLen int) []string {
|
||||||
p := emptyStrings.Load()
|
p := emptyStrings.Load()
|
||||||
if p == nil {
|
if p == nil {
|
||||||
values := make([]string, rowsCount)
|
values := make([]string, rowsLen)
|
||||||
emptyStrings.Store(&values)
|
emptyStrings.Store(&values)
|
||||||
return values
|
return values
|
||||||
}
|
}
|
||||||
values := *p
|
values := *p
|
||||||
return slicesutil.SetLength(values, rowsCount)
|
return slicesutil.SetLength(values, rowsLen)
|
||||||
}
|
}
|
||||||
|
|
||||||
var emptyStrings atomic.Pointer[[]string]
|
var emptyStrings atomic.Pointer[[]string]
|
||||||
|
|
|
@ -177,9 +177,9 @@ func (bs *blockSearch) search(bsw *blockSearchWork, bm *bitmap) {
|
||||||
|
|
||||||
// fetch the requested columns to bs.br.
|
// fetch the requested columns to bs.br.
|
||||||
if bs.bsw.so.needAllColumns {
|
if bs.bsw.so.needAllColumns {
|
||||||
bs.br.initAllColumns(bs, bm)
|
bs.br.initAllColumns()
|
||||||
} else {
|
} else {
|
||||||
bs.br.initRequestedColumns(bs, bm)
|
bs.br.initRequestedColumns()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -47,7 +47,7 @@ func (fr *filterDayRange) applyToBlockResult(br *blockResult, bm *bitmap) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if c.isTime {
|
if c.isTime {
|
||||||
timestamps := br.timestamps
|
timestamps := br.getTimestamps()
|
||||||
bm.forEachSetBit(func(idx int) bool {
|
bm.forEachSetBit(func(idx int) bool {
|
||||||
timestamp := timestamps[idx]
|
timestamp := timestamps[idx]
|
||||||
return fr.matchTimestampValue(timestamp)
|
return fr.matchTimestampValue(timestamp)
|
||||||
|
|
|
@ -44,9 +44,10 @@ func (fr *filterRange) applyToBlockResult(br *blockResult, bm *bitmap) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if c.isTime {
|
if c.isTime {
|
||||||
|
timestamps := br.getTimestamps()
|
||||||
minValueInt, maxValueInt := toInt64Range(minValue, maxValue)
|
minValueInt, maxValueInt := toInt64Range(minValue, maxValue)
|
||||||
bm.forEachSetBit(func(idx int) bool {
|
bm.forEachSetBit(func(idx int) bool {
|
||||||
timestamp := br.timestamps[idx]
|
timestamp := timestamps[idx]
|
||||||
return timestamp >= minValueInt && timestamp <= maxValueInt
|
return timestamp >= minValueInt && timestamp <= maxValueInt
|
||||||
})
|
})
|
||||||
return
|
return
|
||||||
|
|
|
@ -213,11 +213,12 @@ func testFilterMatchForStorage(t *testing.T, s *Storage, tenantID TenantID, f fi
|
||||||
t.Fatalf("unexpected number of columns in blockResult; got %d; want 2", len(cs))
|
t.Fatalf("unexpected number of columns in blockResult; got %d; want 2", len(cs))
|
||||||
}
|
}
|
||||||
values := cs[0].getValues(br)
|
values := cs[0].getValues(br)
|
||||||
|
timestamps := br.getTimestamps()
|
||||||
resultsMu.Lock()
|
resultsMu.Lock()
|
||||||
for i, v := range values {
|
for i, v := range values {
|
||||||
results = append(results, result{
|
results = append(results, result{
|
||||||
value: strings.Clone(v),
|
value: strings.Clone(v),
|
||||||
timestamp: br.timestamps[i],
|
timestamp: timestamps[i],
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
resultsMu.Unlock()
|
resultsMu.Unlock()
|
||||||
|
|
|
@ -41,7 +41,7 @@ func (ft *filterTime) applyToBlockResult(br *blockResult, bm *bitmap) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if c.isTime {
|
if c.isTime {
|
||||||
timestamps := br.timestamps
|
timestamps := br.getTimestamps()
|
||||||
bm.forEachSetBit(func(idx int) bool {
|
bm.forEachSetBit(func(idx int) bool {
|
||||||
timestamp := timestamps[idx]
|
timestamp := timestamps[idx]
|
||||||
return ft.matchTimestampValue(timestamp)
|
return ft.matchTimestampValue(timestamp)
|
||||||
|
|
|
@ -49,7 +49,7 @@ func (fr *filterWeekRange) applyToBlockResult(br *blockResult, bm *bitmap) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if c.isTime {
|
if c.isTime {
|
||||||
timestamps := br.timestamps
|
timestamps := br.getTimestamps()
|
||||||
bm.forEachSetBit(func(idx int) bool {
|
bm.forEachSetBit(func(idx int) bool {
|
||||||
timestamp := timestamps[idx]
|
timestamp := timestamps[idx]
|
||||||
return fr.matchTimestampValue(timestamp)
|
return fr.matchTimestampValue(timestamp)
|
||||||
|
|
|
@ -79,7 +79,7 @@ type pipeCopyProcessor struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pcp *pipeCopyProcessor) writeBlock(workerID uint, br *blockResult) {
|
func (pcp *pipeCopyProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -59,7 +59,7 @@ type pipeDeleteProcessor struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pdp *pipeDeleteProcessor) writeBlock(workerID uint, br *blockResult) {
|
func (pdp *pipeDeleteProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -66,7 +66,7 @@ type pipeDropEmptyFieldsProcessorShardNopad struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pdp *pipeDropEmptyFieldsProcessor) writeBlock(workerID uint, br *blockResult) {
|
func (pdp *pipeDropEmptyFieldsProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -90,7 +90,7 @@ func (pdp *pipeDropEmptyFieldsProcessor) writeBlock(workerID uint, br *blockResu
|
||||||
shard.wctx.init(workerID, pdp.ppNext)
|
shard.wctx.init(workerID, pdp.ppNext)
|
||||||
|
|
||||||
fields := shard.fields
|
fields := shard.fields
|
||||||
for rowIdx := range br.timestamps {
|
for rowIdx := 0; rowIdx < br.rowsLen; rowIdx++ {
|
||||||
fields = fields[:0]
|
fields = fields[:0]
|
||||||
for i, values := range columnValues {
|
for i, values := range columnValues {
|
||||||
v := values[rowIdx]
|
v := values[rowIdx]
|
||||||
|
|
|
@ -151,7 +151,7 @@ type pipeExtractProcessorShardNopad struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pep *pipeExtractProcessor) writeBlock(workerID uint, br *blockResult) {
|
func (pep *pipeExtractProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -159,7 +159,7 @@ func (pep *pipeExtractProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
shard := &pep.shards[workerID]
|
shard := &pep.shards[workerID]
|
||||||
|
|
||||||
bm := &shard.bm
|
bm := &shard.bm
|
||||||
bm.init(len(br.timestamps))
|
bm.init(br.rowsLen)
|
||||||
bm.setBits()
|
bm.setBits()
|
||||||
if iff := pe.iff; iff != nil {
|
if iff := pe.iff; iff != nil {
|
||||||
iff.f.applyToBlockResult(br, bm)
|
iff.f.applyToBlockResult(br, bm)
|
||||||
|
|
|
@ -175,7 +175,7 @@ type pipeExtractRegexpProcessorShardNopad struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pep *pipeExtractRegexpProcessor) writeBlock(workerID uint, br *blockResult) {
|
func (pep *pipeExtractRegexpProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -183,7 +183,7 @@ func (pep *pipeExtractRegexpProcessor) writeBlock(workerID uint, br *blockResult
|
||||||
shard := &pep.shards[workerID]
|
shard := &pep.shards[workerID]
|
||||||
|
|
||||||
bm := &shard.bm
|
bm := &shard.bm
|
||||||
bm.init(len(br.timestamps))
|
bm.init(br.rowsLen)
|
||||||
bm.setBits()
|
bm.setBits()
|
||||||
if iff := pe.iff; iff != nil {
|
if iff := pe.iff; iff != nil {
|
||||||
iff.f.applyToBlockResult(br, bm)
|
iff.f.applyToBlockResult(br, bm)
|
||||||
|
|
|
@ -94,7 +94,7 @@ func (shard *pipeFieldNamesProcessorShard) getM() map[string]*uint64 {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pfp *pipeFieldNamesProcessor) writeBlock(workerID uint, br *blockResult) {
|
func (pfp *pipeFieldNamesProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -113,7 +113,7 @@ func (pfp *pipeFieldNamesProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
|
|
||||||
// Assume that the column is set for all the rows in the block.
|
// Assume that the column is set for all the rows in the block.
|
||||||
// This is much faster than reading all the column values and counting non-empty rows.
|
// This is much faster than reading all the column values and counting non-empty rows.
|
||||||
*pHits += uint64(len(br.timestamps))
|
*pHits += uint64(br.rowsLen)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -79,7 +79,7 @@ type pipeFieldsProcessor struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pfp *pipeFieldsProcessor) writeBlock(workerID uint, br *blockResult) {
|
func (pfp *pipeFieldsProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -83,14 +83,14 @@ type pipeFilterProcessorShardNopad struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pfp *pipeFilterProcessor) writeBlock(workerID uint, br *blockResult) {
|
func (pfp *pipeFilterProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
shard := &pfp.shards[workerID]
|
shard := &pfp.shards[workerID]
|
||||||
|
|
||||||
bm := &shard.bm
|
bm := &shard.bm
|
||||||
bm.init(len(br.timestamps))
|
bm.init(br.rowsLen)
|
||||||
bm.setBits()
|
bm.setBits()
|
||||||
pfp.pf.f.applyToBlockResult(br, bm)
|
pfp.pf.f.applyToBlockResult(br, bm)
|
||||||
if bm.areAllBitsSet() {
|
if bm.areAllBitsSet() {
|
||||||
|
|
|
@ -136,7 +136,7 @@ type pipeFormatProcessorShardNopad struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pfp *pipeFormatProcessor) writeBlock(workerID uint, br *blockResult) {
|
func (pfp *pipeFormatProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -144,7 +144,7 @@ func (pfp *pipeFormatProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
pf := pfp.pf
|
pf := pfp.pf
|
||||||
|
|
||||||
bm := &shard.bm
|
bm := &shard.bm
|
||||||
bm.init(len(br.timestamps))
|
bm.init(br.rowsLen)
|
||||||
bm.setBits()
|
bm.setBits()
|
||||||
if iff := pf.iff; iff != nil {
|
if iff := pf.iff; iff != nil {
|
||||||
iff.f.applyToBlockResult(br, bm)
|
iff.f.applyToBlockResult(br, bm)
|
||||||
|
@ -157,7 +157,7 @@ func (pfp *pipeFormatProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
shard.rc.name = pf.resultField
|
shard.rc.name = pf.resultField
|
||||||
|
|
||||||
resultColumn := br.getColumnByName(pf.resultField)
|
resultColumn := br.getColumnByName(pf.resultField)
|
||||||
for rowIdx := range br.timestamps {
|
for rowIdx := 0; rowIdx < br.rowsLen; rowIdx++ {
|
||||||
v := ""
|
v := ""
|
||||||
if bm.isSetBit(rowIdx) {
|
if bm.isSetBit(rowIdx) {
|
||||||
v = shard.formatRow(pf, br, rowIdx)
|
v = shard.formatRow(pf, br, rowIdx)
|
||||||
|
|
|
@ -57,11 +57,11 @@ type pipeLimitProcessor struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (plp *pipeLimitProcessor) writeBlock(workerID uint, br *blockResult) {
|
func (plp *pipeLimitProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
rowsProcessed := plp.rowsProcessed.Add(uint64(len(br.timestamps)))
|
rowsProcessed := plp.rowsProcessed.Add(uint64(br.rowsLen))
|
||||||
limit := plp.pl.limit
|
limit := plp.pl.limit
|
||||||
if rowsProcessed <= limit {
|
if rowsProcessed <= limit {
|
||||||
// Fast path - write all the rows to ppNext.
|
// Fast path - write all the rows to ppNext.
|
||||||
|
@ -73,7 +73,7 @@ func (plp *pipeLimitProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Slow path - overflow. Write the remaining rows if needed.
|
// Slow path - overflow. Write the remaining rows if needed.
|
||||||
rowsProcessed -= uint64(len(br.timestamps))
|
rowsProcessed -= uint64(br.rowsLen)
|
||||||
if rowsProcessed >= limit {
|
if rowsProcessed >= limit {
|
||||||
// Nothing to write. There is no need in cancel() call, since it has been called by another goroutine.
|
// Nothing to write. There is no need in cancel() call, since it has been called by another goroutine.
|
||||||
return
|
return
|
||||||
|
|
|
@ -293,12 +293,12 @@ func (shard *pipeMathProcessorShard) executeExpr(me *mathExpr, br *blockResult)
|
||||||
rIdx := len(shard.rs)
|
rIdx := len(shard.rs)
|
||||||
shard.rs = slicesutil.SetLength(shard.rs, len(shard.rs)+1)
|
shard.rs = slicesutil.SetLength(shard.rs, len(shard.rs)+1)
|
||||||
|
|
||||||
shard.rsBuf = slicesutil.SetLength(shard.rsBuf, len(shard.rsBuf)+len(br.timestamps))
|
shard.rsBuf = slicesutil.SetLength(shard.rsBuf, len(shard.rsBuf)+br.rowsLen)
|
||||||
shard.rs[rIdx] = shard.rsBuf[len(shard.rsBuf)-len(br.timestamps):]
|
shard.rs[rIdx] = shard.rsBuf[len(shard.rsBuf)-br.rowsLen:]
|
||||||
|
|
||||||
if me.isConst {
|
if me.isConst {
|
||||||
r := shard.rs[rIdx]
|
r := shard.rs[rIdx]
|
||||||
for i := range br.timestamps {
|
for i := 0; i < br.rowsLen; i++ {
|
||||||
r[i] = me.constValue
|
r[i] = me.constValue
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
|
@ -331,7 +331,7 @@ func (shard *pipeMathProcessorShard) executeExpr(me *mathExpr, br *blockResult)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pmp *pipeMathProcessor) writeBlock(workerID uint, br *blockResult) {
|
func (pmp *pipeMathProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -51,16 +51,16 @@ type pipeOffsetProcessor struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pop *pipeOffsetProcessor) writeBlock(workerID uint, br *blockResult) {
|
func (pop *pipeOffsetProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
rowsProcessed := pop.rowsProcessed.Add(uint64(len(br.timestamps)))
|
rowsProcessed := pop.rowsProcessed.Add(uint64(br.rowsLen))
|
||||||
if rowsProcessed <= pop.po.offset {
|
if rowsProcessed <= pop.po.offset {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
rowsProcessed -= uint64(len(br.timestamps))
|
rowsProcessed -= uint64(br.rowsLen)
|
||||||
if rowsProcessed >= pop.po.offset {
|
if rowsProcessed >= pop.po.offset {
|
||||||
pop.ppNext.writeBlock(workerID, br)
|
pop.ppNext.writeBlock(workerID, br)
|
||||||
return
|
return
|
||||||
|
|
|
@ -64,7 +64,7 @@ type pipePackProcessorShardNopad struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ppp *pipePackProcessor) writeBlock(workerID uint, br *blockResult) {
|
func (ppp *pipePackProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -86,7 +86,7 @@ func (ppp *pipePackProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
|
|
||||||
buf := shard.buf[:0]
|
buf := shard.buf[:0]
|
||||||
fields := shard.fields
|
fields := shard.fields
|
||||||
for rowIdx := range br.timestamps {
|
for rowIdx := 0; rowIdx < br.rowsLen; rowIdx++ {
|
||||||
fields = fields[:0]
|
fields = fields[:0]
|
||||||
for _, c := range cs {
|
for _, c := range cs {
|
||||||
v := c.getValueAtRow(br, rowIdx)
|
v := c.getValueAtRow(br, rowIdx)
|
||||||
|
|
|
@ -83,7 +83,7 @@ type pipeRenameProcessor struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (prp *pipeRenameProcessor) writeBlock(workerID uint, br *blockResult) {
|
func (prp *pipeRenameProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -245,11 +245,11 @@ func (shard *pipeSortProcessorShard) writeBlock(br *blockResult) {
|
||||||
shard.columnValues = columnValues
|
shard.columnValues = columnValues
|
||||||
|
|
||||||
// Generate byColumns
|
// Generate byColumns
|
||||||
valuesEncoded := make([]string, len(br.timestamps))
|
valuesEncoded := make([]string, br.rowsLen)
|
||||||
shard.stateSizeBudget -= len(valuesEncoded) * int(unsafe.Sizeof(valuesEncoded[0]))
|
shard.stateSizeBudget -= len(valuesEncoded) * int(unsafe.Sizeof(valuesEncoded[0]))
|
||||||
|
|
||||||
bb := bbPool.Get()
|
bb := bbPool.Get()
|
||||||
for rowIdx := range br.timestamps {
|
for rowIdx := 0; rowIdx < br.rowsLen; rowIdx++ {
|
||||||
// Marshal all the columns per each row into a single string
|
// Marshal all the columns per each row into a single string
|
||||||
// and sort rows by the resulting string.
|
// and sort rows by the resulting string.
|
||||||
bb.B = bb.B[:0]
|
bb.B = bb.B[:0]
|
||||||
|
@ -267,8 +267,8 @@ func (shard *pipeSortProcessorShard) writeBlock(br *blockResult) {
|
||||||
}
|
}
|
||||||
bbPool.Put(bb)
|
bbPool.Put(bb)
|
||||||
|
|
||||||
i64Values := make([]int64, len(br.timestamps))
|
i64Values := make([]int64, br.rowsLen)
|
||||||
f64Values := make([]float64, len(br.timestamps))
|
f64Values := make([]float64, br.rowsLen)
|
||||||
for i := range f64Values {
|
for i := range f64Values {
|
||||||
f64Values[i] = nan
|
f64Values[i] = nan
|
||||||
}
|
}
|
||||||
|
@ -347,7 +347,7 @@ func (shard *pipeSortProcessorShard) writeBlock(br *blockResult) {
|
||||||
blockIdx := len(shard.blocks) - 1
|
blockIdx := len(shard.blocks) - 1
|
||||||
rowRefs := shard.rowRefs
|
rowRefs := shard.rowRefs
|
||||||
rowRefsLen := len(rowRefs)
|
rowRefsLen := len(rowRefs)
|
||||||
for i := range br.timestamps {
|
for i := 0; i < br.rowsLen; i++ {
|
||||||
rowRefs = append(rowRefs, sortRowRef{
|
rowRefs = append(rowRefs, sortRowRef{
|
||||||
blockIdx: blockIdx,
|
blockIdx: blockIdx,
|
||||||
rowIdx: i,
|
rowIdx: i,
|
||||||
|
@ -405,7 +405,7 @@ func (shard *pipeSortProcessorShard) Less(i, j int) bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (psp *pipeSortProcessor) writeBlock(workerID uint, br *blockResult) {
|
func (psp *pipeSortProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -686,8 +686,10 @@ func sortBlockLess(shardA *pipeSortProcessorShard, rowIdxA int, shardB *pipeSort
|
||||||
|
|
||||||
if cA.c.isTime && cB.c.isTime {
|
if cA.c.isTime && cB.c.isTime {
|
||||||
// Fast path - sort by _time
|
// Fast path - sort by _time
|
||||||
tA := bA.br.timestamps[rrA.rowIdx]
|
timestampsA := bA.br.getTimestamps()
|
||||||
tB := bB.br.timestamps[rrB.rowIdx]
|
timestampsB := bB.br.getTimestamps()
|
||||||
|
tA := timestampsA[rrA.rowIdx]
|
||||||
|
tB := timestampsB[rrB.rowIdx]
|
||||||
if tA == tB {
|
if tA == tB {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
|
@ -182,7 +182,8 @@ func (shard *pipeTopkProcessorShard) writeBlock(br *blockResult) {
|
||||||
byColumns := shard.byColumns[:0]
|
byColumns := shard.byColumns[:0]
|
||||||
byColumnsIsTime := shard.byColumnsIsTime[:0]
|
byColumnsIsTime := shard.byColumnsIsTime[:0]
|
||||||
bb := bbPool.Get()
|
bb := bbPool.Get()
|
||||||
for rowIdx, timestamp := range br.timestamps {
|
timestamps := br.getTimestamps()
|
||||||
|
for rowIdx, timestamp := range timestamps {
|
||||||
byColumns = byColumns[:0]
|
byColumns = byColumns[:0]
|
||||||
bb.B = bb.B[:0]
|
bb.B = bb.B[:0]
|
||||||
for i, values := range byColumnValues {
|
for i, values := range byColumnValues {
|
||||||
|
@ -234,7 +235,8 @@ func (shard *pipeTopkProcessorShard) writeBlock(br *blockResult) {
|
||||||
|
|
||||||
// add rows to shard
|
// add rows to shard
|
||||||
byColumns := shard.byColumns[:0]
|
byColumns := shard.byColumns[:0]
|
||||||
for rowIdx, timestamp := range br.timestamps {
|
timestamps := br.getTimestamps()
|
||||||
|
for rowIdx, timestamp := range timestamps {
|
||||||
byColumns = byColumns[:0]
|
byColumns = byColumns[:0]
|
||||||
|
|
||||||
for i, values := range byColumnValues {
|
for i, values := range byColumnValues {
|
||||||
|
@ -307,7 +309,7 @@ func (shard *pipeTopkProcessorShard) sortRows(stopCh <-chan struct{}) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ptp *pipeTopkProcessor) writeBlock(workerID uint, br *blockResult) {
|
func (ptp *pipeTopkProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -269,7 +269,7 @@ func (shard *pipeStatsProcessorShard) writeBlock(br *blockResult) {
|
||||||
// Slower generic path for a column with different values.
|
// Slower generic path for a column with different values.
|
||||||
var psg *pipeStatsGroup
|
var psg *pipeStatsGroup
|
||||||
keyBuf := shard.keyBuf[:0]
|
keyBuf := shard.keyBuf[:0]
|
||||||
for i := range br.timestamps {
|
for i := 0; i < br.rowsLen; i++ {
|
||||||
if i <= 0 || values[i-1] != values[i] {
|
if i <= 0 || values[i-1] != values[i] {
|
||||||
keyBuf = encoding.MarshalBytes(keyBuf[:0], bytesutil.ToUnsafeBytes(values[i]))
|
keyBuf = encoding.MarshalBytes(keyBuf[:0], bytesutil.ToUnsafeBytes(values[i]))
|
||||||
psg = shard.getPipeStatsGroup(keyBuf)
|
psg = shard.getPipeStatsGroup(keyBuf)
|
||||||
|
@ -312,7 +312,7 @@ func (shard *pipeStatsProcessorShard) writeBlock(br *blockResult) {
|
||||||
// The slowest path - group by multiple columns with different values across rows.
|
// The slowest path - group by multiple columns with different values across rows.
|
||||||
var psg *pipeStatsGroup
|
var psg *pipeStatsGroup
|
||||||
keyBuf := shard.keyBuf[:0]
|
keyBuf := shard.keyBuf[:0]
|
||||||
for i := range br.timestamps {
|
for i := 0; i < br.rowsLen; i++ {
|
||||||
// Verify whether the key for 'by (...)' fields equals the previous key
|
// Verify whether the key for 'by (...)' fields equals the previous key
|
||||||
sameValue := i > 0
|
sameValue := i > 0
|
||||||
for _, values := range columnValues {
|
for _, values := range columnValues {
|
||||||
|
@ -338,7 +338,7 @@ func (shard *pipeStatsProcessorShard) applyPerFunctionFilters(br *blockResult) {
|
||||||
funcs := shard.ps.funcs
|
funcs := shard.ps.funcs
|
||||||
for i := range funcs {
|
for i := range funcs {
|
||||||
bm := &shard.bms[i]
|
bm := &shard.bms[i]
|
||||||
bm.init(len(br.timestamps))
|
bm.init(br.rowsLen)
|
||||||
bm.setBits()
|
bm.setBits()
|
||||||
|
|
||||||
iff := funcs[i].iff
|
iff := funcs[i].iff
|
||||||
|
@ -400,7 +400,7 @@ func (psg *pipeStatsGroup) updateStatsForRow(bms []bitmap, br *blockResult, rowI
|
||||||
}
|
}
|
||||||
|
|
||||||
func (psp *pipeStatsProcessor) writeBlock(workerID uint, br *blockResult) {
|
func (psp *pipeStatsProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -139,7 +139,8 @@ func getStreamRows(ctx context.Context, s *Storage, streamID string, minTimestam
|
||||||
}
|
}
|
||||||
|
|
||||||
cs := br.getColumns()
|
cs := br.getColumns()
|
||||||
for i, timestamp := range br.timestamps {
|
timestamps := br.getTimestamps()
|
||||||
|
for i, timestamp := range timestamps {
|
||||||
fields := make([]Field, len(cs))
|
fields := make([]Field, len(cs))
|
||||||
stateSize += int(unsafe.Sizeof(fields[0])) * len(fields)
|
stateSize += int(unsafe.Sizeof(fields[0])) * len(fields)
|
||||||
|
|
||||||
|
@ -210,7 +211,8 @@ func (shard *pipeStreamContextProcessorShard) writeBlock(br *blockResult) {
|
||||||
cs := br.getColumns()
|
cs := br.getColumns()
|
||||||
cStreamID := br.getColumnByName("_stream_id")
|
cStreamID := br.getColumnByName("_stream_id")
|
||||||
stateSize := 0
|
stateSize := 0
|
||||||
for i, timestamp := range br.timestamps {
|
timestamps := br.getTimestamps()
|
||||||
|
for i, timestamp := range timestamps {
|
||||||
fields := make([]Field, len(cs))
|
fields := make([]Field, len(cs))
|
||||||
stateSize += int(unsafe.Sizeof(fields[0])) * len(fields)
|
stateSize += int(unsafe.Sizeof(fields[0])) * len(fields)
|
||||||
|
|
||||||
|
@ -250,7 +252,7 @@ func (shard *pipeStreamContextProcessorShard) getM() map[string][]streamContextR
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pcp *pipeStreamContextProcessor) writeBlock(workerID uint, br *blockResult) {
|
func (pcp *pipeStreamContextProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if pcp.pc.linesBefore <= 0 && pcp.pc.linesAfter <= 0 {
|
if pcp.pc.linesBefore <= 0 && pcp.pc.linesAfter <= 0 {
|
||||||
|
|
|
@ -145,7 +145,7 @@ func (shard *pipeTopProcessorShard) writeBlock(br *blockResult) {
|
||||||
// Take into account all the columns in br.
|
// Take into account all the columns in br.
|
||||||
keyBuf := shard.keyBuf
|
keyBuf := shard.keyBuf
|
||||||
cs := br.getColumns()
|
cs := br.getColumns()
|
||||||
for i := range br.timestamps {
|
for i := 0; i < br.rowsLen; i++ {
|
||||||
keyBuf = keyBuf[:0]
|
keyBuf = keyBuf[:0]
|
||||||
for _, c := range cs {
|
for _, c := range cs {
|
||||||
v := c.getValueAtRow(br, i)
|
v := c.getValueAtRow(br, i)
|
||||||
|
@ -162,7 +162,7 @@ func (shard *pipeTopProcessorShard) writeBlock(br *blockResult) {
|
||||||
c := br.getColumnByName(byFields[0])
|
c := br.getColumnByName(byFields[0])
|
||||||
if c.isConst {
|
if c.isConst {
|
||||||
v := c.valuesEncoded[0]
|
v := c.valuesEncoded[0]
|
||||||
shard.updateState(v, uint64(len(br.timestamps)))
|
shard.updateState(v, uint64(br.rowsLen))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if c.valueType == valueTypeDict {
|
if c.valueType == valueTypeDict {
|
||||||
|
@ -197,7 +197,7 @@ func (shard *pipeTopProcessorShard) writeBlock(br *blockResult) {
|
||||||
shard.columnValues = columnValues
|
shard.columnValues = columnValues
|
||||||
|
|
||||||
keyBuf := shard.keyBuf
|
keyBuf := shard.keyBuf
|
||||||
for i := range br.timestamps {
|
for i := 0; i < br.rowsLen; i++ {
|
||||||
keyBuf = keyBuf[:0]
|
keyBuf = keyBuf[:0]
|
||||||
for _, values := range columnValues {
|
for _, values := range columnValues {
|
||||||
keyBuf = encoding.MarshalBytes(keyBuf, bytesutil.ToUnsafeBytes(values[i]))
|
keyBuf = encoding.MarshalBytes(keyBuf, bytesutil.ToUnsafeBytes(values[i]))
|
||||||
|
@ -228,7 +228,7 @@ func (shard *pipeTopProcessorShard) getM() map[string]*uint64 {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ptp *pipeTopProcessor) writeBlock(workerID uint, br *blockResult) {
|
func (ptp *pipeTopProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -147,7 +147,7 @@ func (shard *pipeUniqProcessorShard) writeBlock(br *blockResult) bool {
|
||||||
// Take into account all the columns in br.
|
// Take into account all the columns in br.
|
||||||
keyBuf := shard.keyBuf
|
keyBuf := shard.keyBuf
|
||||||
cs := br.getColumns()
|
cs := br.getColumns()
|
||||||
for i := range br.timestamps {
|
for i := 0; i < br.rowsLen; i++ {
|
||||||
keyBuf = keyBuf[:0]
|
keyBuf = keyBuf[:0]
|
||||||
for _, c := range cs {
|
for _, c := range cs {
|
||||||
v := c.getValueAtRow(br, i)
|
v := c.getValueAtRow(br, i)
|
||||||
|
@ -164,7 +164,7 @@ func (shard *pipeUniqProcessorShard) writeBlock(br *blockResult) bool {
|
||||||
c := br.getColumnByName(byFields[0])
|
c := br.getColumnByName(byFields[0])
|
||||||
if c.isConst {
|
if c.isConst {
|
||||||
v := c.valuesEncoded[0]
|
v := c.valuesEncoded[0]
|
||||||
shard.updateState(v, uint64(len(br.timestamps)))
|
shard.updateState(v, uint64(br.rowsLen))
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
if c.valueType == valueTypeDict {
|
if c.valueType == valueTypeDict {
|
||||||
|
@ -207,7 +207,7 @@ func (shard *pipeUniqProcessorShard) writeBlock(br *blockResult) bool {
|
||||||
shard.columnValues = columnValues
|
shard.columnValues = columnValues
|
||||||
|
|
||||||
keyBuf := shard.keyBuf
|
keyBuf := shard.keyBuf
|
||||||
for i := range br.timestamps {
|
for i := 0; i < br.rowsLen; i++ {
|
||||||
seenValue := true
|
seenValue := true
|
||||||
for _, values := range columnValues {
|
for _, values := range columnValues {
|
||||||
if needHits || i == 0 || values[i-1] != values[i] {
|
if needHits || i == 0 || values[i-1] != values[i] {
|
||||||
|
@ -251,7 +251,7 @@ func (shard *pipeUniqProcessorShard) getM() map[string]*uint64 {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pup *pipeUniqProcessor) writeBlock(workerID uint, br *blockResult) {
|
func (pup *pipeUniqProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -148,7 +148,7 @@ type pipeUnpackProcessorShardNopad struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pup *pipeUnpackProcessor) writeBlock(workerID uint, br *blockResult) {
|
func (pup *pipeUnpackProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -157,7 +157,7 @@ func (pup *pipeUnpackProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
shard.uctx.init(pup.fieldPrefix)
|
shard.uctx.init(pup.fieldPrefix)
|
||||||
|
|
||||||
bm := &shard.bm
|
bm := &shard.bm
|
||||||
bm.init(len(br.timestamps))
|
bm.init(br.rowsLen)
|
||||||
bm.setBits()
|
bm.setBits()
|
||||||
if pup.iff != nil {
|
if pup.iff != nil {
|
||||||
pup.iff.f.applyToBlockResult(br, bm)
|
pup.iff.f.applyToBlockResult(br, bm)
|
||||||
|
@ -172,7 +172,7 @@ func (pup *pipeUnpackProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
v := c.valuesEncoded[0]
|
v := c.valuesEncoded[0]
|
||||||
shard.uctx.resetFields()
|
shard.uctx.resetFields()
|
||||||
pup.unpackFunc(&shard.uctx, v)
|
pup.unpackFunc(&shard.uctx, v)
|
||||||
for rowIdx := range br.timestamps {
|
for rowIdx := 0; rowIdx < br.rowsLen; rowIdx++ {
|
||||||
if bm.isSetBit(rowIdx) {
|
if bm.isSetBit(rowIdx) {
|
||||||
shard.wctx.writeRow(rowIdx, shard.uctx.fields)
|
shard.wctx.writeRow(rowIdx, shard.uctx.fields)
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@ -106,7 +106,7 @@ type pipeUnrollProcessorShardNopad struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pup *pipeUnrollProcessor) writeBlock(workerID uint, br *blockResult) {
|
func (pup *pipeUnrollProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -115,7 +115,7 @@ func (pup *pipeUnrollProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
shard.wctx.init(workerID, pup.ppNext, false, false, br)
|
shard.wctx.init(workerID, pup.ppNext, false, false, br)
|
||||||
|
|
||||||
bm := &shard.bm
|
bm := &shard.bm
|
||||||
bm.init(len(br.timestamps))
|
bm.init(br.rowsLen)
|
||||||
bm.setBits()
|
bm.setBits()
|
||||||
if iff := pu.iff; iff != nil {
|
if iff := pu.iff; iff != nil {
|
||||||
iff.f.applyToBlockResult(br, bm)
|
iff.f.applyToBlockResult(br, bm)
|
||||||
|
@ -133,7 +133,7 @@ func (pup *pipeUnrollProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
}
|
}
|
||||||
|
|
||||||
fields := shard.fields
|
fields := shard.fields
|
||||||
for rowIdx := range br.timestamps {
|
for rowIdx := 0; rowIdx < br.rowsLen; rowIdx++ {
|
||||||
if bm.isSetBit(rowIdx) {
|
if bm.isSetBit(rowIdx) {
|
||||||
if needStop(pup.stopCh) {
|
if needStop(pup.stopCh) {
|
||||||
return
|
return
|
||||||
|
|
|
@ -62,14 +62,14 @@ type pipeUpdateProcessorShardNopad struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pup *pipeUpdateProcessor) writeBlock(workerID uint, br *blockResult) {
|
func (pup *pipeUpdateProcessor) writeBlock(workerID uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
shard := &pup.shards[workerID]
|
shard := &pup.shards[workerID]
|
||||||
|
|
||||||
bm := &shard.bm
|
bm := &shard.bm
|
||||||
bm.init(len(br.timestamps))
|
bm.init(br.rowsLen)
|
||||||
bm.setBits()
|
bm.setBits()
|
||||||
if iff := pup.iff; iff != nil {
|
if iff := pup.iff; iff != nil {
|
||||||
iff.f.applyToBlockResult(br, bm)
|
iff.f.applyToBlockResult(br, bm)
|
||||||
|
|
|
@ -136,7 +136,7 @@ func (pp *testPipeProcessor) writeBlock(_ uint, br *blockResult) {
|
||||||
columnValues = append(columnValues, values)
|
columnValues = append(columnValues, values)
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := range br.timestamps {
|
for i := 0; i < br.rowsLen; i++ {
|
||||||
row := make([]Field, len(columnValues))
|
row := make([]Field, len(columnValues))
|
||||||
for j, values := range columnValues {
|
for j, values := range columnValues {
|
||||||
r := &row[j]
|
r := &row[j]
|
||||||
|
|
|
@ -18,7 +18,7 @@ func (sc *statsCount) String() string {
|
||||||
|
|
||||||
func (sc *statsCount) updateNeededFields(neededFields fieldsSet) {
|
func (sc *statsCount) updateNeededFields(neededFields fieldsSet) {
|
||||||
if len(sc.fields) == 0 {
|
if len(sc.fields) == 0 {
|
||||||
// There is no need in fetching any columns for count(*) - the number of matching rows can be calculated as len(blockResult.timestamps)
|
// There is no need in fetching any columns for count(*) - the number of matching rows can be calculated as blockResult.rowsLen
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
neededFields.addFields(sc.fields)
|
neededFields.addFields(sc.fields)
|
||||||
|
@ -41,7 +41,7 @@ func (scp *statsCountProcessor) updateStatsForAllRows(br *blockResult) int {
|
||||||
fields := scp.sc.fields
|
fields := scp.sc.fields
|
||||||
if len(fields) == 0 {
|
if len(fields) == 0 {
|
||||||
// Fast path - unconditionally count all the columns.
|
// Fast path - unconditionally count all the columns.
|
||||||
scp.rowsCount += uint64(len(br.timestamps))
|
scp.rowsCount += uint64(br.rowsLen)
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
if len(fields) == 1 {
|
if len(fields) == 1 {
|
||||||
|
@ -49,12 +49,12 @@ func (scp *statsCountProcessor) updateStatsForAllRows(br *blockResult) int {
|
||||||
c := br.getColumnByName(fields[0])
|
c := br.getColumnByName(fields[0])
|
||||||
if c.isConst {
|
if c.isConst {
|
||||||
if c.valuesEncoded[0] != "" {
|
if c.valuesEncoded[0] != "" {
|
||||||
scp.rowsCount += uint64(len(br.timestamps))
|
scp.rowsCount += uint64(br.rowsLen)
|
||||||
}
|
}
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
if c.isTime {
|
if c.isTime {
|
||||||
scp.rowsCount += uint64(len(br.timestamps))
|
scp.rowsCount += uint64(br.rowsLen)
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
switch c.valueType {
|
switch c.valueType {
|
||||||
|
@ -68,7 +68,7 @@ func (scp *statsCountProcessor) updateStatsForAllRows(br *blockResult) int {
|
||||||
case valueTypeDict:
|
case valueTypeDict:
|
||||||
zeroDictIdx := slices.Index(c.dictValues, "")
|
zeroDictIdx := slices.Index(c.dictValues, "")
|
||||||
if zeroDictIdx < 0 {
|
if zeroDictIdx < 0 {
|
||||||
scp.rowsCount += uint64(len(br.timestamps))
|
scp.rowsCount += uint64(br.rowsLen)
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
for _, v := range c.getValuesEncoded(br) {
|
for _, v := range c.getValuesEncoded(br) {
|
||||||
|
@ -78,7 +78,7 @@ func (scp *statsCountProcessor) updateStatsForAllRows(br *blockResult) int {
|
||||||
}
|
}
|
||||||
return 0
|
return 0
|
||||||
case valueTypeUint8, valueTypeUint16, valueTypeUint32, valueTypeUint64, valueTypeFloat64, valueTypeIPv4, valueTypeTimestampISO8601:
|
case valueTypeUint8, valueTypeUint16, valueTypeUint32, valueTypeUint64, valueTypeFloat64, valueTypeIPv4, valueTypeTimestampISO8601:
|
||||||
scp.rowsCount += uint64(len(br.timestamps))
|
scp.rowsCount += uint64(br.rowsLen)
|
||||||
return 0
|
return 0
|
||||||
default:
|
default:
|
||||||
logger.Panicf("BUG: unknown valueType=%d", c.valueType)
|
logger.Panicf("BUG: unknown valueType=%d", c.valueType)
|
||||||
|
@ -87,7 +87,7 @@ func (scp *statsCountProcessor) updateStatsForAllRows(br *blockResult) int {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Slow path - count rows containing at least a single non-empty value for the fields enumerated inside count().
|
// Slow path - count rows containing at least a single non-empty value for the fields enumerated inside count().
|
||||||
bm := getBitmap(len(br.timestamps))
|
bm := getBitmap(br.rowsLen)
|
||||||
defer putBitmap(bm)
|
defer putBitmap(bm)
|
||||||
|
|
||||||
bm.setBits()
|
bm.setBits()
|
||||||
|
@ -95,13 +95,13 @@ func (scp *statsCountProcessor) updateStatsForAllRows(br *blockResult) int {
|
||||||
c := br.getColumnByName(f)
|
c := br.getColumnByName(f)
|
||||||
if c.isConst {
|
if c.isConst {
|
||||||
if c.valuesEncoded[0] != "" {
|
if c.valuesEncoded[0] != "" {
|
||||||
scp.rowsCount += uint64(len(br.timestamps))
|
scp.rowsCount += uint64(br.rowsLen)
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if c.isTime {
|
if c.isTime {
|
||||||
scp.rowsCount += uint64(len(br.timestamps))
|
scp.rowsCount += uint64(br.rowsLen)
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -113,7 +113,7 @@ func (scp *statsCountProcessor) updateStatsForAllRows(br *blockResult) int {
|
||||||
})
|
})
|
||||||
case valueTypeDict:
|
case valueTypeDict:
|
||||||
if !slices.Contains(c.dictValues, "") {
|
if !slices.Contains(c.dictValues, "") {
|
||||||
scp.rowsCount += uint64(len(br.timestamps))
|
scp.rowsCount += uint64(br.rowsLen)
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
valuesEncoded := c.getValuesEncoded(br)
|
valuesEncoded := c.getValuesEncoded(br)
|
||||||
|
@ -122,7 +122,7 @@ func (scp *statsCountProcessor) updateStatsForAllRows(br *blockResult) int {
|
||||||
return c.dictValues[dictIdx] == ""
|
return c.dictValues[dictIdx] == ""
|
||||||
})
|
})
|
||||||
case valueTypeUint8, valueTypeUint16, valueTypeUint32, valueTypeUint64, valueTypeFloat64, valueTypeIPv4, valueTypeTimestampISO8601:
|
case valueTypeUint8, valueTypeUint16, valueTypeUint32, valueTypeUint64, valueTypeFloat64, valueTypeIPv4, valueTypeTimestampISO8601:
|
||||||
scp.rowsCount += uint64(len(br.timestamps))
|
scp.rowsCount += uint64(br.rowsLen)
|
||||||
return 0
|
return 0
|
||||||
default:
|
default:
|
||||||
logger.Panicf("BUG: unknown valueType=%d", c.valueType)
|
logger.Panicf("BUG: unknown valueType=%d", c.valueType)
|
||||||
|
@ -130,7 +130,7 @@ func (scp *statsCountProcessor) updateStatsForAllRows(br *blockResult) int {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
scp.rowsCount += uint64(len(br.timestamps))
|
scp.rowsCount += uint64(br.rowsLen)
|
||||||
scp.rowsCount -= uint64(bm.onesCount())
|
scp.rowsCount -= uint64(bm.onesCount())
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
|
@ -36,7 +36,7 @@ type statsCountEmptyProcessor struct {
|
||||||
func (scp *statsCountEmptyProcessor) updateStatsForAllRows(br *blockResult) int {
|
func (scp *statsCountEmptyProcessor) updateStatsForAllRows(br *blockResult) int {
|
||||||
fields := scp.sc.fields
|
fields := scp.sc.fields
|
||||||
if len(fields) == 0 {
|
if len(fields) == 0 {
|
||||||
bm := getBitmap(len(br.timestamps))
|
bm := getBitmap(br.rowsLen)
|
||||||
bm.setBits()
|
bm.setBits()
|
||||||
for _, c := range br.getColumns() {
|
for _, c := range br.getColumns() {
|
||||||
values := c.getValues(br)
|
values := c.getValues(br)
|
||||||
|
@ -53,7 +53,7 @@ func (scp *statsCountEmptyProcessor) updateStatsForAllRows(br *blockResult) int
|
||||||
c := br.getColumnByName(fields[0])
|
c := br.getColumnByName(fields[0])
|
||||||
if c.isConst {
|
if c.isConst {
|
||||||
if c.valuesEncoded[0] == "" {
|
if c.valuesEncoded[0] == "" {
|
||||||
scp.rowsCount += uint64(len(br.timestamps))
|
scp.rowsCount += uint64(br.rowsLen)
|
||||||
}
|
}
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
@ -88,7 +88,7 @@ func (scp *statsCountEmptyProcessor) updateStatsForAllRows(br *blockResult) int
|
||||||
}
|
}
|
||||||
|
|
||||||
// Slow path - count rows containing empty value for all the fields enumerated inside count_empty().
|
// Slow path - count rows containing empty value for all the fields enumerated inside count_empty().
|
||||||
bm := getBitmap(len(br.timestamps))
|
bm := getBitmap(br.rowsLen)
|
||||||
defer putBitmap(bm)
|
defer putBitmap(bm)
|
||||||
|
|
||||||
bm.setBits()
|
bm.setBits()
|
||||||
|
|
|
@ -64,7 +64,7 @@ func (sup *statsCountUniqProcessor) updateStatsForAllRows(br *blockResult) int {
|
||||||
sup.columnValues = columnValues
|
sup.columnValues = columnValues
|
||||||
|
|
||||||
keyBuf := sup.keyBuf[:0]
|
keyBuf := sup.keyBuf[:0]
|
||||||
for i := range br.timestamps {
|
for i := 0; i < br.rowsLen; i++ {
|
||||||
seenKey := true
|
seenKey := true
|
||||||
for _, values := range columnValues {
|
for _, values := range columnValues {
|
||||||
if i == 0 || values[i-1] != values[i] {
|
if i == 0 || values[i-1] != values[i] {
|
||||||
|
@ -103,8 +103,8 @@ func (sup *statsCountUniqProcessor) updateStatsForAllRows(br *blockResult) int {
|
||||||
// This guarantees that keys do not clash for different column types across blocks.
|
// This guarantees that keys do not clash for different column types across blocks.
|
||||||
c := br.getColumnByName(fields[0])
|
c := br.getColumnByName(fields[0])
|
||||||
if c.isTime {
|
if c.isTime {
|
||||||
// Count unique br.timestamps
|
// Count unique timestamps
|
||||||
timestamps := br.timestamps
|
timestamps := br.getTimestamps()
|
||||||
keyBuf := sup.keyBuf[:0]
|
keyBuf := sup.keyBuf[:0]
|
||||||
for i, timestamp := range timestamps {
|
for i, timestamp := range timestamps {
|
||||||
if i > 0 && timestamps[i-1] == timestamps[i] {
|
if i > 0 && timestamps[i-1] == timestamps[i] {
|
||||||
|
@ -180,7 +180,7 @@ func (sup *statsCountUniqProcessor) updateStatsForAllRows(br *blockResult) int {
|
||||||
sup.columnValues = columnValues
|
sup.columnValues = columnValues
|
||||||
|
|
||||||
keyBuf := sup.keyBuf[:0]
|
keyBuf := sup.keyBuf[:0]
|
||||||
for i := range br.timestamps {
|
for i := 0; i < br.rowsLen; i++ {
|
||||||
seenKey := true
|
seenKey := true
|
||||||
for _, values := range columnValues {
|
for _, values := range columnValues {
|
||||||
if i == 0 || values[i-1] != values[i] {
|
if i == 0 || values[i-1] != values[i] {
|
||||||
|
@ -247,10 +247,11 @@ func (sup *statsCountUniqProcessor) updateStatsForRow(br *blockResult, rowIdx in
|
||||||
// This guarantees that keys do not clash for different column types across blocks.
|
// This guarantees that keys do not clash for different column types across blocks.
|
||||||
c := br.getColumnByName(fields[0])
|
c := br.getColumnByName(fields[0])
|
||||||
if c.isTime {
|
if c.isTime {
|
||||||
// Count unique br.timestamps
|
// Count unique timestamps
|
||||||
|
timestamps := br.getTimestamps()
|
||||||
keyBuf := sup.keyBuf[:0]
|
keyBuf := sup.keyBuf[:0]
|
||||||
keyBuf = append(keyBuf[:0], 1)
|
keyBuf = append(keyBuf[:0], 1)
|
||||||
keyBuf = encoding.MarshalInt64(keyBuf, br.timestamps[rowIdx])
|
keyBuf = encoding.MarshalInt64(keyBuf, timestamps[rowIdx])
|
||||||
stateSizeIncrease += sup.updateState(keyBuf)
|
stateSizeIncrease += sup.updateState(keyBuf)
|
||||||
sup.keyBuf = keyBuf
|
sup.keyBuf = keyBuf
|
||||||
return stateSizeIncrease
|
return stateSizeIncrease
|
||||||
|
|
|
@ -80,20 +80,12 @@ func (smp *statsMaxProcessor) mergeState(sfp statsProcessor) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (smp *statsMaxProcessor) updateStateForColumn(br *blockResult, c *blockResultColumn) {
|
func (smp *statsMaxProcessor) updateStateForColumn(br *blockResult, c *blockResultColumn) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if c.isTime {
|
if c.isTime {
|
||||||
// Special case for time column
|
maxTimestamp := br.getMaxTimestamp()
|
||||||
timestamps := br.timestamps
|
|
||||||
maxTimestamp := timestamps[len(timestamps)-1]
|
|
||||||
for _, timestamp := range timestamps[:len(timestamps)-1] {
|
|
||||||
if timestamp > maxTimestamp {
|
|
||||||
maxTimestamp = timestamp
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
bb := bbPool.Get()
|
bb := bbPool.Get()
|
||||||
bb.B = marshalTimestampRFC3339NanoString(bb.B[:0], maxTimestamp)
|
bb.B = marshalTimestampRFC3339NanoString(bb.B[:0], maxTimestamp)
|
||||||
smp.updateStateBytes(bb.B)
|
smp.updateStateBytes(bb.B)
|
||||||
|
|
|
@ -82,20 +82,12 @@ func (smp *statsMinProcessor) mergeState(sfp statsProcessor) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (smp *statsMinProcessor) updateStateForColumn(br *blockResult, c *blockResultColumn) {
|
func (smp *statsMinProcessor) updateStateForColumn(br *blockResult, c *blockResultColumn) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if c.isTime {
|
if c.isTime {
|
||||||
// Special case for time column
|
minTimestamp := br.getMinTimestamp()
|
||||||
timestamps := br.timestamps
|
|
||||||
minTimestamp := timestamps[0]
|
|
||||||
for _, timestamp := range timestamps[1:] {
|
|
||||||
if timestamp < minTimestamp {
|
|
||||||
minTimestamp = timestamp
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
bb := bbPool.Get()
|
bb := bbPool.Get()
|
||||||
bb.B = marshalTimestampRFC3339NanoString(bb.B[:0], minTimestamp)
|
bb.B = marshalTimestampRFC3339NanoString(bb.B[:0], minTimestamp)
|
||||||
smp.updateStateBytes(bb.B)
|
smp.updateStateBytes(bb.B)
|
||||||
|
|
|
@ -96,7 +96,7 @@ func (sqp *statsQuantileProcessor) updateStateForColumn(br *blockResult, c *bloc
|
||||||
if c.isConst {
|
if c.isConst {
|
||||||
f, ok := tryParseFloat64(c.valuesEncoded[0])
|
f, ok := tryParseFloat64(c.valuesEncoded[0])
|
||||||
if ok {
|
if ok {
|
||||||
for range br.timestamps {
|
for i := 0; i < br.rowsLen; i++ {
|
||||||
stateSizeIncrease += h.update(f)
|
stateSizeIncrease += h.update(f)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -39,7 +39,7 @@ type statsRowAnyProcessor struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sap *statsRowAnyProcessor) updateStatsForAllRows(br *blockResult) int {
|
func (sap *statsRowAnyProcessor) updateStatsForAllRows(br *blockResult) int {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
if sap.captured {
|
if sap.captured {
|
||||||
|
|
|
@ -60,8 +60,9 @@ func (smp *statsRowMaxProcessor) updateStatsForAllRows(br *blockResult) int {
|
||||||
return stateSizeIncrease
|
return stateSizeIncrease
|
||||||
}
|
}
|
||||||
if c.isTime {
|
if c.isTime {
|
||||||
|
maxTimestamp := br.getMaxTimestamp()
|
||||||
bb := bbPool.Get()
|
bb := bbPool.Get()
|
||||||
bb.B = marshalTimestampRFC3339NanoString(bb.B[:0], br.timestamps[0])
|
bb.B = marshalTimestampRFC3339NanoString(bb.B[:0], maxTimestamp)
|
||||||
v := bytesutil.ToUnsafeString(bb.B)
|
v := bytesutil.ToUnsafeString(bb.B)
|
||||||
stateSizeIncrease += smp.updateState(v, br, 0)
|
stateSizeIncrease += smp.updateState(v, br, 0)
|
||||||
bbPool.Put(bb)
|
bbPool.Put(bb)
|
||||||
|
@ -124,8 +125,9 @@ func (smp *statsRowMaxProcessor) updateStatsForRow(br *blockResult, rowIdx int)
|
||||||
return stateSizeIncrease
|
return stateSizeIncrease
|
||||||
}
|
}
|
||||||
if c.isTime {
|
if c.isTime {
|
||||||
|
timestamps := br.getTimestamps()
|
||||||
bb := bbPool.Get()
|
bb := bbPool.Get()
|
||||||
bb.B = marshalTimestampRFC3339NanoString(bb.B[:0], br.timestamps[rowIdx])
|
bb.B = marshalTimestampRFC3339NanoString(bb.B[:0], timestamps[rowIdx])
|
||||||
v := bytesutil.ToUnsafeString(bb.B)
|
v := bytesutil.ToUnsafeString(bb.B)
|
||||||
stateSizeIncrease += smp.updateState(v, br, rowIdx)
|
stateSizeIncrease += smp.updateState(v, br, rowIdx)
|
||||||
bbPool.Put(bb)
|
bbPool.Put(bb)
|
||||||
|
|
|
@ -60,8 +60,9 @@ func (smp *statsRowMinProcessor) updateStatsForAllRows(br *blockResult) int {
|
||||||
return stateSizeIncrease
|
return stateSizeIncrease
|
||||||
}
|
}
|
||||||
if c.isTime {
|
if c.isTime {
|
||||||
|
minTimestamp := br.getMinTimestamp()
|
||||||
bb := bbPool.Get()
|
bb := bbPool.Get()
|
||||||
bb.B = marshalTimestampRFC3339NanoString(bb.B[:0], br.timestamps[0])
|
bb.B = marshalTimestampRFC3339NanoString(bb.B[:0], minTimestamp)
|
||||||
v := bytesutil.ToUnsafeString(bb.B)
|
v := bytesutil.ToUnsafeString(bb.B)
|
||||||
stateSizeIncrease += smp.updateState(v, br, 0)
|
stateSizeIncrease += smp.updateState(v, br, 0)
|
||||||
bbPool.Put(bb)
|
bbPool.Put(bb)
|
||||||
|
@ -124,8 +125,9 @@ func (smp *statsRowMinProcessor) updateStatsForRow(br *blockResult, rowIdx int)
|
||||||
return stateSizeIncrease
|
return stateSizeIncrease
|
||||||
}
|
}
|
||||||
if c.isTime {
|
if c.isTime {
|
||||||
|
timestamps := br.getTimestamps()
|
||||||
bb := bbPool.Get()
|
bb := bbPool.Get()
|
||||||
bb.B = marshalTimestampRFC3339NanoString(bb.B[:0], br.timestamps[rowIdx])
|
bb.B = marshalTimestampRFC3339NanoString(bb.B[:0], timestamps[rowIdx])
|
||||||
v := bytesutil.ToUnsafeString(bb.B)
|
v := bytesutil.ToUnsafeString(bb.B)
|
||||||
stateSizeIncrease += smp.updateState(v, br, rowIdx)
|
stateSizeIncrease += smp.updateState(v, br, rowIdx)
|
||||||
bbPool.Put(bb)
|
bbPool.Put(bb)
|
||||||
|
|
|
@ -64,12 +64,12 @@ func (svp *statsValuesProcessor) updateStatsForAllRowsColumn(c *blockResultColum
|
||||||
stateSizeIncrease += len(v)
|
stateSizeIncrease += len(v)
|
||||||
|
|
||||||
values := svp.values
|
values := svp.values
|
||||||
for range br.timestamps {
|
for i := 0; i < br.rowsLen; i++ {
|
||||||
values = append(values, v)
|
values = append(values, v)
|
||||||
}
|
}
|
||||||
svp.values = values
|
svp.values = values
|
||||||
|
|
||||||
stateSizeIncrease += len(br.timestamps) * int(unsafe.Sizeof(values[0]))
|
stateSizeIncrease += br.rowsLen * int(unsafe.Sizeof(values[0]))
|
||||||
return stateSizeIncrease
|
return stateSizeIncrease
|
||||||
}
|
}
|
||||||
if c.valueType == valueTypeDict {
|
if c.valueType == valueTypeDict {
|
||||||
|
@ -86,7 +86,7 @@ func (svp *statsValuesProcessor) updateStatsForAllRowsColumn(c *blockResultColum
|
||||||
}
|
}
|
||||||
svp.values = values
|
svp.values = values
|
||||||
|
|
||||||
stateSizeIncrease += len(br.timestamps) * int(unsafe.Sizeof(values[0]))
|
stateSizeIncrease += br.rowsLen * int(unsafe.Sizeof(values[0]))
|
||||||
return stateSizeIncrease
|
return stateSizeIncrease
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -100,7 +100,7 @@ func (svp *statsValuesProcessor) updateStatsForAllRowsColumn(c *blockResultColum
|
||||||
}
|
}
|
||||||
svp.values = values
|
svp.values = values
|
||||||
|
|
||||||
stateSizeIncrease += len(br.timestamps) * int(unsafe.Sizeof(values[0]))
|
stateSizeIncrease += br.rowsLen * int(unsafe.Sizeof(values[0]))
|
||||||
return stateSizeIncrease
|
return stateSizeIncrease
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -86,7 +86,7 @@ func (s *Storage) RunQuery(ctx context.Context, tenantIDs []TenantID, q *Query,
|
||||||
}
|
}
|
||||||
|
|
||||||
writeBlockResult := func(workerID uint, br *blockResult) {
|
writeBlockResult := func(workerID uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -101,7 +101,9 @@ func (s *Storage) RunQuery(ctx context.Context, tenantIDs []TenantID, q *Query,
|
||||||
Values: values,
|
Values: values,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
writeBlock(workerID, br.timestamps, csDst)
|
|
||||||
|
timestamps := br.getTimestamps()
|
||||||
|
writeBlock(workerID, timestamps, csDst)
|
||||||
|
|
||||||
brs.cs = csDst
|
brs.cs = csDst
|
||||||
putBlockRows(brs)
|
putBlockRows(brs)
|
||||||
|
@ -233,7 +235,7 @@ func (s *Storage) getFieldValuesNoHits(ctx context.Context, tenantIDs []TenantID
|
||||||
var values []string
|
var values []string
|
||||||
var valuesLock sync.Mutex
|
var valuesLock sync.Mutex
|
||||||
writeBlockResult := func(_ uint, br *blockResult) {
|
writeBlockResult := func(_ uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -396,7 +398,7 @@ func (s *Storage) runValuesWithHitsQuery(ctx context.Context, tenantIDs []Tenant
|
||||||
var results []ValueWithHits
|
var results []ValueWithHits
|
||||||
var resultsLock sync.Mutex
|
var resultsLock sync.Mutex
|
||||||
writeBlockResult := func(_ uint, br *blockResult) {
|
writeBlockResult := func(_ uint, br *blockResult) {
|
||||||
if len(br.timestamps) == 0 {
|
if br.rowsLen == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -656,7 +658,7 @@ func (s *Storage) search(workersCount int, so *genericSearchOptions, stopCh <-ch
|
||||||
}
|
}
|
||||||
|
|
||||||
bs.search(bsw, bm)
|
bs.search(bsw, bm)
|
||||||
if len(bs.br.timestamps) > 0 {
|
if bs.br.rowsLen > 0 {
|
||||||
processBlockResult(workerID, &bs.br)
|
processBlockResult(workerID, &bs.br)
|
||||||
}
|
}
|
||||||
bsw.reset()
|
bsw.reset()
|
||||||
|
|
|
@ -876,7 +876,7 @@ func TestStorageSearch(t *testing.T) {
|
||||||
so := newTestGenericSearchOptions([]TenantID{tenantID}, f, []string{"_msg"})
|
so := newTestGenericSearchOptions([]TenantID{tenantID}, f, []string{"_msg"})
|
||||||
var rowsCountTotal atomic.Uint32
|
var rowsCountTotal atomic.Uint32
|
||||||
processBlock := func(_ uint, br *blockResult) {
|
processBlock := func(_ uint, br *blockResult) {
|
||||||
rowsCountTotal.Add(uint32(len(br.timestamps)))
|
rowsCountTotal.Add(uint32(br.rowsLen))
|
||||||
}
|
}
|
||||||
s.search(workersCount, so, nil, processBlock)
|
s.search(workersCount, so, nil, processBlock)
|
||||||
|
|
||||||
|
@ -893,7 +893,7 @@ func TestStorageSearch(t *testing.T) {
|
||||||
so := newTestGenericSearchOptions(allTenantIDs, f, []string{"_msg"})
|
so := newTestGenericSearchOptions(allTenantIDs, f, []string{"_msg"})
|
||||||
var rowsCountTotal atomic.Uint32
|
var rowsCountTotal atomic.Uint32
|
||||||
processBlock := func(_ uint, br *blockResult) {
|
processBlock := func(_ uint, br *blockResult) {
|
||||||
rowsCountTotal.Add(uint32(len(br.timestamps)))
|
rowsCountTotal.Add(uint32(br.rowsLen))
|
||||||
}
|
}
|
||||||
s.search(workersCount, so, nil, processBlock)
|
s.search(workersCount, so, nil, processBlock)
|
||||||
|
|
||||||
|
@ -926,7 +926,7 @@ func TestStorageSearch(t *testing.T) {
|
||||||
so := newTestGenericSearchOptions([]TenantID{tenantID}, f, []string{"_msg"})
|
so := newTestGenericSearchOptions([]TenantID{tenantID}, f, []string{"_msg"})
|
||||||
var rowsCountTotal atomic.Uint32
|
var rowsCountTotal atomic.Uint32
|
||||||
processBlock := func(_ uint, br *blockResult) {
|
processBlock := func(_ uint, br *blockResult) {
|
||||||
rowsCountTotal.Add(uint32(len(br.timestamps)))
|
rowsCountTotal.Add(uint32(br.rowsLen))
|
||||||
}
|
}
|
||||||
s.search(workersCount, so, nil, processBlock)
|
s.search(workersCount, so, nil, processBlock)
|
||||||
|
|
||||||
|
@ -948,7 +948,7 @@ func TestStorageSearch(t *testing.T) {
|
||||||
so := newTestGenericSearchOptions([]TenantID{tenantID}, f, []string{"_msg"})
|
so := newTestGenericSearchOptions([]TenantID{tenantID}, f, []string{"_msg"})
|
||||||
var rowsCountTotal atomic.Uint32
|
var rowsCountTotal atomic.Uint32
|
||||||
processBlock := func(_ uint, br *blockResult) {
|
processBlock := func(_ uint, br *blockResult) {
|
||||||
rowsCountTotal.Add(uint32(len(br.timestamps)))
|
rowsCountTotal.Add(uint32(br.rowsLen))
|
||||||
}
|
}
|
||||||
s.search(workersCount, so, nil, processBlock)
|
s.search(workersCount, so, nil, processBlock)
|
||||||
|
|
||||||
|
@ -978,7 +978,7 @@ func TestStorageSearch(t *testing.T) {
|
||||||
so := newTestGenericSearchOptions([]TenantID{tenantID}, f, []string{"_msg"})
|
so := newTestGenericSearchOptions([]TenantID{tenantID}, f, []string{"_msg"})
|
||||||
var rowsCountTotal atomic.Uint32
|
var rowsCountTotal atomic.Uint32
|
||||||
processBlock := func(_ uint, br *blockResult) {
|
processBlock := func(_ uint, br *blockResult) {
|
||||||
rowsCountTotal.Add(uint32(len(br.timestamps)))
|
rowsCountTotal.Add(uint32(br.rowsLen))
|
||||||
}
|
}
|
||||||
s.search(workersCount, so, nil, processBlock)
|
s.search(workersCount, so, nil, processBlock)
|
||||||
|
|
||||||
|
@ -999,7 +999,7 @@ func TestStorageSearch(t *testing.T) {
|
||||||
so := newTestGenericSearchOptions([]TenantID{tenantID}, f, []string{"_msg"})
|
so := newTestGenericSearchOptions([]TenantID{tenantID}, f, []string{"_msg"})
|
||||||
var rowsCountTotal atomic.Uint32
|
var rowsCountTotal atomic.Uint32
|
||||||
processBlock := func(_ uint, br *blockResult) {
|
processBlock := func(_ uint, br *blockResult) {
|
||||||
rowsCountTotal.Add(uint32(len(br.timestamps)))
|
rowsCountTotal.Add(uint32(br.rowsLen))
|
||||||
}
|
}
|
||||||
s.search(workersCount, so, nil, processBlock)
|
s.search(workersCount, so, nil, processBlock)
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue