Aliaksandr Valialkin 2024-05-11 03:13:34 +02:00
parent 8b4d4e00bc
commit 5556dda356
GPG key ID: 52C003EE2BCDB9EB
20 changed files with 65 additions and 121 deletions

View file

@@ -5,6 +5,7 @@ import (
 	"sync"
 
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fastnum"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
 )
 
 // CalibrateScale calibrates a and b with the corresponding exponents ae, be
@@ -81,29 +82,17 @@ var decimalMultipliers = []int64{1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e
 // ExtendFloat64sCapacity extends dst capacity to hold additionalItems
 // and returns the extended dst.
 func ExtendFloat64sCapacity(dst []float64, additionalItems int) []float64 {
-	dstLen := len(dst)
-	if n := dstLen + additionalItems - cap(dst); n > 0 {
-		dst = append(dst[:cap(dst)], make([]float64, n)...)
-	}
-	return dst[:dstLen]
+	return slicesutil.ExtendCapacity(dst, additionalItems)
 }
 
 // ExtendInt64sCapacity extends dst capacity to hold additionalItems
 // and returns the extended dst.
 func ExtendInt64sCapacity(dst []int64, additionalItems int) []int64 {
-	dstLen := len(dst)
-	if n := dstLen + additionalItems - cap(dst); n > 0 {
-		dst = append(dst[:cap(dst)], make([]int64, n)...)
-	}
-	return dst[:dstLen]
+	return slicesutil.ExtendCapacity(dst, additionalItems)
 }
 
 func extendInt16sCapacity(dst []int16, additionalItems int) []int16 {
-	dstLen := len(dst)
-	if n := dstLen + additionalItems - cap(dst); n > 0 {
-		dst = append(dst[:cap(dst)], make([]int16, n)...)
-	}
-	return dst[:dstLen]
+	return slicesutil.ExtendCapacity(dst, additionalItems)
 }
 
 // AppendDecimalToFloat converts each item in va to f=v*10^e, appends it
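
The new lib/slicesutil package itself is not part of this excerpt. Judging from the call sites in this commit, the generic helper presumably looks roughly like the sketch below (an assumption inferred from the removed per-type functions, not the actual implementation): it factors out the grow-capacity-without-changing-length idiom that each file previously duplicated.

package slicesutil

// ExtendCapacity returns dst whose capacity is extended so it can hold
// additionalItems more items without re-allocation. The returned slice
// keeps the original length of dst.
//
// Sketch inferred from the call sites in this diff; the real
// lib/slicesutil implementation may differ.
func ExtendCapacity[T any](dst []T, additionalItems int) []T {
	dstLen := len(dst)
	if n := dstLen + additionalItems - cap(dst); n > 0 {
		dst = append(dst[:cap(dst)], make([]T, n)...)
	}
	return dst[:dstLen]
}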

View file

@@ -2,6 +2,8 @@ package encoding
 
 import (
 	"sync"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
 )
 
 // GetFloat64s returns a slice of float64 values with the given size.
@@ -14,9 +16,7 @@ func GetFloat64s(size int) *Float64s {
 		v = &Float64s{}
 	}
 	a := v.(*Float64s)
-	if n := len(a.A) + size - cap(a.A); n > 0 {
-		a.A = append(a.A[:cap(a.A)], make([]float64, n)...)
-	}
+	a.A = slicesutil.ExtendCapacity(a.A, size)
 	a.A = a.A[:size]
 	return a
 }

View file

@@ -4,6 +4,8 @@ import (
 	"encoding/binary"
 	"fmt"
 	"sync"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
 )
 
 // MarshalUint16 appends marshaled v to dst and returns the result.
@@ -517,9 +519,7 @@ func GetInt64s(size int) *Int64s {
 		}
 	}
 	is := v.(*Int64s)
-	if n := len(is.A) + size - cap(is.A); n > 0 {
-		is.A = append(is.A[:cap(is.A)], make([]int64, n)...)
-	}
+	is.A = slicesutil.ExtendCapacity(is.A, size)
 	is.A = is.A[:size]
 	return is
 }
@@ -546,9 +546,7 @@ func GetUint64s(size int) *Uint64s {
 		}
 	}
 	is := v.(*Uint64s)
-	if n := len(is.A) + size - cap(is.A); n > 0 {
-		is.A = append(is.A[:cap(is.A)], make([]uint64, n)...)
-	}
+	is.A = slicesutil.ExtendCapacity(is.A, size)
 	is.A = is.A[:size]
 	return is
 }
@@ -575,9 +573,7 @@ func GetUint32s(size int) *Uint32s {
 		}
 	}
 	is := v.(*Uint32s)
-	if n := len(is.A) + size - cap(is.A); n > 0 {
-		is.A = append(is.A[:cap(is.A)], make([]uint32, n)...)
-	}
+	is.A = slicesutil.ExtendCapacity(is.A, size)
 	is.A = is.A[:size]
 	return is
 }

View file

@@ -4,6 +4,7 @@ import (
 	"fmt"
 
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
 )
 
 // marshalInt64NearestDelta2 encodes src using `nearest delta2` encoding
@@ -70,9 +71,7 @@ func unmarshalInt64NearestDelta2(dst []int64, src []byte, firstValue int64, item
 	}
 	dstLen := len(dst)
-	if n := dstLen + itemsCount - cap(dst); n > 0 {
-		dst = append(dst[:cap(dst)], make([]int64, n)...)
-	}
+	dst = slicesutil.ExtendCapacity(dst, itemsCount)
 	dst = dst[:dstLen+itemsCount]
 	as := dst[dstLen:]

View file

@@ -5,6 +5,7 @@ import (
 	"sync"
 
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
 )
 
 func getBitmap(bitsLen int) *bitmap {
@@ -46,9 +47,7 @@ func (bm *bitmap) copyFrom(src *bitmap) {
 func (bm *bitmap) init(bitsLen int) {
 	a := bm.a
 	wordsLen := (bitsLen + 63) / 64
-	if n := len(a) + wordsLen - cap(a); n > 0 {
-		a = append(a[:cap(a)], make([]uint64, n)...)
-	}
+	a = slicesutil.ExtendCapacity(a, wordsLen)
 	a = a[:wordsLen]
 	bm.a = a
 	bm.bitsLen = bitsLen

View file

@@ -8,6 +8,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
 )
 
 // block represents a block of log entries.
@@ -142,13 +143,9 @@ func (c *column) canStoreInConstColumn() bool {
 }
 
 func (c *column) resizeValues(valuesLen int) []string {
-	values := c.values
-	if n := len(values) + valuesLen - cap(values); n > 0 {
-		values = append(values[:cap(values)], make([]string, n)...)
-	}
-	values = values[:valuesLen]
-	c.values = values
-	return values
+	values := slicesutil.ExtendCapacity(c.values, valuesLen)
+	c.values = values[:valuesLen]
+	return c.values
 }
 
 // mustWriteTo writes c to sw and updates ch accordingly.
@@ -370,13 +367,9 @@ func (b *block) extendColumns() *column {
 }
 
 func (b *block) resizeColumns(columnsLen int) []column {
-	cs := b.columns
-	if n := len(cs) + columnsLen - cap(cs); n > 0 {
-		cs = append(cs[:cap(cs)], make([]column, n)...)
-	}
-	cs = cs[:columnsLen]
-	b.columns = cs
-	return cs
+	cs := slicesutil.ExtendCapacity(b.columns, columnsLen)
+	b.columns = cs[:columnsLen]
+	return b.columns
 }
 
 func (b *block) sortColumnsByName() {

View file

@@ -4,6 +4,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
 )
 
 // blockData contains packed data for a single block.
@@ -52,13 +53,9 @@ func (bd *blockData) reset() {
 }
 
 func (bd *blockData) resizeColumnsData(columnsDataLen int) []columnData {
-	cds := bd.columnsData
-	if n := len(cds) + columnsDataLen - cap(cds); n > 0 {
-		cds = append(cds[:cap(cds)], make([]columnData, n)...)
-	}
-	cds = cds[:columnsDataLen]
-	bd.columnsData = cds
-	return cds
+	cds := slicesutil.ExtendCapacity(bd.columnsData, columnsDataLen)
+	bd.columnsData = cds[:columnsDataLen]
+	return bd.columnsData
 }
 
 // copyFrom copies src to bd.

View file

@@ -8,6 +8,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
 )
 
 // blockHeader contains information about a single block.
@@ -263,23 +264,15 @@ func (csh *columnsHeader) getColumnHeader(name string) *columnHeader {
 }
 
 func (csh *columnsHeader) resizeConstColumns(columnsLen int) []Field {
-	ccs := csh.constColumns
-	if n := len(ccs) + columnsLen - cap(ccs); n > 0 {
-		ccs = append(ccs[:cap(ccs)], make([]Field, n)...)
-	}
-	ccs = ccs[:columnsLen]
-	csh.constColumns = ccs
-	return ccs
+	ccs := slicesutil.ExtendCapacity(csh.constColumns, columnsLen)
+	csh.constColumns = ccs[:columnsLen]
+	return csh.constColumns
 }
 
 func (csh *columnsHeader) resizeColumnHeaders(columnHeadersLen int) []columnHeader {
-	chs := csh.columnHeaders
-	if n := len(chs) + columnHeadersLen - cap(chs); n > 0 {
-		chs = append(chs[:cap(chs)], make([]columnHeader, n)...)
-	}
-	chs = chs[:columnHeadersLen]
-	csh.columnHeaders = chs
-	return chs
+	chs := slicesutil.ExtendCapacity(csh.columnHeaders, columnHeadersLen)
+	csh.columnHeaders = chs[:columnHeadersLen]
+	return csh.columnHeaders
 }
 
 func (csh *columnsHeader) marshal(dst []byte) []byte {

View file

@@ -5,9 +5,11 @@ import (
 	"sync"
 	"unsafe"
 
+	"github.com/cespare/xxhash/v2"
+
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
-	"github.com/cespare/xxhash/v2"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
 )
 
 // bloomFilterHashesCount is the number of different hashes to use for bloom filter.
@@ -53,10 +55,7 @@ func (bf *bloomFilter) unmarshal(src []byte) error {
 	}
 	bf.reset()
 	wordsCount := len(src) / 8
-	bits := bf.bits
-	if n := len(bits) + wordsCount - cap(bits); n > 0 {
-		bits = append(bits[:cap(bits)], make([]uint64, n)...)
-	}
+	bits := slicesutil.ExtendCapacity(bf.bits, wordsCount)
 	bits = bits[:wordsCount]
 	for i := range bits {
 		bits[i] = encoding.UnmarshalUint64(src)
@@ -70,10 +69,7 @@ func (bf *bloomFilter) unmarshal(src []byte) error {
 func (bf *bloomFilter) mustInit(tokens []string) {
 	bitsCount := len(tokens) * bloomFilterBitsPerItem
 	wordsCount := (bitsCount + 63) / 64
-	bits := bf.bits
-	if n := len(bits) + wordsCount - cap(bits); n > 0 {
-		bits = append(bits[:cap(bits)], make([]uint64, n)...)
-	}
+	bits := slicesutil.ExtendCapacity(bf.bits, wordsCount)
 	bits = bits[:wordsCount]
 	bloomFilterAdd(bits, tokens)
 	bf.bits = bits

View file

@@ -9,6 +9,7 @@ import (
 	"sync/atomic"
 
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
 )
 
 // genericSearchOptions contain options used for search.
@@ -197,11 +198,7 @@ func getEmptyStrings(rowsCount int) []string {
 		return values
 	}
 	values := *p
-	if n := len(values) + rowsCount - cap(values); n > 0 {
-		valuesNew := append(values[:cap(values)], make([]string, n)...)
-		emptyStrings.Store(&valuesNew)
-		values = valuesNew
-	}
+	values = slicesutil.ExtendCapacity(values, rowsCount)
 	return values[:rowsCount]
 }

View file

@@ -7,6 +7,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
 )
 
 type blockHeader struct {
@@ -160,9 +161,7 @@ func unmarshalBlockHeadersNoCopy(dst []blockHeader, src []byte, blockHeadersCoun
 		logger.Panicf("BUG: blockHeadersCount must be greater than 0; got %d", blockHeadersCount)
 	}
 	dstLen := len(dst)
-	if n := dstLen + blockHeadersCount - cap(dst); n > 0 {
-		dst = append(dst[:cap(dst)], make([]blockHeader, n)...)
-	}
+	dst = slicesutil.ExtendCapacity(dst, blockHeadersCount)
 	dst = dst[:dstLen+blockHeadersCount]
 	for i := 0; i < blockHeadersCount; i++ {
 		tail, err := dst[dstLen+i].UnmarshalNoCopy(src)

View file

@@ -12,6 +12,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
 )
 
 // Item represents a single item for storing in a mergeset.
@@ -412,9 +413,7 @@ func (ib *inmemoryBlock) UnmarshalData(sb *storageBlock, firstItem, commonPrefix
 	// since the data isn't going to be resized after unmarshaling.
 	// This may save memory for caching the unmarshaled block.
 	data := bytesutil.ResizeNoCopyNoOverallocate(ib.data, dataLen)
-	if n := len(ib.items) + int(itemsCount) - cap(ib.items); n > 0 {
-		ib.items = append(ib.items[:cap(ib.items)], make([]Item, n)...)
-	}
+	ib.items = slicesutil.ExtendCapacity(ib.items, int(itemsCount))
 	ib.items = ib.items[:itemsCount]
 	data = append(data[:0], firstItem...)
 	items := ib.items

View file

@@ -7,6 +7,7 @@ import (
 	"io"
 
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
 )
 
 // TableSearch is a reusable cursor used for searching in the Table.
@@ -71,9 +72,7 @@ func (ts *TableSearch) Init(tb *Table) {
 	ts.pws = ts.tb.getParts(ts.pws[:0])
 
 	// Initialize the psPool.
-	if n := len(ts.psPool) + len(ts.pws) - cap(ts.psPool); n > 0 {
-		ts.psPool = append(ts.psPool[:cap(ts.psPool)], make([]partSearch, n)...)
-	}
+	ts.psPool = slicesutil.ExtendCapacity(ts.psPool, len(ts.pws))
 	ts.psPool = ts.psPool[:len(ts.pws)]
 	for i, pw := range ts.pws {
 		ts.psPool[i].Init(pw.p)

View file

@@ -2,15 +2,15 @@ package prompbmarshal
 
 import (
 	"fmt"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
 )
 
 // MarshalProtobuf marshals wr to dst and returns the result.
 func (wr *WriteRequest) MarshalProtobuf(dst []byte) []byte {
 	size := wr.Size()
 	dstLen := len(dst)
-	if n := dstLen + size - cap(dst); n > 0 {
-		dst = append(dst[:cap(dst)], make([]byte, n)...)
-	}
+	dst = slicesutil.ExtendCapacity(dst, size)
 	dst = dst[:dstLen+size]
 	n, err := wr.MarshalToSizedBuffer(dst[dstLen:])
 	if err != nil {

View file

@@ -7,6 +7,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
 )
 
 // blockHeader is a header for a time series block.
@@ -252,10 +253,7 @@ func unmarshalBlockHeaders(dst []blockHeader, src []byte, blockHeadersCount int)
 		logger.Panicf("BUG: blockHeadersCount must be greater than zero; got %d", blockHeadersCount)
 	}
 	dstLen := len(dst)
-	if n := dstLen + blockHeadersCount - cap(dst); n > 0 {
-		dst = append(dst[:cap(dst)], make([]blockHeader, n)...)
-		dst = dst[:dstLen]
-	}
+	dst = slicesutil.ExtendCapacity(dst, blockHeadersCount)
 	var bh blockHeader
 	for len(src) > 0 {
 		tmp, err := bh.Unmarshal(src)

View file

@@ -15,6 +15,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
 )
 
 const (
@@ -702,9 +703,7 @@ func (mn *MetricName) sortTags() {
 	}
 
 	cts := getCanonicalTags()
-	if n := len(cts.tags) + len(mn.Tags) - cap(cts.tags); n > 0 {
-		cts.tags = append(cts.tags[:cap(cts.tags)], make([]canonicalTag, n)...)
-	}
+	cts.tags = slicesutil.ExtendCapacity(cts.tags, len(mn.Tags))
 	dst := cts.tags[:len(mn.Tags)]
 	for i := range mn.Tags {
 		tag := &mn.Tags[i]
@@ -775,9 +774,7 @@ func (ts *canonicalTagsSort) Swap(i, j int) {
 
 func copyTags(dst, src []Tag) []Tag {
 	dstLen := len(dst)
-	if n := dstLen + len(src) - cap(dst); n > 0 {
-		dst = append(dst[:cap(dst)], make([]Tag, n)...)
-	}
+	dst = slicesutil.ExtendCapacity(dst, len(src))
 	dst = dst[:dstLen+len(src)]
 	for i := range src {
 		dst[dstLen+i].copyFrom(&src[i])

View file

@@ -6,6 +6,7 @@ import (
 	"io"
 
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
 )
 
 // partitionSearch represents a search in the partition.
@@ -83,9 +84,7 @@ func (pts *partitionSearch) Init(pt *partition, tsids []TSID, tr TimeRange) {
 	pts.pws = pt.GetParts(pts.pws[:0], true)
 
 	// Initialize psPool.
-	if n := len(pts.psPool) + len(pts.pws) - cap(pts.psPool); n > 0 {
-		pts.psPool = append(pts.psPool[:cap(pts.psPool)], make([]partSearch, n)...)
-	}
+	pts.psPool = slicesutil.ExtendCapacity(pts.psPool, len(pts.pws))
 	pts.psPool = pts.psPool[:len(pts.pws)]
 	for i, pw := range pts.pws {
 		pts.psPool[i].Init(pw.p, tsids, tr)

View file

@@ -10,6 +10,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/stringsutil"
 )
@@ -413,9 +414,7 @@ func (sq *SearchQuery) Unmarshal(src []byte) ([]byte, error) {
 	if err != nil {
 		return src, fmt.Errorf("cannot unmarshal the count of TagFilterss: %w", err)
 	}
-	if n := len(sq.TagFilterss) + int(tfssCount) - cap(sq.TagFilterss); n > 0 {
-		sq.TagFilterss = append(sq.TagFilterss[:cap(sq.TagFilterss)], make([][]TagFilter, n)...)
-	}
+	sq.TagFilterss = slicesutil.ExtendCapacity(sq.TagFilterss, int(tfssCount))
 	sq.TagFilterss = sq.TagFilterss[:tfssCount]
 	src = tail
@@ -427,9 +426,7 @@ func (sq *SearchQuery) Unmarshal(src []byte) ([]byte, error) {
 		src = tail
 
 		tagFilters := sq.TagFilterss[i]
-		if n := len(tagFilters) + int(tfsCount) - cap(tagFilters); n > 0 {
-			tagFilters = append(tagFilters[:cap(tagFilters)], make([]TagFilter, n)...)
-		}
+		tagFilters = slicesutil.ExtendCapacity(tagFilters, int(tfsCount))
 		tagFilters = tagFilters[:tfsCount]
 		for j := 0; j < int(tfsCount); j++ {
 			tail, err := tagFilters[j].Unmarshal(src)

View file

@@ -7,6 +7,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
 )
 
 // tableSearch performs searches in the table.
@@ -84,9 +85,7 @@ func (ts *tableSearch) Init(tb *table, tsids []TSID, tr TimeRange) {
 	ts.ptws = tb.GetPartitions(ts.ptws[:0])
 
 	// Initialize the ptsPool.
-	if n := len(ts.ptsPool) + len(ts.ptws) - cap(ts.ptsPool); n > 0 {
-		ts.ptsPool = append(ts.ptsPool[:cap(ts.ptsPool)], make([]partitionSearch, n)...)
-	}
+	ts.ptsPool = slicesutil.ExtendCapacity(ts.ptsPool, len(ts.ptws))
 	ts.ptsPool = ts.ptsPool[:len(ts.ptws)]
 	for i, ptw := range ts.ptws {
 		ts.ptsPool[i].Init(ptw.pt, tsids, tr)

View file

@@ -6,6 +6,8 @@ import (
 	"sync"
 	"sync/atomic"
 	"unsafe"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
 )
 
 // Set is a fast set for uint64.
@@ -226,12 +228,8 @@ func (s *Set) AppendTo(dst []uint64) []uint64 {
 	}
 
 	// pre-allocate memory for dst
-	dstLen := len(dst)
 	sLen := s.Len()
-	if n := dstLen + sLen - cap(dst); n > 0 {
-		dst = append(dst[:cap(dst)], make([]uint64, n)...)
-		dst = dst[:dstLen]
-	}
+	dst = slicesutil.ExtendCapacity(dst, sLen)
 	s.sort()
 	for i := range s.buckets {
 		dst = s.buckets[i].appendTo(dst)
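
Across the call sites above, the pattern is the same: extend the capacity first, then reslice to the target length and fill in the new items. Below is a small usage sketch of that contract, with hypothetical values and assuming the generic signature sketched after the first file.

package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
)

func main() {
	dst := []int64{1, 2, 3}
	dstLen := len(dst)

	// Reserve room for 5 more items; the length is unchanged,
	// only the capacity grows if needed.
	dst = slicesutil.ExtendCapacity(dst, 5)
	fmt.Println(len(dst) == dstLen, cap(dst) >= dstLen+5) // true true

	// Call sites then reslice to the final length before filling
	// in the new items, avoiding further allocations.
	dst = dst[:dstLen+5]
	fmt.Println(len(dst)) // 8
}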