diff --git a/app/vminsert/common/insert_ctx.go b/app/vminsert/common/insert_ctx.go index 02be393ea..e5ce6d3ea 100644 --- a/app/vminsert/common/insert_ctx.go +++ b/app/vminsert/common/insert_ctx.go @@ -9,6 +9,7 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver" "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage" ) @@ -37,12 +38,9 @@ func (ctx *InsertCtx) Reset(rowsLen int) { for i := range mrs { cleanMetricRow(&mrs[i]) } + mrs = slicesutil.SetLength(mrs, rowsLen) ctx.mrs = mrs[:0] - if n := rowsLen - cap(ctx.mrs); n > 0 { - ctx.mrs = append(ctx.mrs[:cap(ctx.mrs)], make([]storage.MetricRow, n)...) - } - ctx.mrs = ctx.mrs[:0] ctx.metricNamesBuf = ctx.metricNamesBuf[:0] ctx.relabelCtx.Reset() ctx.streamAggrCtx.Reset() diff --git a/app/vmselect/promql/rollup_result_cache.go b/app/vmselect/promql/rollup_result_cache.go index 7687d13e1..aee2c2fa8 100644 --- a/app/vmselect/promql/rollup_result_cache.go +++ b/app/vmselect/promql/rollup_result_cache.go @@ -16,6 +16,7 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" "github.com/VictoriaMetrics/VictoriaMetrics/lib/memory" "github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage" "github.com/VictoriaMetrics/VictoriaMetrics/lib/stringsutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/workingsetcache" @@ -736,10 +737,7 @@ func (mi *rollupResultCacheMetainfo) Unmarshal(src []byte) error { } entriesLen := int(encoding.UnmarshalUint32(src)) src = src[4:] - if n := entriesLen - cap(mi.entries); n > 0 { - mi.entries = append(mi.entries[:cap(mi.entries)], make([]rollupResultCacheMetainfoEntry, n)...) 
- } - mi.entries = mi.entries[:entriesLen] + mi.entries = slicesutil.SetLength(mi.entries, entriesLen) for i := 0; i < entriesLen; i++ { tail, err := mi.entries[i].Unmarshal(src) if err != nil { diff --git a/app/vmselect/promql/timeseries.go b/app/vmselect/promql/timeseries.go index 5cb8cbdd4..ff58cbcbe 100644 --- a/app/vmselect/promql/timeseries.go +++ b/app/vmselect/promql/timeseries.go @@ -10,6 +10,7 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding" "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage" ) @@ -246,10 +247,7 @@ func unmarshalMetricNameFast(mn *storage.MetricName, src []byte) ([]byte, error) } tagsLen := encoding.UnmarshalUint16(src) src = src[2:] - if n := int(tagsLen) - cap(mn.Tags); n > 0 { - mn.Tags = append(mn.Tags[:cap(mn.Tags)], make([]storage.Tag, n)...) - } - mn.Tags = mn.Tags[:tagsLen] + mn.Tags = slicesutil.SetLength(mn.Tags, int(tagsLen)) for i := range mn.Tags { tail, key, err := unmarshalBytesFast(src) if err != nil { diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 0ec5a1665..aee3a8d20 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -44,6 +44,7 @@ See also [LTS releases](https://docs.victoriametrics.com/lts-releases/). * BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix bug that prevents the first query trace from expanding on click event. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6186). The issue was introduced in [v1.100.0](https://docs.victoriametrics.com/changelog/#v11000) release. * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent/): prevent potential panic during [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html) if more than one `--remoteWrite.streamAggr.dedupInterval` is configured. 
See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6205). * BUGFIX: [stream aggregation](https://docs.victoriametrics.com/stream-aggregation/): set correct suffix `_prometheus` for aggregation outputs [increase_prometheus](https://docs.victoriametrics.com/stream-aggregation/#increase_prometheus) and [total_prometheus](https://docs.victoriametrics.com/stream-aggregation/#total_prometheus). Before, outputs `total` and `total_prometheus` or `increase` and `increase_prometheus` had the same suffix. +* BUGFIX: properly estimate the needed memory for query execution if it has the format [`aggr_func`](https://docs.victoriametrics.com/metricsql/#aggregate-functions)`(`[`rollup_func[d]`](https://docs.victoriametrics.com/metricsql/#rollup-functions)`)` (for example, `sum(rate(request_duration_seconds_bucket[5m]))`). This should allow performing aggregations over a bigger number of time series when VictoriaMetrics runs in environments with small amounts of available memory. The issue has been introduced in [this commit](https://github.com/VictoriaMetrics/VictoriaMetrics/commit/5138eaeea0791caa34bcfab410e0ca9cd253cd8f) in [v1.83.0](https://docs.victoriametrics.com/changelog_2022/#v1830). ## [v1.101.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.101.0) diff --git a/lib/bytesutil/bytebuffer.go b/lib/bytesutil/bytebuffer.go index 013ab67bc..2060cae89 100644 --- a/lib/bytesutil/bytebuffer.go +++ b/lib/bytesutil/bytebuffer.go @@ -8,6 +8,7 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/filestream" "github.com/VictoriaMetrics/VictoriaMetrics/lib/fs" "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil" ) var ( @@ -64,8 +65,7 @@ func (bb *ByteBuffer) ReadFrom(r io.Reader) (int64, error) { offset := bLen for { if free := len(b) - offset; free < offset { - n := len(b) - b = append(b, make([]byte, n)...) 
+ b = slicesutil.SetLength(b, 2*len(b)) } n, err := r.Read(b[offset:]) offset += n diff --git a/lib/encoding/float.go b/lib/encoding/float.go index 17fc9fa32..b9394c620 100644 --- a/lib/encoding/float.go +++ b/lib/encoding/float.go @@ -16,8 +16,7 @@ func GetFloat64s(size int) *Float64s { v = &Float64s{} } a := v.(*Float64s) - a.A = slicesutil.ExtendCapacity(a.A, size) - a.A = a.A[:size] + a.A = slicesutil.SetLength(a.A, size) return a } diff --git a/lib/encoding/int.go b/lib/encoding/int.go index 0b8d11a69..aef274a8e 100644 --- a/lib/encoding/int.go +++ b/lib/encoding/int.go @@ -519,8 +519,7 @@ func GetInt64s(size int) *Int64s { } } is := v.(*Int64s) - is.A = slicesutil.ExtendCapacity(is.A, size) - is.A = is.A[:size] + is.A = slicesutil.SetLength(is.A, size) return is } @@ -546,8 +545,7 @@ func GetUint64s(size int) *Uint64s { } } is := v.(*Uint64s) - is.A = slicesutil.ExtendCapacity(is.A, size) - is.A = is.A[:size] + is.A = slicesutil.SetLength(is.A, size) return is } @@ -573,8 +571,7 @@ func GetUint32s(size int) *Uint32s { } } is := v.(*Uint32s) - is.A = slicesutil.ExtendCapacity(is.A, size) - is.A = is.A[:size] + is.A = slicesutil.SetLength(is.A, size) return is } diff --git a/lib/leveledbytebufferpool/pool_test.go b/lib/leveledbytebufferpool/pool_test.go index 851513c68..d1aa84ec1 100644 --- a/lib/leveledbytebufferpool/pool_test.go +++ b/lib/leveledbytebufferpool/pool_test.go @@ -4,6 +4,8 @@ import ( "fmt" "testing" "time" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil" ) func TestGetPutConcurrent(t *testing.T) { @@ -19,7 +21,7 @@ func TestGetPutConcurrent(t *testing.T) { if capacity < 0 { capacity = 0 } - bb.B = append(bb.B, make([]byte, capacity)...) 
+ bb.B = slicesutil.SetLength(bb.B, len(bb.B)+capacity) Put(bb) } doneCh <- struct{}{} diff --git a/lib/logstorage/bitmap.go b/lib/logstorage/bitmap.go index ea962fb2f..df72f7069 100644 --- a/lib/logstorage/bitmap.go +++ b/lib/logstorage/bitmap.go @@ -47,8 +47,7 @@ func (bm *bitmap) copyFrom(src *bitmap) { func (bm *bitmap) init(bitsLen int) { a := bm.a wordsLen := (bitsLen + 63) / 64 - a = slicesutil.ExtendCapacity(a, wordsLen) - a = a[:wordsLen] + a = slicesutil.SetLength(a, wordsLen) bm.a = a bm.bitsLen = bitsLen } diff --git a/lib/logstorage/block.go b/lib/logstorage/block.go index 77d3d7595..3b6cd073f 100644 --- a/lib/logstorage/block.go +++ b/lib/logstorage/block.go @@ -143,8 +143,7 @@ func (c *column) canStoreInConstColumn() bool { } func (c *column) resizeValues(valuesLen int) []string { - values := slicesutil.ExtendCapacity(c.values, valuesLen) - c.values = values[:valuesLen] + c.values = slicesutil.SetLength(c.values, valuesLen) return c.values } @@ -367,8 +366,7 @@ func (b *block) extendColumns() *column { } func (b *block) resizeColumns(columnsLen int) []column { - cs := slicesutil.ExtendCapacity(b.columns, columnsLen) - b.columns = cs[:columnsLen] + b.columns = slicesutil.SetLength(b.columns, columnsLen) return b.columns } diff --git a/lib/logstorage/block_data.go b/lib/logstorage/block_data.go index feff014e8..308e4d109 100644 --- a/lib/logstorage/block_data.go +++ b/lib/logstorage/block_data.go @@ -53,8 +53,7 @@ func (bd *blockData) reset() { } func (bd *blockData) resizeColumnsData(columnsDataLen int) []columnData { - cds := slicesutil.ExtendCapacity(bd.columnsData, columnsDataLen) - bd.columnsData = cds[:columnsDataLen] + bd.columnsData = slicesutil.SetLength(bd.columnsData, columnsDataLen) return bd.columnsData } diff --git a/lib/logstorage/block_header.go b/lib/logstorage/block_header.go index 921ecf705..edd28fe9f 100644 --- a/lib/logstorage/block_header.go +++ b/lib/logstorage/block_header.go @@ -264,14 +264,12 @@ func (csh *columnsHeader) 
getColumnHeader(name string) *columnHeader { } func (csh *columnsHeader) resizeConstColumns(columnsLen int) []Field { - ccs := slicesutil.ExtendCapacity(csh.constColumns, columnsLen) - csh.constColumns = ccs[:columnsLen] + csh.constColumns = slicesutil.SetLength(csh.constColumns, columnsLen) return csh.constColumns } func (csh *columnsHeader) resizeColumnHeaders(columnHeadersLen int) []columnHeader { - chs := slicesutil.ExtendCapacity(csh.columnHeaders, columnHeadersLen) - csh.columnHeaders = chs[:columnHeadersLen] + csh.columnHeaders = slicesutil.SetLength(csh.columnHeaders, columnHeadersLen) return csh.columnHeaders } diff --git a/lib/logstorage/bloomfilter.go b/lib/logstorage/bloomfilter.go index c260b24b6..9c0a5dd6a 100644 --- a/lib/logstorage/bloomfilter.go +++ b/lib/logstorage/bloomfilter.go @@ -55,8 +55,7 @@ func (bf *bloomFilter) unmarshal(src []byte) error { } bf.reset() wordsCount := len(src) / 8 - bits := slicesutil.ExtendCapacity(bf.bits, wordsCount) - bits = bits[:wordsCount] + bits := slicesutil.SetLength(bf.bits, wordsCount) for i := range bits { bits[i] = encoding.UnmarshalUint64(src) src = src[8:] @@ -69,8 +68,7 @@ func (bf *bloomFilter) unmarshal(src []byte) error { func (bf *bloomFilter) mustInit(tokens []string) { bitsCount := len(tokens) * bloomFilterBitsPerItem wordsCount := (bitsCount + 63) / 64 - bits := slicesutil.ExtendCapacity(bf.bits, wordsCount) - bits = bits[:wordsCount] + bits := slicesutil.SetLength(bf.bits, wordsCount) bloomFilterAdd(bits, tokens) bf.bits = bits } diff --git a/lib/logstorage/indexdb.go b/lib/logstorage/indexdb.go index f7c4c70b9..e98afb244 100644 --- a/lib/logstorage/indexdb.go +++ b/lib/logstorage/indexdb.go @@ -14,6 +14,7 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" "github.com/VictoriaMetrics/VictoriaMetrics/lib/mergeset" "github.com/VictoriaMetrics/VictoriaMetrics/lib/regexutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil" ) const ( @@ -853,13 +854,9 @@ func (sp 
*tagToStreamIDsRowParser) ParseStreamIDs() { } tail := sp.tail n := len(tail) / 16 - streamIDs := sp.StreamIDs[:0] - if n <= cap(streamIDs) { - streamIDs = streamIDs[:n] - } else { - streamIDs = append(streamIDs[:cap(streamIDs)], make([]u128, n-cap(streamIDs))...) - } - sp.StreamIDs = streamIDs + sp.StreamIDs = slicesutil.SetLength(sp.StreamIDs, n) + streamIDs := sp.StreamIDs + _ = streamIDs[n-1] for i := 0; i < n; i++ { var err error tail, err = streamIDs[i].unmarshal(tail) diff --git a/lib/logstorage/storage_search.go b/lib/logstorage/storage_search.go index b76138762..68f27f648 100644 --- a/lib/logstorage/storage_search.go +++ b/lib/logstorage/storage_search.go @@ -198,8 +198,7 @@ func getEmptyStrings(rowsCount int) []string { return values } values := *p - values = slicesutil.ExtendCapacity(values, rowsCount) - return values[:rowsCount] + return slicesutil.SetLength(values, rowsCount) } var emptyStrings atomic.Pointer[[]string] diff --git a/lib/mergeset/block_header.go b/lib/mergeset/block_header.go index 795492357..817fee7e6 100644 --- a/lib/mergeset/block_header.go +++ b/lib/mergeset/block_header.go @@ -161,8 +161,7 @@ func unmarshalBlockHeadersNoCopy(dst []blockHeader, src []byte, blockHeadersCoun logger.Panicf("BUG: blockHeadersCount must be greater than 0; got %d", blockHeadersCount) } dstLen := len(dst) - dst = slicesutil.ExtendCapacity(dst, blockHeadersCount) - dst = dst[:dstLen+blockHeadersCount] + dst = slicesutil.SetLength(dst, dstLen+blockHeadersCount) for i := 0; i < blockHeadersCount; i++ { tail, err := dst[dstLen+i].UnmarshalNoCopy(src) if err != nil { diff --git a/lib/mergeset/encoding.go b/lib/mergeset/encoding.go index da3f2a2f6..7a29a0ca8 100644 --- a/lib/mergeset/encoding.go +++ b/lib/mergeset/encoding.go @@ -413,8 +413,7 @@ func (ib *inmemoryBlock) UnmarshalData(sb *storageBlock, firstItem, commonPrefix // since the data isn't going to be resized after unmarshaling. // This may save memory for caching the unmarshaled block. 
data := bytesutil.ResizeNoCopyNoOverallocate(ib.data, dataLen) - ib.items = slicesutil.ExtendCapacity(ib.items, int(itemsCount)) - ib.items = ib.items[:itemsCount] + ib.items = slicesutil.SetLength(ib.items, int(itemsCount)) data = append(data[:0], firstItem...) items := ib.items items[0] = Item{ @@ -553,10 +552,7 @@ func getLensBuffer(n int) *lensBuffer { v = &lensBuffer{} } lb := v.(*lensBuffer) - if nn := n - cap(lb.lens); nn > 0 { - lb.lens = append(lb.lens[:cap(lb.lens)], make([]uint64, nn)...) - } - lb.lens = lb.lens[:n] + lb.lens = slicesutil.SetLength(lb.lens, n) return lb } diff --git a/lib/mergeset/table_search.go b/lib/mergeset/table_search.go index 412fbd838..70f741561 100644 --- a/lib/mergeset/table_search.go +++ b/lib/mergeset/table_search.go @@ -72,8 +72,7 @@ func (ts *TableSearch) Init(tb *Table) { ts.pws = ts.tb.getParts(ts.pws[:0]) // Initialize the psPool. - ts.psPool = slicesutil.ExtendCapacity(ts.psPool, len(ts.pws)) - ts.psPool = ts.psPool[:len(ts.pws)] + ts.psPool = slicesutil.SetLength(ts.psPool, len(ts.pws)) for i, pw := range ts.pws { ts.psPool[i].Init(pw.p) } diff --git a/lib/prompbmarshal/util.go b/lib/prompbmarshal/util.go index cdfcc5862..688c450a5 100644 --- a/lib/prompbmarshal/util.go +++ b/lib/prompbmarshal/util.go @@ -10,8 +10,7 @@ import ( func (wr *WriteRequest) MarshalProtobuf(dst []byte) []byte { size := wr.Size() dstLen := len(dst) - dst = slicesutil.ExtendCapacity(dst, size) - dst = dst[:dstLen+size] + dst = slicesutil.SetLength(dst, dstLen+size) n, err := wr.MarshalToSizedBuffer(dst[dstLen:]) if err != nil { panic(fmt.Errorf("BUG: unexpected error when marshaling WriteRequest: %w", err)) diff --git a/lib/slicesutil/slicesutil.go b/lib/slicesutil/slicesutil.go index 4ce954224..14cdeb1ae 100644 --- a/lib/slicesutil/slicesutil.go +++ b/lib/slicesutil/slicesutil.go @@ -1,10 +1,23 @@ package slicesutil -// ExtendCapacity returns a with the capacity extended to len(a)+n if needed. 
-func ExtendCapacity[T any](a []T, n int) []T { +// ExtendCapacity returns a with the capacity extended to len(a)+itemsToAdd. +// +// It may allocate new slice if cap(a) is smaller than len(a)+itemsToAdd. +func ExtendCapacity[T any](a []T, itemsToAdd int) []T { aLen := len(a) - if n := aLen + n - cap(a); n > 0 { + if n := aLen + itemsToAdd - cap(a); n > 0 { + a = append(a[:cap(a)], make([]T, n)...) + return a[:aLen] + } + return a +} + +// SetLength sets len(a) to newLen and returns the result. +// +// It may allocate new slice if cap(a) is smaller than newLen. +func SetLength[T any](a []T, newLen int) []T { + if n := newLen - cap(a); n > 0 { a = append(a[:cap(a)], make([]T, n)...) } - return a[:aLen] + return a[:newLen] } diff --git a/lib/storage/index_db.go b/lib/storage/index_db.go index 01df33fd0..af698e5bd 100644 --- a/lib/storage/index_db.go +++ b/lib/storage/index_db.go @@ -22,6 +22,7 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/memory" "github.com/VictoriaMetrics/VictoriaMetrics/lib/mergeset" "github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/uint64set" "github.com/VictoriaMetrics/VictoriaMetrics/lib/workingsetcache" "github.com/VictoriaMetrics/fastcache" @@ -3153,13 +3154,8 @@ func (mp *tagToMetricIDsRowParser) ParseMetricIDs() { return } tail := mp.tail - mp.MetricIDs = mp.MetricIDs[:0] n := len(tail) / 8 - if n <= cap(mp.MetricIDs) { - mp.MetricIDs = mp.MetricIDs[:n] - } else { - mp.MetricIDs = append(mp.MetricIDs[:cap(mp.MetricIDs)], make([]uint64, n-cap(mp.MetricIDs))...) 
- } + mp.MetricIDs = slicesutil.SetLength(mp.MetricIDs, n) metricIDs := mp.MetricIDs _ = metricIDs[n-1] for i := 0; i < n; i++ { diff --git a/lib/storage/metric_name.go b/lib/storage/metric_name.go index 85bff0e1d..4f102bd79 100644 --- a/lib/storage/metric_name.go +++ b/lib/storage/metric_name.go @@ -703,8 +703,8 @@ func (mn *MetricName) sortTags() { } cts := getCanonicalTags() - cts.tags = slicesutil.ExtendCapacity(cts.tags, len(mn.Tags)) - dst := cts.tags[:len(mn.Tags)] + cts.tags = slicesutil.SetLength(cts.tags, len(mn.Tags)) + dst := cts.tags for i := range mn.Tags { tag := &mn.Tags[i] ct := &dst[i] @@ -774,8 +774,7 @@ func (ts *canonicalTagsSort) Swap(i, j int) { func copyTags(dst, src []Tag) []Tag { dstLen := len(dst) - dst = slicesutil.ExtendCapacity(dst, len(src)) - dst = dst[:dstLen+len(src)] + dst = slicesutil.SetLength(dst, dstLen+len(src)) for i := range src { dst[dstLen+i].copyFrom(&src[i]) } diff --git a/lib/storage/partition_search.go b/lib/storage/partition_search.go index e86305fa0..cc1bacd82 100644 --- a/lib/storage/partition_search.go +++ b/lib/storage/partition_search.go @@ -84,8 +84,7 @@ func (pts *partitionSearch) Init(pt *partition, tsids []TSID, tr TimeRange) { pts.pws = pt.GetParts(pts.pws[:0], true) // Initialize psPool. 
- pts.psPool = slicesutil.ExtendCapacity(pts.psPool, len(pts.pws)) - pts.psPool = pts.psPool[:len(pts.pws)] + pts.psPool = slicesutil.SetLength(pts.psPool, len(pts.pws)) for i, pw := range pts.pws { pts.psPool[i].Init(pw.p, tsids, tr) } diff --git a/lib/storage/search.go b/lib/storage/search.go index 45d535e28..0e406b344 100644 --- a/lib/storage/search.go +++ b/lib/storage/search.go @@ -414,8 +414,7 @@ func (sq *SearchQuery) Unmarshal(src []byte) ([]byte, error) { if err != nil { return src, fmt.Errorf("cannot unmarshal the count of TagFilterss: %w", err) } - sq.TagFilterss = slicesutil.ExtendCapacity(sq.TagFilterss, int(tfssCount)) - sq.TagFilterss = sq.TagFilterss[:tfssCount] + sq.TagFilterss = slicesutil.SetLength(sq.TagFilterss, int(tfssCount)) src = tail for i := 0; i < int(tfssCount); i++ { @@ -426,8 +425,7 @@ func (sq *SearchQuery) Unmarshal(src []byte) ([]byte, error) { src = tail tagFilters := sq.TagFilterss[i] - tagFilters = slicesutil.ExtendCapacity(tagFilters, int(tfsCount)) - tagFilters = tagFilters[:tfsCount] + tagFilters = slicesutil.SetLength(tagFilters, int(tfsCount)) for j := 0; j < int(tfsCount); j++ { tail, err := tagFilters[j].Unmarshal(src) if err != nil { diff --git a/lib/storage/table_search.go b/lib/storage/table_search.go index 955ba9d83..1809a8ae1 100644 --- a/lib/storage/table_search.go +++ b/lib/storage/table_search.go @@ -85,8 +85,7 @@ func (ts *tableSearch) Init(tb *table, tsids []TSID, tr TimeRange) { ts.ptws = tb.GetPartitions(ts.ptws[:0]) // Initialize the ptsPool. 
- ts.ptsPool = slicesutil.ExtendCapacity(ts.ptsPool, len(ts.ptws)) - ts.ptsPool = ts.ptsPool[:len(ts.ptws)] + ts.ptsPool = slicesutil.SetLength(ts.ptsPool, len(ts.ptws)) for i, ptw := range ts.ptws { ts.ptsPool[i].Init(ptw.pt, tsids, tr) } diff --git a/lib/uint64set/uint64set.go b/lib/uint64set/uint64set.go index d524e8788..44204b509 100644 --- a/lib/uint64set/uint64set.go +++ b/lib/uint64set/uint64set.go @@ -230,6 +230,7 @@ func (s *Set) AppendTo(dst []uint64) []uint64 { // pre-allocate memory for dst sLen := s.Len() dst = slicesutil.ExtendCapacity(dst, sLen) + s.sort() for i := range s.buckets { dst = s.buckets[i].appendTo(dst)