lib/slicesutil: add helper functions for setting slice length and extending its capacity

The added helper functions, SetLength() and ExtendCapacity(), replace error-prone hand-rolled slice-resizing code with simple function calls.
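Roughly, each touched call site changes from a manual resize to a single helper call. A minimal before/after sketch (the function names resizeBufOld and resizeBufNew, the buf slice and the size parameter are illustrative only, not taken from the diff):

package example

import "github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"

// resizeBufOld shows the manual pattern removed by this commit: grow the
// backing array when the capacity is too small, then re-slice to the
// desired length.
func resizeBufOld(buf []byte, size int) []byte {
	if n := size - cap(buf); n > 0 {
		buf = append(buf[:cap(buf)], make([]byte, n)...)
	}
	return buf[:size]
}

// resizeBufNew expresses the same resize as a single call.
func resizeBufNew(buf []byte, size int) []byte {
	return slicesutil.SetLength(buf, size)
}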
Aliaksandr Valialkin 2024-05-12 11:24:48 +02:00
parent 9607902289
commit 87338633b1
GPG key ID: 52C003EE2BCDB9EB
19 changed files with 75 additions and 104 deletions

View file

@@ -17,6 +17,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/stringsutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/workingsetcache"
@@ -689,10 +690,7 @@ func (mi *rollupResultCacheMetainfo) Unmarshal(src []byte) error {
}
entriesLen := int(encoding.UnmarshalUint32(src))
src = src[4:]
if n := entriesLen - cap(mi.entries); n > 0 {
mi.entries = append(mi.entries[:cap(mi.entries)], make([]rollupResultCacheMetainfoEntry, n)...)
}
mi.entries = mi.entries[:entriesLen]
mi.entries = slicesutil.SetLength(mi.entries, entriesLen)
for i := 0; i < entriesLen; i++ {
tail, err := mi.entries[i].Unmarshal(src)
if err != nil {

View file

@@ -10,6 +10,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
)
@@ -255,10 +256,7 @@ func unmarshalMetricNameFast(mn *storage.MetricName, src []byte) ([]byte, error)
}
tagsLen := encoding.UnmarshalUint16(src)
src = src[2:]
if n := int(tagsLen) - cap(mn.Tags); n > 0 {
mn.Tags = append(mn.Tags[:cap(mn.Tags)], make([]storage.Tag, n)...)
}
mn.Tags = mn.Tags[:tagsLen]
mn.Tags = slicesutil.SetLength(mn.Tags, int(tagsLen))
for i := range mn.Tags {
tail, key, err := unmarshalBytesFast(src)
if err != nil {

View file

@@ -8,6 +8,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/filestream"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
)
var (
@@ -64,8 +65,7 @@ func (bb *ByteBuffer) ReadFrom(r io.Reader) (int64, error) {
offset := bLen
for {
if free := len(b) - offset; free < offset {
n := len(b)
b = append(b, make([]byte, n)...)
b = slicesutil.SetLength(b, 2*len(b))
}
n, err := r.Read(b[offset:])
offset += n

View file

@@ -5,6 +5,7 @@ import (
"sync"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fastnum"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
)
// CalibrateScale calibrates a and b with the corresponding exponents ae, be
@@ -81,29 +82,17 @@ var decimalMultipliers = []int64{1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e
// ExtendFloat64sCapacity extends dst capacity to hold additionalItems
// and returns the extended dst.
func ExtendFloat64sCapacity(dst []float64, additionalItems int) []float64 {
dstLen := len(dst)
if n := dstLen + additionalItems - cap(dst); n > 0 {
dst = append(dst[:cap(dst)], make([]float64, n)...)
}
return dst[:dstLen]
return slicesutil.ExtendCapacity(dst, additionalItems)
}
// ExtendInt64sCapacity extends dst capacity to hold additionalItems
// and returns the extended dst.
func ExtendInt64sCapacity(dst []int64, additionalItems int) []int64 {
dstLen := len(dst)
if n := dstLen + additionalItems - cap(dst); n > 0 {
dst = append(dst[:cap(dst)], make([]int64, n)...)
}
return dst[:dstLen]
return slicesutil.ExtendCapacity(dst, additionalItems)
}
func extendInt16sCapacity(dst []int16, additionalItems int) []int16 {
dstLen := len(dst)
if n := dstLen + additionalItems - cap(dst); n > 0 {
dst = append(dst[:cap(dst)], make([]int16, n)...)
}
return dst[:dstLen]
return slicesutil.ExtendCapacity(dst, additionalItems)
}
// AppendDecimalToFloat converts each item in va to f=v*10^e, appends it

View file

@@ -4,6 +4,8 @@ import (
"encoding/binary"
"fmt"
"sync"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
)
// MarshalUint16 appends marshaled v to dst and returns the result.
@@ -490,10 +492,7 @@ func GetInt64s(size int) *Int64s {
}
}
is := v.(*Int64s)
if n := size - cap(is.A); n > 0 {
is.A = append(is.A[:cap(is.A)], make([]int64, n)...)
}
is.A = is.A[:size]
is.A = slicesutil.SetLength(is.A, size)
return is
}
@@ -519,10 +518,7 @@ func GetUint64s(size int) *Uint64s {
}
}
is := v.(*Uint64s)
if n := size - cap(is.A); n > 0 {
is.A = append(is.A[:cap(is.A)], make([]uint64, n)...)
}
is.A = is.A[:size]
is.A = slicesutil.SetLength(is.A, size)
return is
}
@@ -548,10 +544,7 @@ func GetUint32s(size int) *Uint32s {
}
}
is := v.(*Uint32s)
if n := size - cap(is.A); n > 0 {
is.A = append(is.A[:cap(is.A)], make([]uint32, n)...)
}
is.A = is.A[:size]
is.A = slicesutil.SetLength(is.A, size)
return is
}

View file

@@ -4,6 +4,8 @@ import (
"fmt"
"testing"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
)
func TestGetPutConcurrent(t *testing.T) {
@@ -19,7 +21,7 @@ func TestGetPutConcurrent(t *testing.T) {
if capacity < 0 {
capacity = 0
}
bb.B = append(bb.B, make([]byte, capacity)...)
bb.B = slicesutil.SetLength(bb.B, len(bb.B)+capacity)
Put(bb)
}
doneCh <- struct{}{}

View file

@@ -14,6 +14,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/mergeset"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/regexutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
)
const (
@@ -853,13 +854,9 @@ func (sp *tagToStreamIDsRowParser) ParseStreamIDs() {
}
tail := sp.tail
n := len(tail) / 16
streamIDs := sp.StreamIDs[:0]
if n <= cap(streamIDs) {
streamIDs = streamIDs[:n]
} else {
streamIDs = append(streamIDs[:cap(streamIDs)], make([]u128, n-cap(streamIDs))...)
}
sp.StreamIDs = streamIDs
sp.StreamIDs = slicesutil.SetLength(sp.StreamIDs, n)
streamIDs := sp.StreamIDs
_ = streamIDs[n-1]
for i := 0; i < n; i++ {
var err error
tail, err = streamIDs[i].unmarshal(tail)

View file

@@ -7,6 +7,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
)
type blockHeader struct {
@@ -160,10 +161,7 @@ func unmarshalBlockHeadersNoCopy(dst []blockHeader, src []byte, blockHeadersCoun
logger.Panicf("BUG: blockHeadersCount must be greater than 0; got %d", blockHeadersCount)
}
dstLen := len(dst)
if n := dstLen + blockHeadersCount - cap(dst); n > 0 {
dst = append(dst[:cap(dst)], make([]blockHeader, n)...)
}
dst = dst[:dstLen+blockHeadersCount]
dst = slicesutil.SetLength(dst, dstLen+blockHeadersCount)
for i := 0; i < blockHeadersCount; i++ {
tail, err := dst[dstLen+i].UnmarshalNoCopy(src)
if err != nil {

View file

@@ -12,6 +12,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
)
// Item represents a single item for storing in a mergeset.
@@ -412,10 +413,7 @@ func (ib *inmemoryBlock) UnmarshalData(sb *storageBlock, firstItem, commonPrefix
// since the data isn't going to be resized after unmarshaling.
// This may save memory for caching the unmarshaled block.
data := bytesutil.ResizeNoCopyNoOverallocate(ib.data, dataLen)
if n := int(itemsCount) - cap(ib.items); n > 0 {
ib.items = append(ib.items[:cap(ib.items)], make([]Item, n)...)
}
ib.items = ib.items[:itemsCount]
ib.items = slicesutil.SetLength(ib.items, int(itemsCount))
data = append(data[:0], firstItem...)
items := ib.items
items[0] = Item{
@@ -554,10 +552,7 @@ func getLensBuffer(n int) *lensBuffer {
v = &lensBuffer{}
}
lb := v.(*lensBuffer)
if nn := n - cap(lb.lens); nn > 0 {
lb.lens = append(lb.lens[:cap(lb.lens)], make([]uint64, nn)...)
}
lb.lens = lb.lens[:n]
lb.lens = slicesutil.SetLength(lb.lens, n)
return lb
}

View file

@@ -7,6 +7,7 @@ import (
"io"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
)
// TableSearch is a reusable cursor used for searching in the Table.
@@ -71,10 +72,7 @@ func (ts *TableSearch) Init(tb *Table) {
ts.pws = ts.tb.getParts(ts.pws[:0])
// Initialize the psPool.
if n := len(ts.pws) - cap(ts.psPool); n > 0 {
ts.psPool = append(ts.psPool[:cap(ts.psPool)], make([]partSearch, n)...)
}
ts.psPool = ts.psPool[:len(ts.pws)]
ts.psPool = slicesutil.SetLength(ts.psPool, len(ts.pws))
for i, pw := range ts.pws {
ts.psPool[i].Init(pw.p)
}

View file

@@ -2,16 +2,15 @@ package prompbmarshal
import (
"fmt"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
)
// MarshalProtobuf marshals wr to dst and returns the result.
func (wr *WriteRequest) MarshalProtobuf(dst []byte) []byte {
size := wr.Size()
dstLen := len(dst)
if n := size - (cap(dst) - dstLen); n > 0 {
dst = append(dst[:cap(dst)], make([]byte, n)...)
}
dst = dst[:dstLen+size]
dst = slicesutil.SetLength(dst, dstLen+size)
n, err := wr.MarshalToSizedBuffer(dst[dstLen:])
if err != nil {
panic(fmt.Errorf("BUG: unexpected error when marshaling WriteRequest: %w", err))

View file

@@ -0,0 +1,23 @@
package slicesutil

// ExtendCapacity returns a with the capacity extended to len(a)+itemsToAdd.
//
// It may allocate new slice if cap(a) is smaller than len(a)+itemsToAdd.
func ExtendCapacity[T any](a []T, itemsToAdd int) []T {
aLen := len(a)
if n := aLen + itemsToAdd - cap(a); n > 0 {
a = append(a[:cap(a)], make([]T, n)...)
return a[:aLen]
}
return a
}

// SetLength sets len(a) to newLen and returns the result.
//
// It may allocate new slice if cap(a) is smaller than newLen.
func SetLength[T any](a []T, newLen int) []T {
if n := newLen - cap(a); n > 0 {
a = append(a[:cap(a)], make([]T, n)...)
}
return a[:newLen]
}
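A short usage sketch for the new helpers (hypothetical caller code, not part of the commit), showing that SetLength resizes in place when the capacity allows it, while ExtendCapacity reserves room for additional items without changing the length:

package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
)

func main() {
	// SetLength resizes a to the requested length and reallocates only when
	// cap(a) is too small. It does not clear the newly visible elements, so
	// any stale data between the old and the new length becomes visible again.
	a := make([]int64, 3, 10)
	a = slicesutil.SetLength(a, 7)
	fmt.Println(len(a), cap(a)) // 7 10

	// ExtendCapacity guarantees room for 100 more items while keeping the
	// length unchanged, so the append below does not reallocate.
	b := []byte("abc")
	b = slicesutil.ExtendCapacity(b, 100)
	fmt.Println(len(b), cap(b) >= 103) // 3 true
	b = append(b, "def"...)
	fmt.Println(string(b)) // abcdef
}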

View file

@@ -7,6 +7,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
)
// blockHeader is a header for a time series block.
@@ -252,10 +253,7 @@ func unmarshalBlockHeaders(dst []blockHeader, src []byte, blockHeadersCount int)
logger.Panicf("BUG: blockHeadersCount must be greater than zero; got %d", blockHeadersCount)
}
dstLen := len(dst)
if n := dstLen + blockHeadersCount - cap(dst); n > 0 {
dst = append(dst[:cap(dst)], make([]blockHeader, n)...)
dst = dst[:dstLen]
}
dst = slicesutil.ExtendCapacity(dst, blockHeadersCount)
var bh blockHeader
for len(src) > 0 {
tmp, err := bh.Unmarshal(src)

View file

@@ -22,6 +22,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/mergeset"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/uint64set"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/workingsetcache"
"github.com/VictoriaMetrics/fastcache"
@@ -3323,13 +3324,8 @@ func (mp *tagToMetricIDsRowParser) ParseMetricIDs() {
return
}
tail := mp.tail
mp.MetricIDs = mp.MetricIDs[:0]
n := len(tail) / 8
if n <= cap(mp.MetricIDs) {
mp.MetricIDs = mp.MetricIDs[:n]
} else {
mp.MetricIDs = append(mp.MetricIDs[:cap(mp.MetricIDs)], make([]uint64, n-cap(mp.MetricIDs))...)
}
mp.MetricIDs = slicesutil.SetLength(mp.MetricIDs, n)
metricIDs := mp.MetricIDs
_ = metricIDs[n-1]
for i := 0; i < n; i++ {

View file

@@ -15,6 +15,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
)
const (
@@ -790,10 +791,8 @@ func (mn *MetricName) sortTags() {
}
cts := getCanonicalTags()
if n := len(mn.Tags) - cap(cts.tags); n > 0 {
cts.tags = append(cts.tags[:cap(cts.tags)], make([]canonicalTag, n)...)
}
dst := cts.tags[:len(mn.Tags)]
cts.tags = slicesutil.SetLength(cts.tags, len(mn.Tags))
dst := cts.tags
for i := range mn.Tags {
tag := &mn.Tags[i]
ct := &dst[i]
@@ -863,10 +862,7 @@ func (ts *canonicalTagsSort) Swap(i, j int) {
func copyTags(dst, src []Tag) []Tag {
dstLen := len(dst)
if n := dstLen + len(src) - cap(dst); n > 0 {
dst = append(dst[:cap(dst)], make([]Tag, n)...)
}
dst = dst[:dstLen+len(src)]
dst = slicesutil.SetLength(dst, dstLen+len(src))
for i := range src {
dst[dstLen+i].copyFrom(&src[i])
}

View file

@@ -6,6 +6,7 @@ import (
"io"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
)
// partitionSearch represents a search in the partition.
@@ -83,10 +84,7 @@ func (pts *partitionSearch) Init(pt *partition, tsids []TSID, tr TimeRange) {
pts.pws = pt.GetParts(pts.pws[:0], true)
// Initialize psPool.
if n := len(pts.pws) - cap(pts.psPool); n > 0 {
pts.psPool = append(pts.psPool[:cap(pts.psPool)], make([]partSearch, n)...)
}
pts.psPool = pts.psPool[:len(pts.pws)]
pts.psPool = slicesutil.SetLength(pts.psPool, len(pts.pws))
for i, pw := range pts.pws {
pts.psPool[i].Init(pw.p, tsids, tr)
}

View file

@@ -10,6 +10,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/stringsutil"
)
@@ -471,10 +472,7 @@ func (sq *SearchQuery) Unmarshal(src []byte) ([]byte, error) {
if err != nil {
return src, fmt.Errorf("cannot unmarshal the count of TagFilterss: %w", err)
}
if n := int(tfssCount) - cap(sq.TagFilterss); n > 0 {
sq.TagFilterss = append(sq.TagFilterss[:cap(sq.TagFilterss)], make([][]TagFilter, n)...)
}
sq.TagFilterss = sq.TagFilterss[:tfssCount]
sq.TagFilterss = slicesutil.SetLength(sq.TagFilterss, int(tfssCount))
src = tail
for i := 0; i < int(tfssCount); i++ {
@@ -485,10 +483,7 @@ func (sq *SearchQuery) Unmarshal(src []byte) ([]byte, error) {
src = tail
tagFilters := sq.TagFilterss[i]
if n := int(tfsCount) - cap(tagFilters); n > 0 {
tagFilters = append(tagFilters[:cap(tagFilters)], make([]TagFilter, n)...)
}
tagFilters = tagFilters[:tfsCount]
tagFilters = slicesutil.SetLength(tagFilters, int(tfsCount))
for j := 0; j < int(tfsCount); j++ {
tail, err := tagFilters[j].Unmarshal(src)
if err != nil {

View file

@@ -7,6 +7,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
)
// tableSearch performs searches in the table.
@@ -84,10 +85,7 @@ func (ts *tableSearch) Init(tb *table, tsids []TSID, tr TimeRange) {
ts.ptws = tb.GetPartitions(ts.ptws[:0])
// Initialize the ptsPool.
if n := len(ts.ptws) - cap(ts.ptsPool); n > 0 {
ts.ptsPool = append(ts.ptsPool[:cap(ts.ptsPool)], make([]partitionSearch, n)...)
}
ts.ptsPool = ts.ptsPool[:len(ts.ptws)]
ts.ptsPool = slicesutil.SetLength(ts.ptsPool, len(ts.ptws))
for i, ptw := range ts.ptws {
ts.ptsPool[i].Init(ptw.pt, tsids, tr)
}

View file

@@ -6,6 +6,8 @@ import (
"sync"
"sync/atomic"
"unsafe"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
)
// Set is a fast set for uint64.
@@ -226,11 +228,9 @@ func (s *Set) AppendTo(dst []uint64) []uint64 {
}
// pre-allocate memory for dst
dstLen := len(dst)
if n := s.Len() - cap(dst) + dstLen; n > 0 {
dst = append(dst[:cap(dst)], make([]uint64, n)...)
dst = dst[:dstLen]
}
sLen := s.Len()
dst = slicesutil.ExtendCapacity(dst, sLen)
s.sort()
for i := range s.buckets {
dst = s.buckets[i].appendTo(dst)