Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git (synced 2024-12-31 15:06:26 +00:00)

Commit 14a7a3e8e4: Merge branch 'public-single-node' into victorialogs-wip

26 changed files with 61 additions and 82 deletions
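Nearly every hunk below applies the same refactoring: an open-coded "grow the slice, then reslice it" sequence is collapsed into a single call to the new slicesutil helpers. A minimal standalone sketch of the equivalence (the helper body is copied from the lib/slicesutil hunk below; the surrounding main is illustrative only):

package main

import "fmt"

// SetLength sets len(a) to newLen, reallocating only when cap(a) < newLen.
// This mirrors the helper introduced in lib/slicesutil below.
func SetLength[T any](a []T, newLen int) []T {
    if n := newLen - cap(a); n > 0 {
        // Grow the backing array beyond its current capacity.
        a = append(a[:cap(a)], make([]T, n)...)
    }
    return a[:newLen]
}

func main() {
    n := 10

    // The pattern this commit removes, repeated at every call site:
    buf := make([]uint64, 0, 4)
    if extra := n - cap(buf); extra > 0 {
        buf = append(buf[:cap(buf)], make([]uint64, extra)...)
    }
    buf = buf[:n]
    fmt.Println(len(buf)) // 10

    // The replacement: one call with the same semantics.
    buf2 := SetLength(make([]uint64, 0, 4), n)
    fmt.Println(len(buf2)) // 10
}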
@@ -9,6 +9,7 @@ import (
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
+    "github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
 )
@@ -37,12 +38,9 @@ func (ctx *InsertCtx) Reset(rowsLen int) {
     for i := range mrs {
         cleanMetricRow(&mrs[i])
     }
+    mrs = slicesutil.SetLength(mrs, rowsLen)
     ctx.mrs = mrs[:0]
-    if n := rowsLen - cap(ctx.mrs); n > 0 {
-        ctx.mrs = append(ctx.mrs[:cap(ctx.mrs)], make([]storage.MetricRow, n)...)
-    }
-    ctx.mrs = ctx.mrs[:0]
     ctx.metricNamesBuf = ctx.metricNamesBuf[:0]
     ctx.relabelCtx.Reset()
     ctx.streamAggrCtx.Reset()
@@ -16,6 +16,7 @@ import (
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
+    "github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/stringsutil"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/workingsetcache"
@@ -736,10 +737,7 @@ func (mi *rollupResultCacheMetainfo) Unmarshal(src []byte) error {
     }
     entriesLen := int(encoding.UnmarshalUint32(src))
     src = src[4:]
-    if n := entriesLen - cap(mi.entries); n > 0 {
-        mi.entries = append(mi.entries[:cap(mi.entries)], make([]rollupResultCacheMetainfoEntry, n)...)
-    }
-    mi.entries = mi.entries[:entriesLen]
+    mi.entries = slicesutil.SetLength(mi.entries, entriesLen)
     for i := 0; i < entriesLen; i++ {
         tail, err := mi.entries[i].Unmarshal(src)
         if err != nil {
@@ -10,6 +10,7 @@ import (
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
+    "github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
 )
@@ -246,10 +247,7 @@ func unmarshalMetricNameFast(mn *storage.MetricName, src []byte) ([]byte, error)
     }
     tagsLen := encoding.UnmarshalUint16(src)
     src = src[2:]
-    if n := int(tagsLen) - cap(mn.Tags); n > 0 {
-        mn.Tags = append(mn.Tags[:cap(mn.Tags)], make([]storage.Tag, n)...)
-    }
-    mn.Tags = mn.Tags[:tagsLen]
+    mn.Tags = slicesutil.SetLength(mn.Tags, int(tagsLen))
     for i := range mn.Tags {
         tail, key, err := unmarshalBytesFast(src)
         if err != nil {
@@ -44,6 +44,7 @@ See also [LTS releases](https://docs.victoriametrics.com/lts-releases/).
 * BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix bug that prevents the first query trace from expanding on click event. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6186). The issue was introduced in [v1.100.0](https://docs.victoriametrics.com/changelog/#v11000) release.
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent/): prevent potential panic during [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html) if more than one `--remoteWrite.streamAggr.dedupInterval` is configured. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6205).
 * BUGFIX: [stream aggregation](https://docs.victoriametrics.com/stream-aggregation/): set correct suffix `<output>_prometheus` for aggregation outputs [increase_prometheus](https://docs.victoriametrics.com/stream-aggregation/#increase_prometheus) and [total_prometheus](https://docs.victoriametrics.com/stream-aggregation/#total_prometheus). Before, outputs `total` and `total_prometheus` or `increase` and `increase_prometheus` had the same suffix.
+* BUGFIX: properly estimate the needed memory for query execution if it has the format [`aggr_func`](https://docs.victoriametrics.com/metricsql/#aggregate-functions)([`rollup_func[d]`](https://docs.victoriametrics.com/metricsql/#rollup-functions)) (for example, `sum(rate(request_duration_seconds_bucket[5m]))`). This should allow performing aggregations over a bigger number of time series when VictoriaMetrics runs in environments with small amounts of available memory. The issue has been introduced in [this commit](https://github.com/VictoriaMetrics/VictoriaMetrics/commit/5138eaeea0791caa34bcfab410e0ca9cd253cd8f) in [v1.83.0](https://docs.victoriametrics.com/changelog_2022/#v1830).
 
 ## [v1.101.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.101.0)
@@ -8,6 +8,7 @@ import (
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/filestream"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
+    "github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
 )
 
 var (
@@ -64,8 +65,7 @@ func (bb *ByteBuffer) ReadFrom(r io.Reader) (int64, error) {
     offset := bLen
     for {
         if free := len(b) - offset; free < offset {
-            n := len(b)
-            b = append(b, make([]byte, n)...)
+            b = slicesutil.SetLength(b, 2*len(b))
         }
         n, err := r.Read(b[offset:])
         offset += n
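Unlike most call sites, this one encodes a growth policy rather than an exact target size: when less than half of the buffer is free, SetLength doubles it, matching the old append-based doubling and keeping growth amortized O(1) per byte read. A small trace with illustrative numbers:

package main

import (
    "fmt"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
)

func main() {
    // With 3000 of 4096 bytes filled, free (1096) < offset (3000),
    // so the buffer doubles before the next Read call.
    b := make([]byte, 4096)
    offset := 3000
    if free := len(b) - offset; free < offset {
        b = slicesutil.SetLength(b, 2*len(b))
    }
    fmt.Println(len(b)) // 8192
}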
@@ -16,8 +16,7 @@ func GetFloat64s(size int) *Float64s {
         v = &Float64s{}
     }
     a := v.(*Float64s)
-    a.A = slicesutil.ExtendCapacity(a.A, size)
-    a.A = a.A[:size]
+    a.A = slicesutil.SetLength(a.A, size)
     return a
 }
@@ -519,8 +519,7 @@ func GetInt64s(size int) *Int64s {
         }
     }
     is := v.(*Int64s)
-    is.A = slicesutil.ExtendCapacity(is.A, size)
-    is.A = is.A[:size]
+    is.A = slicesutil.SetLength(is.A, size)
     return is
 }
@@ -546,8 +545,7 @@ func GetUint64s(size int) *Uint64s {
         }
     }
     is := v.(*Uint64s)
-    is.A = slicesutil.ExtendCapacity(is.A, size)
-    is.A = is.A[:size]
+    is.A = slicesutil.SetLength(is.A, size)
     return is
 }
@@ -573,8 +571,7 @@ func GetUint32s(size int) *Uint32s {
         }
     }
     is := v.(*Uint32s)
-    is.A = slicesutil.ExtendCapacity(is.A, size)
-    is.A = is.A[:size]
+    is.A = slicesutil.SetLength(is.A, size)
     return is
 }
@@ -4,6 +4,8 @@ import (
     "fmt"
     "testing"
     "time"
+
+    "github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
 )
 
 func TestGetPutConcurrent(t *testing.T) {
@@ -19,7 +21,7 @@ func TestGetPutConcurrent(t *testing.T) {
             if capacity < 0 {
                 capacity = 0
             }
-            bb.B = append(bb.B, make([]byte, capacity)...)
+            bb.B = slicesutil.SetLength(bb.B, len(bb.B)+capacity)
             Put(bb)
         }
         doneCh <- struct{}{}
@@ -47,8 +47,7 @@ func (bm *bitmap) copyFrom(src *bitmap) {
 func (bm *bitmap) init(bitsLen int) {
     a := bm.a
     wordsLen := (bitsLen + 63) / 64
-    a = slicesutil.ExtendCapacity(a, wordsLen)
-    a = a[:wordsLen]
+    a = slicesutil.SetLength(a, wordsLen)
     bm.a = a
     bm.bitsLen = bitsLen
 }
@@ -143,8 +143,7 @@ func (c *column) canStoreInConstColumn() bool {
 }
 
 func (c *column) resizeValues(valuesLen int) []string {
-    values := slicesutil.ExtendCapacity(c.values, valuesLen)
-    c.values = values[:valuesLen]
+    c.values = slicesutil.SetLength(c.values, valuesLen)
     return c.values
 }
@@ -367,8 +366,7 @@ func (b *block) extendColumns() *column {
 }
 
 func (b *block) resizeColumns(columnsLen int) []column {
-    cs := slicesutil.ExtendCapacity(b.columns, columnsLen)
-    b.columns = cs[:columnsLen]
+    b.columns = slicesutil.SetLength(b.columns, columnsLen)
     return b.columns
 }
@@ -53,8 +53,7 @@ func (bd *blockData) reset() {
 }
 
 func (bd *blockData) resizeColumnsData(columnsDataLen int) []columnData {
-    cds := slicesutil.ExtendCapacity(bd.columnsData, columnsDataLen)
-    bd.columnsData = cds[:columnsDataLen]
+    bd.columnsData = slicesutil.SetLength(bd.columnsData, columnsDataLen)
     return bd.columnsData
 }
@@ -264,14 +264,12 @@ func (csh *columnsHeader) getColumnHeader(name string) *columnHeader {
 }
 
 func (csh *columnsHeader) resizeConstColumns(columnsLen int) []Field {
-    ccs := slicesutil.ExtendCapacity(csh.constColumns, columnsLen)
-    csh.constColumns = ccs[:columnsLen]
+    csh.constColumns = slicesutil.SetLength(csh.constColumns, columnsLen)
     return csh.constColumns
 }
 
 func (csh *columnsHeader) resizeColumnHeaders(columnHeadersLen int) []columnHeader {
-    chs := slicesutil.ExtendCapacity(csh.columnHeaders, columnHeadersLen)
-    csh.columnHeaders = chs[:columnHeadersLen]
+    csh.columnHeaders = slicesutil.SetLength(csh.columnHeaders, columnHeadersLen)
     return csh.columnHeaders
 }
@@ -55,8 +55,7 @@ func (bf *bloomFilter) unmarshal(src []byte) error {
     }
     bf.reset()
     wordsCount := len(src) / 8
-    bits := slicesutil.ExtendCapacity(bf.bits, wordsCount)
-    bits = bits[:wordsCount]
+    bits := slicesutil.SetLength(bf.bits, wordsCount)
     for i := range bits {
         bits[i] = encoding.UnmarshalUint64(src)
         src = src[8:]
@@ -69,8 +68,7 @@ func (bf *bloomFilter) unmarshal(src []byte) error {
 func (bf *bloomFilter) mustInit(tokens []string) {
     bitsCount := len(tokens) * bloomFilterBitsPerItem
     wordsCount := (bitsCount + 63) / 64
-    bits := slicesutil.ExtendCapacity(bf.bits, wordsCount)
-    bits = bits[:wordsCount]
+    bits := slicesutil.SetLength(bf.bits, wordsCount)
     bloomFilterAdd(bits, tokens)
     bf.bits = bits
 }
@@ -14,6 +14,7 @@ import (
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/mergeset"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/regexutil"
+    "github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
 )
 
 const (
@@ -853,13 +854,9 @@ func (sp *tagToStreamIDsRowParser) ParseStreamIDs() {
     }
     tail := sp.tail
     n := len(tail) / 16
-    streamIDs := sp.StreamIDs[:0]
-    if n <= cap(streamIDs) {
-        streamIDs = streamIDs[:n]
-    } else {
-        streamIDs = append(streamIDs[:cap(streamIDs)], make([]u128, n-cap(streamIDs))...)
-    }
-    sp.StreamIDs = streamIDs
+    sp.StreamIDs = slicesutil.SetLength(sp.StreamIDs, n)
+    streamIDs := sp.StreamIDs
+    _ = streamIDs[n-1]
     for i := 0; i < n; i++ {
         var err error
         tail, err = streamIDs[i].unmarshal(tail)
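The rewritten parser keeps the _ = streamIDs[n-1] line (also kept in tagToMetricIDsRowParser.ParseMetricIDs further down): a common Go idiom that performs one up-front bounds check, which panics early if n is inconsistent with the data and can let the compiler drop per-iteration checks on the indexed writes in the loop. A standalone sketch of the idiom (illustrative code, not VictoriaMetrics's):

package main

import "fmt"

// sumLE8 decodes len(tail)/8 little-endian uint64 values and sums them.
func sumLE8(tail []byte) uint64 {
    n := len(tail) / 8
    if n == 0 {
        return 0
    }
    vals := make([]uint64, n)
    _ = vals[n-1] // one bounds check up front, rather than one per iteration
    var s uint64
    for i := 0; i < n; i++ {
        for j := 0; j < 8; j++ {
            vals[i] |= uint64(tail[i*8+j]) << (8 * j)
        }
        s += vals[i]
    }
    return s
}

func main() {
    data := []byte{1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0}
    fmt.Println(sumLE8(data)) // 3
}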
@@ -198,8 +198,7 @@ func getEmptyStrings(rowsCount int) []string {
         return values
     }
     values := *p
-    values = slicesutil.ExtendCapacity(values, rowsCount)
-    return values[:rowsCount]
+    return slicesutil.SetLength(values, rowsCount)
 }
 
 var emptyStrings atomic.Pointer[[]string]
@@ -161,8 +161,7 @@ func unmarshalBlockHeadersNoCopy(dst []blockHeader, src []byte, blockHeadersCount
         logger.Panicf("BUG: blockHeadersCount must be greater than 0; got %d", blockHeadersCount)
     }
     dstLen := len(dst)
-    dst = slicesutil.ExtendCapacity(dst, blockHeadersCount)
-    dst = dst[:dstLen+blockHeadersCount]
+    dst = slicesutil.SetLength(dst, dstLen+blockHeadersCount)
     for i := 0; i < blockHeadersCount; i++ {
         tail, err := dst[dstLen+i].UnmarshalNoCopy(src)
         if err != nil {
@@ -413,8 +413,7 @@ func (ib *inmemoryBlock) UnmarshalData(sb *storageBlock, firstItem, commonPrefix
     // since the data isn't going to be resized after unmarshaling.
     // This may save memory for caching the unmarshaled block.
     data := bytesutil.ResizeNoCopyNoOverallocate(ib.data, dataLen)
-    ib.items = slicesutil.ExtendCapacity(ib.items, int(itemsCount))
-    ib.items = ib.items[:itemsCount]
+    ib.items = slicesutil.SetLength(ib.items, int(itemsCount))
     data = append(data[:0], firstItem...)
     items := ib.items
     items[0] = Item{
@@ -553,10 +552,7 @@ func getLensBuffer(n int) *lensBuffer {
         v = &lensBuffer{}
     }
     lb := v.(*lensBuffer)
-    if nn := n - cap(lb.lens); nn > 0 {
-        lb.lens = append(lb.lens[:cap(lb.lens)], make([]uint64, nn)...)
-    }
-    lb.lens = lb.lens[:n]
+    lb.lens = slicesutil.SetLength(lb.lens, n)
     return lb
 }
@@ -72,8 +72,7 @@ func (ts *TableSearch) Init(tb *Table) {
     ts.pws = ts.tb.getParts(ts.pws[:0])
 
     // Initialize the psPool.
-    ts.psPool = slicesutil.ExtendCapacity(ts.psPool, len(ts.pws))
-    ts.psPool = ts.psPool[:len(ts.pws)]
+    ts.psPool = slicesutil.SetLength(ts.psPool, len(ts.pws))
     for i, pw := range ts.pws {
         ts.psPool[i].Init(pw.p)
     }
@@ -10,8 +10,7 @@ import (
 func (wr *WriteRequest) MarshalProtobuf(dst []byte) []byte {
     size := wr.Size()
     dstLen := len(dst)
-    dst = slicesutil.ExtendCapacity(dst, size)
-    dst = dst[:dstLen+size]
+    dst = slicesutil.SetLength(dst, dstLen+size)
     n, err := wr.MarshalToSizedBuffer(dst[dstLen:])
     if err != nil {
         panic(fmt.Errorf("BUG: unexpected error when marshaling WriteRequest: %w", err))
@@ -1,10 +1,23 @@
 package slicesutil
 
-// ExtendCapacity returns a with the capacity extended to len(a)+n if needed.
-func ExtendCapacity[T any](a []T, n int) []T {
+// ExtendCapacity returns a with the capacity extended to len(a)+itemsToAdd.
+//
+// It may allocate new slice if cap(a) is smaller than len(a)+itemsToAdd.
+func ExtendCapacity[T any](a []T, itemsToAdd int) []T {
     aLen := len(a)
-    if n := aLen + n - cap(a); n > 0 {
+    if n := aLen + itemsToAdd - cap(a); n > 0 {
+        a = append(a[:cap(a)], make([]T, n)...)
+        return a[:aLen]
+    }
+    return a
+}
+
+// SetLength sets len(a) to newLen and returns the result.
+//
+// It may allocate new slice if cap(a) is smaller than newLen.
+func SetLength[T any](a []T, newLen int) []T {
+    if n := newLen - cap(a); n > 0 {
         a = append(a[:cap(a)], make([]T, n)...)
     }
-    return a[:aLen]
+    return a[:newLen]
 }
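The two helpers now split cleanly by intent: ExtendCapacity grows the backing array while preserving len(a) (for call sites that append afterwards, such as Set.AppendTo below), while SetLength resizes to an exact length (for the indexed-write loops above). A short usage sketch against the package as defined in this hunk:

package main

import (
    "fmt"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
)

func main() {
    a := make([]int, 2, 2)

    // Room for 8 more elements; length stays 2.
    a = slicesutil.ExtendCapacity(a, 8)
    fmt.Println(len(a), cap(a) >= 10) // 2 true

    // Exact length 16; safe to write by index.
    a = slicesutil.SetLength(a, 16)
    a[15] = 42
    fmt.Println(len(a), cap(a) >= 16) // 16 true
}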
@@ -22,6 +22,7 @@ import (
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/mergeset"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
+    "github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/uint64set"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/workingsetcache"
     "github.com/VictoriaMetrics/fastcache"
@@ -3153,13 +3154,8 @@ func (mp *tagToMetricIDsRowParser) ParseMetricIDs() {
         return
     }
     tail := mp.tail
-    mp.MetricIDs = mp.MetricIDs[:0]
     n := len(tail) / 8
-    if n <= cap(mp.MetricIDs) {
-        mp.MetricIDs = mp.MetricIDs[:n]
-    } else {
-        mp.MetricIDs = append(mp.MetricIDs[:cap(mp.MetricIDs)], make([]uint64, n-cap(mp.MetricIDs))...)
-    }
+    mp.MetricIDs = slicesutil.SetLength(mp.MetricIDs, n)
     metricIDs := mp.MetricIDs
     _ = metricIDs[n-1]
     for i := 0; i < n; i++ {
@@ -703,8 +703,8 @@ func (mn *MetricName) sortTags() {
     }
 
     cts := getCanonicalTags()
-    cts.tags = slicesutil.ExtendCapacity(cts.tags, len(mn.Tags))
-    dst := cts.tags[:len(mn.Tags)]
+    cts.tags = slicesutil.SetLength(cts.tags, len(mn.Tags))
+    dst := cts.tags
     for i := range mn.Tags {
         tag := &mn.Tags[i]
         ct := &dst[i]
@@ -774,8 +774,7 @@ func (ts *canonicalTagsSort) Swap(i, j int) {
 
 func copyTags(dst, src []Tag) []Tag {
     dstLen := len(dst)
-    dst = slicesutil.ExtendCapacity(dst, len(src))
-    dst = dst[:dstLen+len(src)]
+    dst = slicesutil.SetLength(dst, dstLen+len(src))
     for i := range src {
         dst[dstLen+i].copyFrom(&src[i])
     }
@@ -84,8 +84,7 @@ func (pts *partitionSearch) Init(pt *partition, tsids []TSID, tr TimeRange) {
     pts.pws = pt.GetParts(pts.pws[:0], true)
 
     // Initialize psPool.
-    pts.psPool = slicesutil.ExtendCapacity(pts.psPool, len(pts.pws))
-    pts.psPool = pts.psPool[:len(pts.pws)]
+    pts.psPool = slicesutil.SetLength(pts.psPool, len(pts.pws))
     for i, pw := range pts.pws {
         pts.psPool[i].Init(pw.p, tsids, tr)
     }
@@ -414,8 +414,7 @@ func (sq *SearchQuery) Unmarshal(src []byte) ([]byte, error) {
     if err != nil {
         return src, fmt.Errorf("cannot unmarshal the count of TagFilterss: %w", err)
     }
-    sq.TagFilterss = slicesutil.ExtendCapacity(sq.TagFilterss, int(tfssCount))
-    sq.TagFilterss = sq.TagFilterss[:tfssCount]
+    sq.TagFilterss = slicesutil.SetLength(sq.TagFilterss, int(tfssCount))
     src = tail
 
     for i := 0; i < int(tfssCount); i++ {
@@ -426,8 +425,7 @@ func (sq *SearchQuery) Unmarshal(src []byte) ([]byte, error) {
         src = tail
 
         tagFilters := sq.TagFilterss[i]
-        tagFilters = slicesutil.ExtendCapacity(tagFilters, int(tfsCount))
-        tagFilters = tagFilters[:tfsCount]
+        tagFilters = slicesutil.SetLength(tagFilters, int(tfsCount))
         for j := 0; j < int(tfsCount); j++ {
             tail, err := tagFilters[j].Unmarshal(src)
             if err != nil {
@@ -85,8 +85,7 @@ func (ts *tableSearch) Init(tb *table, tsids []TSID, tr TimeRange) {
     ts.ptws = tb.GetPartitions(ts.ptws[:0])
 
     // Initialize the ptsPool.
-    ts.ptsPool = slicesutil.ExtendCapacity(ts.ptsPool, len(ts.ptws))
-    ts.ptsPool = ts.ptsPool[:len(ts.ptws)]
+    ts.ptsPool = slicesutil.SetLength(ts.ptsPool, len(ts.ptws))
     for i, ptw := range ts.ptws {
         ts.ptsPool[i].Init(ptw.pt, tsids, tr)
     }
@@ -230,6 +230,7 @@ func (s *Set) AppendTo(dst []uint64) []uint64 {
     // pre-allocate memory for dst
     sLen := s.Len()
     dst = slicesutil.ExtendCapacity(dst, sLen)
+
     s.sort()
     for i := range s.buckets {
         dst = s.buckets[i].appendTo(dst)