package storage

import (
	"fmt"
	"math/rand"
	"os"
	"testing"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
)

func BenchmarkTableAddRows(b *testing.B) {
	for _, tsidsCount := range []int{1e0, 1e1, 1e2, 1e3, 1e4} {
		b.Run(fmt.Sprintf("tsidsCount_%d", tsidsCount), func(b *testing.B) {
			for _, rowsPerInsert := range []int{1, 1e1, 1e2, 1e3, 1e4, 1e5} {
				b.Run(fmt.Sprintf("rowsPerInsert_%d", rowsPerInsert), func(b *testing.B) {
					benchmarkTableAddRows(b, rowsPerInsert, tsidsCount)
				})
			}
		})
	}
}

func benchmarkTableAddRows(b *testing.B, rowsPerInsert, tsidsCount int) {
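	// Generate a single batch of rowsPerInsert rows with increasing timestamps,
	// random-walk values and metric IDs spread across tsidsCount distinct TSIDs.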
	rows := make([]rawRow, rowsPerInsert)
	startTimestamp := timestampFromTime(time.Now())
	timestamp := startTimestamp
	value := float64(100)
	rng := rand.New(rand.NewSource(1))
	for i := 0; i < rowsPerInsert; i++ {
		r := &rows[i]
		r.PrecisionBits = defaultPrecisionBits
		r.TSID.MetricID = uint64(rng.Intn(tsidsCount) + 1)
		r.Timestamp = timestamp
		r.Value = value

		timestamp += 10 + rng.Int63n(2)
		value += float64(int(rng.NormFloat64() * 5))
	}
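	// timestampDelta covers the time range of a single batch; per-goroutine row
	// copies are shifted by this delta below in order to move to the next timestamp chunk.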
	timestampDelta := timestamp - startTimestamp
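
	// Every benchmark iteration performs insertsCount inserts of len(rows) rows each.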
	insertsCount := int(1e3)
	rowsCountExpected := insertsCount * len(rows)
	b.ResetTimer()
	b.ReportAllocs()
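	// Count rowsCountExpected rows as the "bytes" processed per iteration,
	// so the benchmark's MB/s column effectively reports millions of rows per second.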
	b.SetBytes(int64(rowsCountExpected))
	tablePath := "benchmarkTableAddRows"
	strg := newTestStorage()
	for i := 0; i < b.N; i++ {
		tb := mustOpenTable(tablePath, strg)
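
		// Pre-fill workCh with insertsCount work items, one per insert operation.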
		workCh := make(chan struct{}, insertsCount)
		for j := 0; j < insertsCount; j++ {
			workCh <- struct{}{}
		}
		close(workCh)

		doneCh := make(chan struct{})
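		// Spread the inserts among all the available CPU cores.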
		gomaxprocs := cgroup.AvailableCPUs()

		for j := 0; j < gomaxprocs; j++ {
			go func(goroutineID int) {
				// Make per-goroutine rows copy with distinct timestamps.
				rowsCopy := append([]rawRow{}, rows...)
				for k := range rowsCopy {
					r := &rowsCopy[k]
					r.Timestamp += int64(goroutineID)
					r.Value += float64(goroutineID)
				}

				for range workCh {
					// Update rowsCopy to the next timestamp chunk.
					for q := range rowsCopy {
						r := &rowsCopy[q]
						r.Timestamp += timestampDelta
						r.Value++
					}
					// Add updated rowsCopy.
					tb.MustAddRows(rowsCopy)
				}

				doneCh <- struct{}{}
			}(j)
		}
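
		// Wait until all the insert goroutines are done.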
		for j := 0; j < gomaxprocs; j++ {
			<-doneCh
		}

		tb.MustClose()

		// Open the table from files and verify the rows count on it.
		tb = mustOpenTable(tablePath, strg)
		var m TableMetrics
		tb.UpdateMetrics(&m)
		if rowsCount := m.TotalRowsCount(); rowsCount != uint64(rowsCountExpected) {
			b.Fatalf("unexpected rows count in the final table %q: got %d; want %d", tablePath, rowsCount, rowsCountExpected)
		}
		tb.MustClose()

		// Remove the table.
		if err := os.RemoveAll(tablePath); err != nil {
			b.Fatalf("cannot remove table %q: %s", tablePath, err)
		}
	}
	stopTestStorage(strg)
}