lib/storage: use deterministic random generator in tests

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3683
Author: Aliaksandr Valialkin
Date:   2023-01-23 20:10:29 -08:00
Parent: 1a3a6ef907
Commit: ba5a6c851c
13 changed files with 140 additions and 105 deletions
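
All of the changed files follow the same recipe: calls to the package-level math/rand functions are replaced with a locally constructed *rand.Rand seeded with a fixed value, so each test and benchmark generates the same input data on every run, independent of whatever else draws from the global generator. A minimal self-contained sketch of the before/after (the names and values here are illustrative, not taken from the diff):

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// Before: rand.Float64() reads from the shared global generator, so the
	// data depends on everything else that consumed random numbers first.
	// After: a private, fixed-seed generator yields the same sequence on
	// every run, making test failures reproducible.
	rng := rand.New(rand.NewSource(1))
	for i := 0; i < 3; i++ {
		fmt.Println(rng.Float64()*1e9 - 5e8)
	}
}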


@@ -17,11 +17,12 @@ func TestBlockStreamReaderSingleRow(t *testing.T) {
 }
 
 func TestBlockStreamReaderSingleBlockManyRows(t *testing.T) {
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = defaultPrecisionBits
 	for i := 0; i < maxRowsPerBlock; i++ {
-		r.Value = rand.Float64()*1e9 - 5e8
+		r.Value = rng.Float64()*1e9 - 5e8
 		r.Timestamp = int64(i * 1e9)
 		rows = append(rows, r)
 	}
@@ -29,24 +30,26 @@ func TestBlockStreamReaderSingleBlockManyRows(t *testing.T) {
 }
 
 func TestBlockStreamReaderSingleTSIDManyBlocks(t *testing.T) {
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = 1
 	for i := 0; i < 5*maxRowsPerBlock; i++ {
-		r.Value = rand.NormFloat64() * 1e4
-		r.Timestamp = int64(rand.NormFloat64() * 1e9)
+		r.Value = rng.NormFloat64() * 1e4
+		r.Timestamp = int64(rng.NormFloat64() * 1e9)
 		rows = append(rows, r)
 	}
 	testBlocksStreamReader(t, rows, 5)
 }
 
 func TestBlockStreamReaderManyTSIDSingleRow(t *testing.T) {
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = defaultPrecisionBits
 	for i := 0; i < 1000; i++ {
 		r.TSID.MetricID = uint64(i)
-		r.Value = rand.Float64()*1e9 - 5e8
+		r.Value = rng.Float64()*1e9 - 5e8
 		r.Timestamp = int64(i * 1e9)
 		rows = append(rows, r)
 	}
@@ -54,28 +57,30 @@ func TestBlockStreamReaderManyTSIDSingleRow(t *testing.T) {
 }
 
 func TestBlockStreamReaderManyTSIDManyRows(t *testing.T) {
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = defaultPrecisionBits
 	const blocks = 123
 	for i := 0; i < 3210; i++ {
 		r.TSID.MetricID = uint64((1e9 - i) % blocks)
-		r.Value = rand.Float64()
-		r.Timestamp = int64(rand.Float64() * 1e9)
+		r.Value = rng.Float64()
+		r.Timestamp = int64(rng.Float64() * 1e9)
 		rows = append(rows, r)
 	}
 	testBlocksStreamReader(t, rows, blocks)
 }
 
 func TestBlockStreamReaderReadConcurrent(t *testing.T) {
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = defaultPrecisionBits
 	const blocks = 123
 	for i := 0; i < 3210; i++ {
 		r.TSID.MetricID = uint64((1e9 - i) % blocks)
-		r.Value = rand.Float64()
-		r.Timestamp = int64(rand.Float64() * 1e9)
+		r.Value = rng.Float64()
+		r.Timestamp = int64(rng.Float64() * 1e9)
 		rows = append(rows, r)
 	}
 	var mp inmemoryPart


@@ -11,13 +11,14 @@ import (
 )
 
 func TestBlockMarshalUnmarshalPortable(t *testing.T) {
+	rng := rand.New(rand.NewSource(1))
 	var b Block
 	for i := 0; i < 1000; i++ {
 		b.Reset()
-		rowsCount := rand.Intn(maxRowsPerBlock) + 1
+		rowsCount := rng.Intn(maxRowsPerBlock) + 1
 		b.timestamps = getRandTimestamps(rowsCount)
 		b.values = getRandValues(rowsCount)
-		b.bh.Scale = int16(rand.Intn(30) - 15)
+		b.bh.Scale = int16(rng.Intn(30) - 15)
 		b.bh.PrecisionBits = uint8(64 - (i % 64))
 		testBlockMarshalUnmarshalPortable(t, &b)
 	}
@@ -129,19 +130,21 @@ func getValuesForPrecisionBits(values []int64, precisionBits uint8) []int64 {
 }
 
 func getRandValues(rowsCount int) []int64 {
+	rng := rand.New(rand.NewSource(1))
 	a := make([]int64, rowsCount)
 	for i := 0; i < rowsCount; i++ {
-		a[i] = int64(rand.Intn(1e5) - 0.5e5)
+		a[i] = int64(rng.Intn(1e5) - 0.5e5)
 	}
 	return a
 }
 
 func getRandTimestamps(rowsCount int) []int64 {
+	rng := rand.New(rand.NewSource(1))
 	a := make([]int64, rowsCount)
-	ts := int64(rand.Intn(1e9))
+	ts := int64(rng.Intn(1e9))
 	for i := 0; i < rowsCount; i++ {
 		a[i] = ts
-		ts += int64(rand.Intn(1e5))
+		ts += int64(rng.Intn(1e5))
 	}
 	return a
 }
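
Note that getRandValues and getRandTimestamps each construct a fresh generator seeded with 1 on every call, so two calls with the same rowsCount return identical slices. That is acceptable here, where only reproducibility of the round-trip data matters. A minimal sketch of that property (randValues is a hypothetical stand-in for the helper above):

package main

import (
	"fmt"
	"math/rand"
	"reflect"
)

// randValues mirrors getRandValues: a fresh fixed-seed generator per call.
func randValues(n int) []int64 {
	rng := rand.New(rand.NewSource(1))
	a := make([]int64, n)
	for i := range a {
		a[i] = int64(rng.Intn(1e5) - 0.5e5)
	}
	return a
}

func main() {
	// Identical seed per call means identical output per call.
	fmt.Println(reflect.DeepEqual(randValues(5), randValues(5))) // true
}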


@@ -596,6 +596,7 @@ func TestIndexDB(t *testing.T) {
 }
 
 func testIndexDBGetOrCreateTSIDByName(db *indexDB, metricGroups int) ([]MetricName, []TSID, error) {
+	r := rand.New(rand.NewSource(1))
 	// Create tsids.
 	var mns []MetricName
 	var tsids []TSID
@@ -612,7 +613,7 @@ func testIndexDBGetOrCreateTSIDByName(db *indexDB, metricGroups int) ([]MetricNa
 		mn.MetricGroup = []byte(fmt.Sprintf("metricGroup.%d\x00\x01\x02", i%metricGroups))
 
 		// Init other tags.
-		tagsCount := rand.Intn(10) + 1
+		tagsCount := r.Intn(10) + 1
 		for j := 0; j < tagsCount; j++ {
 			key := fmt.Sprintf("key\x01\x02\x00_%d_%d", i, j)
 			value := fmt.Sprintf("val\x01_%d\x00_%d\x02", i, j)
@@ -1455,6 +1456,7 @@ func TestMatchTagFilters(t *testing.T) {
 }
 
 func TestIndexDBRepopulateAfterRotation(t *testing.T) {
+	r := rand.New(rand.NewSource(1))
 	path := "TestIndexRepopulateAfterRotation"
 	s, err := OpenStorage(path, msecsPerMonth, 1e5, 1e5)
 	if err != nil {
@@ -1475,7 +1477,7 @@ func TestIndexDBRepopulateAfterRotation(t *testing.T) {
 	const metricRowsN = 1000
 	// use min-max timestamps of 1month range to create smaller number of partitions
 	timeMin, timeMax := time.Now().Add(-730*time.Hour), time.Now()
-	mrs := testGenerateMetricRows(metricRowsN, timeMin.UnixMilli(), timeMax.UnixMilli())
+	mrs := testGenerateMetricRows(r, metricRowsN, timeMin.UnixMilli(), timeMax.UnixMilli())
 	if err := s.AddRows(mrs, defaultPrecisionBits); err != nil {
 		t.Fatalf("unexpected error when adding mrs: %s", err)
 	}


@@ -19,6 +19,7 @@ func TestInmemoryPartInitFromRows(t *testing.T) {
 		},
 	}, 1)
 
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 
@@ -27,8 +28,8 @@ func TestInmemoryPartInitFromRows(t *testing.T) {
 	initTestTSID(&r.TSID)
 	r.PrecisionBits = defaultPrecisionBits
 	for i := uint64(0); i < 1e4; i++ {
-		r.Timestamp = int64(rand.NormFloat64() * 1e7)
-		r.Value = rand.NormFloat64() * 100
+		r.Timestamp = int64(rng.NormFloat64() * 1e7)
+		r.Value = rng.NormFloat64() * 100
 		rows = append(rows, r)
 	}
 
@@ -39,8 +40,8 @@ func TestInmemoryPartInitFromRows(t *testing.T) {
 
 	for i := 0; i < 1e4; i++ {
 		initTestTSID(&r.TSID)
 		r.TSID.MetricID = uint64(i)
-		r.Timestamp = int64(rand.NormFloat64() * 1e7)
-		r.Value = rand.NormFloat64() * 100
+		r.Timestamp = int64(rng.NormFloat64() * 1e7)
+		r.Value = rng.NormFloat64() * 100
 		r.PrecisionBits = uint8(i%64) + 1
 		rows = append(rows, r)


@@ -26,12 +26,13 @@ func benchmarkInmemoryPartInitFromRows(b *testing.B, rows []rawRow) {
 
 // Each row belongs to an unique TSID
 var benchRawRowsWorstCase = func() []rawRow {
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	for i := 0; i < 1e5; i++ {
 		r.TSID.MetricID = uint64(i)
-		r.Timestamp = rand.Int63()
-		r.Value = rand.NormFloat64()
+		r.Timestamp = rng.Int63()
+		r.Value = rng.NormFloat64()
 		r.PrecisionBits = uint8(i%64) + 1
 		rows = append(rows, r)
 	}


@@ -20,14 +20,15 @@ func TestMergeBlockStreamsOneStreamOneRow(t *testing.T) {
 }
 
 func TestMergeBlockStreamsOneStreamOneBlockManyRows(t *testing.T) {
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = 4
 	minTimestamp := int64(1<<63 - 1)
 	maxTimestamp := int64(-1 << 63)
 	for i := 0; i < maxRowsPerBlock; i++ {
-		r.Timestamp = int64(rand.Intn(1e9))
-		r.Value = rand.NormFloat64() * 2332
+		r.Timestamp = int64(rng.Intn(1e9))
+		r.Value = rng.NormFloat64() * 2332
 		rows = append(rows, r)
 
 		if r.Timestamp < minTimestamp {
@@ -43,6 +44,7 @@ func TestMergeBlockStreamsOneStreamOneBlockManyRows(t *testing.T) {
 }
 
 func TestMergeBlockStreamsOneStreamManyBlocksOneRow(t *testing.T) {
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = 4
@@ -52,8 +54,8 @@ func TestMergeBlockStreamsOneStreamManyBlocksOneRow(t *testing.T) {
 	for i := 0; i < blocksCount; i++ {
 		initTestTSID(&r.TSID)
 		r.TSID.MetricID = uint64(i * 123)
-		r.Timestamp = int64(rand.Intn(1e9))
-		r.Value = rand.NormFloat64() * 2332
+		r.Timestamp = int64(rng.Intn(1e9))
+		r.Value = rng.NormFloat64() * 2332
 		rows = append(rows, r)
 
 		if r.Timestamp < minTimestamp {
@@ -69,6 +71,7 @@ func TestMergeBlockStreamsOneStreamManyBlocksOneRow(t *testing.T) {
 }
 
 func TestMergeBlockStreamsOneStreamManyBlocksManyRows(t *testing.T) {
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	initTestTSID(&r.TSID)
@@ -79,8 +82,8 @@ func TestMergeBlockStreamsOneStreamManyBlocksManyRows(t *testing.T) {
 	maxTimestamp := int64(-1 << 63)
 	for i := 0; i < rowsCount; i++ {
 		r.TSID.MetricID = uint64(i % blocksCount)
-		r.Timestamp = int64(rand.Intn(1e9))
-		r.Value = rand.NormFloat64() * 2332
+		r.Timestamp = int64(rng.Intn(1e9))
+		r.Value = rng.NormFloat64() * 2332
 		rows = append(rows, r)
 
 		if r.Timestamp < minTimestamp {
@@ -169,6 +172,7 @@ func TestMergeBlockStreamsTwoStreamsManyBlocksManyRows(t *testing.T) {
 	minTimestamp := int64(1<<63 - 1)
 	maxTimestamp := int64(-1 << 63)
 
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	initTestTSID(&r.TSID)
@@ -176,8 +180,8 @@ func TestMergeBlockStreamsTwoStreamsManyBlocksManyRows(t *testing.T) {
 	const rowsCount1 = 4938
 	for i := 0; i < rowsCount1; i++ {
 		r.TSID.MetricID = uint64(i % blocksCount)
-		r.Timestamp = int64(rand.Intn(1e9))
-		r.Value = rand.NormFloat64() * 2332
+		r.Timestamp = int64(rng.Intn(1e9))
+		r.Value = rng.NormFloat64() * 2332
 		rows = append(rows, r)
 
 		if r.Timestamp < minTimestamp {
@@ -193,8 +197,8 @@ func TestMergeBlockStreamsTwoStreamsManyBlocksManyRows(t *testing.T) {
 	const rowsCount2 = 3281
 	for i := 0; i < rowsCount2; i++ {
 		r.TSID.MetricID = uint64((i + 17) % blocksCount)
-		r.Timestamp = int64(rand.Intn(1e9))
-		r.Value = rand.NormFloat64() * 2332
+		r.Timestamp = int64(rng.Intn(1e9))
+		r.Value = rng.NormFloat64() * 2332
 		rows = append(rows, r)
 
 		if r.Timestamp < minTimestamp {
@@ -214,13 +218,14 @@ func TestMergeBlockStreamsTwoStreamsBigOverlappingBlocks(t *testing.T) {
 	minTimestamp := int64(1<<63 - 1)
 	maxTimestamp := int64(-1 << 63)
 
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = 5
 	const rowsCount1 = maxRowsPerBlock + 234
 	for i := 0; i < rowsCount1; i++ {
 		r.Timestamp = int64(i * 2894)
-		r.Value = float64(int(rand.NormFloat64() * 1e2))
+		r.Value = float64(int(rng.NormFloat64() * 1e2))
 		rows = append(rows, r)
 
 		if r.Timestamp < minTimestamp {
@@ -236,7 +241,7 @@ func TestMergeBlockStreamsTwoStreamsBigOverlappingBlocks(t *testing.T) {
 	const rowsCount2 = maxRowsPerBlock + 2344
 	for i := 0; i < rowsCount2; i++ {
 		r.Timestamp = int64(i * 2494)
-		r.Value = float64(int(rand.NormFloat64() * 1e2))
+		r.Value = float64(int(rng.NormFloat64() * 1e2))
 		rows = append(rows, r)
 
 		if r.Timestamp < minTimestamp {
@@ -256,13 +261,14 @@ func TestMergeBlockStreamsTwoStreamsBigSequentialBlocks(t *testing.T) {
 	minTimestamp := int64(1<<63 - 1)
 	maxTimestamp := int64(-1 << 63)
 
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = 5
 	const rowsCount1 = maxRowsPerBlock + 234
 	for i := 0; i < rowsCount1; i++ {
 		r.Timestamp = int64(i * 2894)
-		r.Value = float64(int(rand.NormFloat64() * 1e2))
+		r.Value = float64(int(rng.NormFloat64() * 1e2))
 		rows = append(rows, r)
 
 		if r.Timestamp < minTimestamp {
@@ -279,7 +285,7 @@ func TestMergeBlockStreamsTwoStreamsBigSequentialBlocks(t *testing.T) {
 	const rowsCount2 = maxRowsPerBlock - 233
 	for i := 0; i < rowsCount2; i++ {
 		r.Timestamp = maxTimestampB1 + int64(i*2494)
-		r.Value = float64(int(rand.NormFloat64() * 1e2))
+		r.Value = float64(int(rng.NormFloat64() * 1e2))
 		rows = append(rows, r)
 
 		if r.Timestamp < minTimestamp {
@@ -303,16 +309,17 @@ func TestMergeBlockStreamsManyStreamsManyBlocksManyRows(t *testing.T) {
 	initTestTSID(&r.TSID)
 	r.PrecisionBits = defaultPrecisionBits
 
+	rng := rand.New(rand.NewSource(1))
 	rowsCount := 0
 	const blocksCount = 113
 	var bsrs []*blockStreamReader
 	for i := 0; i < 20; i++ {
-		rowsPerStream := rand.Intn(500)
+		rowsPerStream := rng.Intn(500)
 		var rows []rawRow
 		for j := 0; j < rowsPerStream; j++ {
 			r.TSID.MetricID = uint64(j % blocksCount)
-			r.Timestamp = int64(rand.Intn(1e9))
-			r.Value = rand.NormFloat64()
+			r.Timestamp = int64(rng.Intn(1e9))
+			r.Value = rng.NormFloat64()
 			rows = append(rows, r)
 
 			if r.Timestamp < minTimestamp {
@@ -337,15 +344,16 @@ func TestMergeForciblyStop(t *testing.T) {
 	initTestTSID(&r.TSID)
 	r.PrecisionBits = defaultPrecisionBits
 
+	rng := rand.New(rand.NewSource(1))
 	const blocksCount = 113
 	var bsrs []*blockStreamReader
 	for i := 0; i < 20; i++ {
-		rowsPerStream := rand.Intn(1000)
+		rowsPerStream := rng.Intn(1000)
 		var rows []rawRow
 		for j := 0; j < rowsPerStream; j++ {
 			r.TSID.MetricID = uint64(j % blocksCount)
-			r.Timestamp = int64(rand.Intn(1e9))
-			r.Value = rand.NormFloat64()
+			r.Timestamp = int64(rng.Intn(1e9))
+			r.Value = rng.NormFloat64()
 			rows = append(rows, r)
 
 			if r.Timestamp < minTimestamp {


@@ -50,12 +50,13 @@ func benchmarkMergeBlockStreams(b *testing.B, mps []*inmemoryPart, rowsPerLoop i
 }
 
 var benchTwoSourcesWorstCaseMPS = func() []*inmemoryPart {
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = defaultPrecisionBits
 	for i := 0; i < maxRowsPerBlock/2-1; i++ {
-		r.Value = rand.NormFloat64()
-		r.Timestamp = rand.Int63n(1e12)
+		r.Value = rng.NormFloat64()
+		r.Timestamp = rng.Int63n(1e12)
 		rows = append(rows, r)
 	}
 	mp := newTestInmemoryPart(rows)
@@ -83,12 +84,13 @@ var benchTwoSourcesBestCaseMPS = func() []*inmemoryPart {
 const benchTwoSourcesBestCaseMPSRowsPerLoop = 2 * maxRowsPerBlock
 
 var benchFourSourcesWorstCaseMPS = func() []*inmemoryPart {
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = defaultPrecisionBits
 	for i := 0; i < maxRowsPerBlock/2-1; i++ {
-		r.Value = rand.NormFloat64()
-		r.Timestamp = rand.Int63n(1e12)
+		r.Value = rng.NormFloat64()
+		r.Timestamp = rng.Int63n(1e12)
 		rows = append(rows, r)
 	}
 	mp := newTestInmemoryPart(rows)


@@ -1155,13 +1155,14 @@ func TestPartSearchMultiRowsOneTSID(t *testing.T) {
 func testPartSearchMultiRowsOneTSID(t *testing.T, rowsCount int) {
 	t.Helper()
 
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = 24
 	r.TSID.MetricID = 1111
 	for i := 0; i < rowsCount; i++ {
-		r.Timestamp = int64(rand.NormFloat64() * 1e6)
-		r.Value = float64(int(rand.NormFloat64() * 1e5))
+		r.Timestamp = int64(rng.NormFloat64() * 1e6)
+		r.Value = float64(int(rng.NormFloat64() * 1e5))
 		rows = append(rows, r)
 	}
 
@@ -1191,20 +1192,21 @@ func TestPartSearchMultiRowsMultiTSIDs(t *testing.T) {
 func testPartSearchMultiRowsMultiTSIDs(t *testing.T, rowsCount, tsidsCount int) {
 	t.Helper()
 
+	rng := rand.New(rand.NewSource(2))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = 24
 	for i := 0; i < rowsCount; i++ {
-		r.TSID.MetricID = uint64(rand.Intn(tsidsCount))
-		r.Timestamp = int64(rand.NormFloat64() * 1e6)
-		r.Value = float64(int(rand.NormFloat64() * 1e5))
+		r.TSID.MetricID = uint64(rng.Intn(tsidsCount))
+		r.Timestamp = int64(rng.NormFloat64() * 1e6)
+		r.Value = float64(int(rng.NormFloat64() * 1e5))
 		rows = append(rows, r)
 	}
 
 	var tsids []TSID
 	var tsid TSID
 	for i := 0; i < 100; i++ {
-		tsid.MetricID = uint64(rand.Intn(tsidsCount * 3))
+		tsid.MetricID = uint64(rng.Intn(tsidsCount * 3))
 		tsids = append(tsids, tsid)
 	}
 	sort.Slice(tsids, func(i, j int) bool { return tsids[i].Less(&tsids[j]) })


@@ -114,11 +114,13 @@ func TestPartitionSearch(t *testing.T) {
 func testPartitionSearchEx(t *testing.T, ptt int64, tr TimeRange, partsCount, maxRowsPerPart, tsidsCount int) {
 	t.Helper()
 
+	rng := rand.New(rand.NewSource(1))
+
 	// Generate tsids to search.
 	var tsids []TSID
 	var tsid TSID
 	for i := 0; i < 25; i++ {
-		tsid.MetricID = uint64(rand.Intn(tsidsCount * 2))
+		tsid.MetricID = uint64(rng.Intn(tsidsCount * 2))
 		tsids = append(tsids, tsid)
 	}
 	sort.Slice(tsids, func(i, j int) bool { return tsids[i].Less(&tsids[j]) })
@@ -135,13 +137,13 @@ func testPartitionSearchEx(t *testing.T, ptt int64, tr TimeRange, partsCount, ma
 
 		var r rawRow
 		r.PrecisionBits = 30
 		timestamp := ptr.MinTimestamp
-		rowsCount := 1 + rand.Intn(maxRowsPerPart)
+		rowsCount := 1 + rng.Intn(maxRowsPerPart)
 		for j := 0; j < rowsCount; j++ {
-			r.TSID.MetricID = uint64(rand.Intn(tsidsCount))
+			r.TSID.MetricID = uint64(rng.Intn(tsidsCount))
 			r.Timestamp = timestamp
-			r.Value = float64(int(rand.NormFloat64() * 1e5))
-			timestamp += int64(rand.Intn(1e4))
+			r.Value = float64(int(rng.NormFloat64() * 1e5))
+			timestamp += int64(rng.Intn(1e4))
 			if timestamp > ptr.MaxTimestamp {
 				break
 			}


@@ -374,10 +374,10 @@ func TestUpdateCurrHourMetricIDs(t *testing.T) {
 
 func TestMetricRowMarshalUnmarshal(t *testing.T) {
 	var buf []byte
 	typ := reflect.TypeOf(&MetricRow{})
-	rnd := rand.New(rand.NewSource(1))
+	rng := rand.New(rand.NewSource(1))
 	for i := 0; i < 1000; i++ {
-		v, ok := quick.Value(typ, rnd)
+		v, ok := quick.Value(typ, rng)
 		if !ok {
 			t.Fatalf("cannot create random MetricRow via quick.Value")
 		}
@@ -504,7 +504,7 @@ func testStorageRandTimestamps(s *Storage) error {
 	currentTime := timestampFromTime(time.Now())
 	const rowsPerAdd = 5e3
 	const addsCount = 3
-	rnd := rand.New(rand.NewSource(1))
+	rng := rand.New(rand.NewSource(1))
 
 	for i := 0; i < addsCount; i++ {
 		var mrs []MetricRow
@@ -514,10 +514,10 @@ func testStorageRandTimestamps(s *Storage) error {
 			{[]byte("instance"), []byte("1.2.3.4")},
 		}
 		for j := 0; j < rowsPerAdd; j++ {
-			mn.MetricGroup = []byte(fmt.Sprintf("metric_%d", rand.Intn(100)))
+			mn.MetricGroup = []byte(fmt.Sprintf("metric_%d", rng.Intn(100)))
 			metricNameRaw := mn.marshalRaw(nil)
-			timestamp := currentTime - int64((rnd.Float64()-0.2)*float64(2*s.retentionMsecs))
-			value := rnd.NormFloat64() * 1e11
+			timestamp := currentTime - int64((rng.Float64()-0.2)*float64(2*s.retentionMsecs))
+			value := rng.NormFloat64() * 1e11
 
 			mr := MetricRow{
 				MetricNameRaw: metricNameRaw,
@@ -618,6 +618,7 @@ func TestStorageDeleteSeries(t *testing.T) {
 }
 
 func testStorageDeleteSeries(s *Storage, workerNum int) error {
+	rng := rand.New(rand.NewSource(1))
 
 	const rowsPerMetric = 100
 	const metricsCount = 30
@@ -642,8 +643,8 @@ func testStorageDeleteSeries(s *Storage, workerNum int) error {
 		metricNameRaw := mn.marshalRaw(nil)
 
 		for j := 0; j < rowsPerMetric; j++ {
-			timestamp := rand.Int63n(1e10)
-			value := rand.NormFloat64() * 1e6
+			timestamp := rng.Int63n(1e10)
+			value := rng.NormFloat64() * 1e6
 
 			mr := MetricRow{
 				MetricNameRaw: metricNameRaw,
@@ -929,13 +930,14 @@ func testStorageRegisterMetricNames(s *Storage) error {
 }
 
 func TestStorageAddRowsSerial(t *testing.T) {
+	rng := rand.New(rand.NewSource(1))
 	path := "TestStorageAddRowsSerial"
 	retentionMsecs := int64(msecsPerMonth * 10)
 	s, err := OpenStorage(path, retentionMsecs, 1e5, 1e5)
 	if err != nil {
 		t.Fatalf("cannot open storage: %s", err)
 	}
-	if err := testStorageAddRows(s); err != nil {
+	if err := testStorageAddRows(rng, s); err != nil {
 		t.Fatalf("unexpected error: %s", err)
 	}
 	s.MustClose()
@@ -953,9 +955,10 @@ func TestStorageAddRowsConcurrent(t *testing.T) {
 	}
 	ch := make(chan error, 3)
 	for i := 0; i < cap(ch); i++ {
-		go func() {
-			ch <- testStorageAddRows(s)
-		}()
+		go func(n int) {
+			rLocal := rand.New(rand.NewSource(int64(n)))
+			ch <- testStorageAddRows(rLocal, s)
+		}(i)
 	}
 	for i := 0; i < cap(ch); i++ {
 		select {
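
The concurrent test above gives each goroutine its own generator rather than sharing one: the package-level rand functions serialize access through an internal lock, but a *rand.Rand value is not safe for concurrent use, and seeding each worker with its index keeps the workers' data distinct while staying reproducible. A minimal self-contained sketch of the same pattern (the worker count and printed output are illustrative):

package main

import (
	"fmt"
	"math/rand"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			// One generator per goroutine: *rand.Rand is not safe for
			// concurrent use, and a per-worker seed makes each worker's
			// sequence distinct yet reproducible across runs.
			rng := rand.New(rand.NewSource(int64(n)))
			fmt.Printf("worker %d: %d\n", n, rng.Int63())
		}(i)
	}
	wg.Wait()
}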
@@ -973,7 +976,7 @@ func TestStorageAddRowsConcurrent(t *testing.T) {
 	}
 }
 
-func testGenerateMetricRows(rows uint64, timestampMin, timestampMax int64) []MetricRow {
+func testGenerateMetricRows(rng *rand.Rand, rows uint64, timestampMin, timestampMax int64) []MetricRow {
 	var mrs []MetricRow
 	var mn MetricName
 	mn.Tags = []Tag{
@@ -983,8 +986,8 @@ func testGenerateMetricRows(rows uint64, timestampMin, timestampMax int64) []Met
 	for i := 0; i < int(rows); i++ {
 		mn.MetricGroup = []byte(fmt.Sprintf("metric_%d", i))
 		metricNameRaw := mn.marshalRaw(nil)
-		timestamp := rand.Int63n(timestampMax-timestampMin) + timestampMin
-		value := rand.NormFloat64() * 1e6
+		timestamp := rng.Int63n(timestampMax-timestampMin) + timestampMin
+		value := rng.NormFloat64() * 1e6
 
 		mr := MetricRow{
 			MetricNameRaw: metricNameRaw,
@@ -996,14 +999,14 @@ func testGenerateMetricRows(rows uint64, timestampMin, timestampMax int64) []Met
 	return mrs
 }
 
-func testStorageAddRows(s *Storage) error {
+func testStorageAddRows(rng *rand.Rand, s *Storage) error {
 	const rowsPerAdd = 1e3
 	const addsCount = 10
 
 	maxTimestamp := timestampFromTime(time.Now())
 	minTimestamp := maxTimestamp - s.retentionMsecs
 	for i := 0; i < addsCount; i++ {
-		mrs := testGenerateMetricRows(rowsPerAdd, minTimestamp, maxTimestamp)
+		mrs := testGenerateMetricRows(rng, rowsPerAdd, minTimestamp, maxTimestamp)
 		if err := s.AddRows(mrs, defaultPrecisionBits); err != nil {
 			return fmt.Errorf("unexpected error when adding mrs: %w", err)
 		}
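
Note the second half of the pattern here: helpers such as testGenerateMetricRows and testStorageAddRows now take a *rand.Rand parameter instead of seeding their own, so the caller owns a single deterministic stream and repeated calls keep advancing it rather than regenerating the same data. A small self-contained sketch of this caller-owned-stream idea (generateRows is a hypothetical stand-in):

package main

import (
	"fmt"
	"math/rand"
)

// generateRows consumes from the caller's generator instead of seeding its own.
func generateRows(rng *rand.Rand, n int) []float64 {
	rows := make([]float64, n)
	for i := range rows {
		rows[i] = rng.NormFloat64() * 1e6
	}
	return rows
}

func main() {
	// The caller owns one deterministic stream; each call advances it, so
	// batches differ from each other but the run as a whole is reproducible.
	rng := rand.New(rand.NewSource(1))
	fmt.Println(generateRows(rng, 3))
	fmt.Println(generateRows(rng, 3))
}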
@@ -1130,6 +1133,7 @@ func TestStorageRotateIndexDB(t *testing.T) {
 }
 
 func testStorageAddMetrics(s *Storage, workerNum int) error {
+	rng := rand.New(rand.NewSource(1))
 	const rowsCount = 1e3
 
 	var mn MetricName
@@ -1138,10 +1142,10 @@ func testStorageAddMetrics(s *Storage, workerNum int) error {
 		{[]byte("instance"), []byte("1.2.3.4")},
 	}
 	for i := 0; i < rowsCount; i++ {
-		mn.MetricGroup = []byte(fmt.Sprintf("metric_%d_%d", workerNum, rand.Intn(10)))
+		mn.MetricGroup = []byte(fmt.Sprintf("metric_%d_%d", workerNum, rng.Intn(10)))
 		metricNameRaw := mn.marshalRaw(nil)
-		timestamp := rand.Int63n(1e10)
-		value := rand.NormFloat64() * 1e6
+		timestamp := rng.Int63n(1e10)
+		value := rng.NormFloat64() * 1e6
 
 		mr := MetricRow{
 			MetricNameRaw: metricNameRaw,
@@ -1164,6 +1168,7 @@ func testStorageAddMetrics(s *Storage, workerNum int) error {
 }
 
 func TestStorageDeleteStaleSnapshots(t *testing.T) {
+	rng := rand.New(rand.NewSource(1))
 	path := "TestStorageDeleteStaleSnapshots"
 	retentionMsecs := int64(msecsPerMonth * 10)
 	s, err := OpenStorage(path, retentionMsecs, 1e5, 1e5)
@@ -1175,7 +1180,7 @@ func TestStorageDeleteStaleSnapshots(t *testing.T) {
 	maxTimestamp := timestampFromTime(time.Now())
 	minTimestamp := maxTimestamp - s.retentionMsecs
 	for i := 0; i < addsCount; i++ {
-		mrs := testGenerateMetricRows(rowsPerAdd, minTimestamp, maxTimestamp)
+		mrs := testGenerateMetricRows(rng, rowsPerAdd, minTimestamp, maxTimestamp)
 		if err := s.AddRows(mrs, defaultPrecisionBits); err != nil {
 			t.Fatalf("unexpected error when adding mrs: %s", err)
 		}


@@ -10,6 +10,8 @@ import (
 )
 
 func TestTableSearch(t *testing.T) {
+	rng := rand.New(rand.NewSource(1))
+
 	var trData TimeRange
 	trData.fromPartitionTime(time.Now())
 	trData.MinTimestamp -= 5 * 365 * 24 * 3600 * 1000
@@ -19,7 +21,7 @@ func TestTableSearch(t *testing.T) {
 			MinTimestamp: trData.MinTimestamp + 4e3,
 			MaxTimestamp: trData.MaxTimestamp - 4e3,
 		}
-		testTableSearchEx(t, trData, trSearch, 1, 10, 1000, 10)
+		testTableSearchEx(t, rng, trData, trSearch, 1, 10, 1000, 10)
 	})
 
 	t.Run("SinglePartPerPartition", func(t *testing.T) {
@@ -27,7 +29,7 @@ func TestTableSearch(t *testing.T) {
 			MinTimestamp: trData.MinTimestamp + 4e3,
 			MaxTimestamp: trData.MaxTimestamp - 4e3,
 		}
-		testTableSearchEx(t, trData, trSearch, 12, 1, 1000, 10)
+		testTableSearchEx(t, rng, trData, trSearch, 12, 1, 1000, 10)
 	})
 
 	t.Run("SingleRowPerPartition", func(t *testing.T) {
@@ -35,7 +37,7 @@ func TestTableSearch(t *testing.T) {
 			MinTimestamp: trData.MinTimestamp + 4e3,
 			MaxTimestamp: trData.MaxTimestamp - 4e3,
 		}
-		testTableSearchEx(t, trData, trSearch, 12, 20, 1, 10)
+		testTableSearchEx(t, rng, trData, trSearch, 12, 20, 1, 10)
 	})
 
 	t.Run("SingleTSID", func(t *testing.T) {
@@ -43,7 +45,7 @@ func TestTableSearch(t *testing.T) {
 			MinTimestamp: trData.MinTimestamp + 4e3,
 			MaxTimestamp: trData.MaxTimestamp - 4e3,
 		}
-		testTableSearchEx(t, trData, trSearch, 12, 5, 1000, 1)
+		testTableSearchEx(t, rng, trData, trSearch, 12, 5, 1000, 1)
 	})
 
 	t.Run("ManyPartitions", func(t *testing.T) {
@@ -51,7 +53,7 @@ func TestTableSearch(t *testing.T) {
 			MinTimestamp: trData.MinTimestamp + 4e3,
 			MaxTimestamp: trData.MaxTimestamp - 4e3,
 		}
-		testTableSearchEx(t, trData, trSearch, 20, 10, 30, 20)
+		testTableSearchEx(t, rng, trData, trSearch, 20, 10, 30, 20)
 	})
 
 	t.Run("ManyTSIDs", func(t *testing.T) {
@@ -59,7 +61,7 @@ func TestTableSearch(t *testing.T) {
 			MinTimestamp: trData.MinTimestamp + 4e3,
 			MaxTimestamp: trData.MaxTimestamp - 4e3,
 		}
-		testTableSearchEx(t, trData, trSearch, 2, 5, 5000, 1000)
+		testTableSearchEx(t, rng, trData, trSearch, 2, 5, 5000, 1000)
 	})
 
 	t.Run("ExactTimeRange", func(t *testing.T) {
@@ -67,7 +69,7 @@ func TestTableSearch(t *testing.T) {
 			MinTimestamp: trData.MinTimestamp,
 			MaxTimestamp: trData.MaxTimestamp,
 		}
-		testTableSearchEx(t, trData, trSearch, 2, 5, 1000, 10)
+		testTableSearchEx(t, rng, trData, trSearch, 2, 5, 1000, 10)
 	})
 
 	t.Run("InnerTimeRange", func(t *testing.T) {
@@ -75,7 +77,7 @@ func TestTableSearch(t *testing.T) {
 			MinTimestamp: trData.MinTimestamp + 4e3,
 			MaxTimestamp: trData.MaxTimestamp - 4e3,
 		}
-		testTableSearchEx(t, trData, trSearch, 2, 5, 1000, 10)
+		testTableSearchEx(t, rng, trData, trSearch, 2, 5, 1000, 10)
 	})
 
 	t.Run("OuterTimeRange", func(t *testing.T) {
@@ -83,7 +85,7 @@ func TestTableSearch(t *testing.T) {
 			MinTimestamp: trData.MinTimestamp - 1e6,
 			MaxTimestamp: trData.MaxTimestamp + 1e6,
 		}
-		testTableSearchEx(t, trData, trSearch, 2, 5, 1000, 10)
+		testTableSearchEx(t, rng, trData, trSearch, 2, 5, 1000, 10)
 	})
 
 	t.Run("LowTimeRange", func(t *testing.T) {
@@ -91,7 +93,7 @@ func TestTableSearch(t *testing.T) {
 			MinTimestamp: trData.MinTimestamp - 2e6,
 			MaxTimestamp: trData.MinTimestamp - 1e6,
 		}
-		testTableSearchEx(t, trData, trSearch, 2, 5, 1000, 10)
+		testTableSearchEx(t, rng, trData, trSearch, 2, 5, 1000, 10)
 	})
 
 	t.Run("HighTimeRange", func(t *testing.T) {
@@ -99,7 +101,7 @@ func TestTableSearch(t *testing.T) {
 			MinTimestamp: trData.MaxTimestamp + 1e6,
 			MaxTimestamp: trData.MaxTimestamp + 2e6,
 		}
-		testTableSearchEx(t, trData, trSearch, 2, 5, 1000, 10)
+		testTableSearchEx(t, rng, trData, trSearch, 2, 5, 1000, 10)
 	})
 
 	t.Run("LowerEndTimeRange", func(t *testing.T) {
@@ -107,7 +109,7 @@ func TestTableSearch(t *testing.T) {
 			MinTimestamp: trData.MinTimestamp - 1e6,
 			MaxTimestamp: trData.MaxTimestamp - 4e3,
 		}
-		testTableSearchEx(t, trData, trSearch, 2, 5, 1000, 10)
+		testTableSearchEx(t, rng, trData, trSearch, 2, 5, 1000, 10)
 	})
 
 	t.Run("HigherEndTimeRange", func(t *testing.T) {
@@ -115,18 +117,18 @@ func TestTableSearch(t *testing.T) {
 			MinTimestamp: trData.MinTimestamp + 4e3,
 			MaxTimestamp: trData.MaxTimestamp + 1e6,
 		}
-		testTableSearchEx(t, trData, trSearch, 2, 5, 1000, 10)
+		testTableSearchEx(t, rng, trData, trSearch, 2, 5, 1000, 10)
 	})
 }
 
-func testTableSearchEx(t *testing.T, trData, trSearch TimeRange, partitionsCount, maxPartsPerPartition, maxRowsPerPart, tsidsCount int) {
+func testTableSearchEx(t *testing.T, rng *rand.Rand, trData, trSearch TimeRange, partitionsCount, maxPartsPerPartition, maxRowsPerPart, tsidsCount int) {
 	t.Helper()
 
 	// Generate tsids to search.
 	var tsids []TSID
 	var tsid TSID
 	for i := 0; i < 25; i++ {
-		tsid.MetricID = uint64(rand.Intn(tsidsCount * 2))
+		tsid.MetricID = uint64(rng.Intn(tsidsCount * 2))
 		tsids = append(tsids, tsid)
 	}
 	sort.Slice(tsids, func(i, j int) bool { return tsids[i].Less(&tsids[j]) })
@@ -142,17 +144,17 @@ func testTableSearchEx(t *testing.T, trData, trSearch TimeRange, partitionsCount
 
 	ptr.fromPartitionTimestamp(trData.MinTimestamp)
 	var rowss [][]rawRow
 	for i := 0; i < partitionsCount; i++ {
-		partsCount := rand.Intn(maxPartsPerPartition) + 1
+		partsCount := rng.Intn(maxPartsPerPartition) + 1
 		for j := 0; j < partsCount; j++ {
 			var rows []rawRow
 			timestamp := ptr.MinTimestamp
-			rowsCount := rand.Intn(maxRowsPerPart) + 1
+			rowsCount := rng.Intn(maxRowsPerPart) + 1
 			for k := 0; k < rowsCount; k++ {
-				r.TSID.MetricID = uint64(rand.Intn(tsidsCount))
+				r.TSID.MetricID = uint64(rng.Intn(tsidsCount))
 				r.Timestamp = timestamp
-				r.Value = float64(int(rand.NormFloat64() * 1e5))
-				timestamp += int64(rand.Intn(1e4)) + 1
+				r.Value = float64(int(rng.NormFloat64() * 1e5))
+				timestamp += int64(rng.Intn(1e4)) + 1
 				if timestamp > ptr.MaxTimestamp {
 					break
 				}


@@ -79,17 +79,18 @@ func createBenchTable(b *testing.B, path string, startTimestamp int64, rowsPerIn
 
 	var wg sync.WaitGroup
 	for k := 0; k < cgroup.AvailableCPUs(); k++ {
 		wg.Add(1)
-		go func() {
+		go func(n int) {
+			rng := rand.New(rand.NewSource(int64(n)))
 			rows := make([]rawRow, rowsPerInsert)
 			value := float64(100)
 			for int(atomic.AddUint64(&insertsCount, ^uint64(0))) >= 0 {
 				for j := 0; j < rowsPerInsert; j++ {
-					ts := atomic.AddUint64(&timestamp, uint64(10+rand.Int63n(2)))
-					value += float64(int(rand.NormFloat64() * 5))
+					ts := atomic.AddUint64(&timestamp, uint64(10+rng.Int63n(2)))
+					value += float64(int(rng.NormFloat64() * 5))
 					r := &rows[j]
 					r.PrecisionBits = defaultPrecisionBits
-					r.TSID.MetricID = uint64(rand.Intn(tsidsCount) + 1)
+					r.TSID.MetricID = uint64(rng.Intn(tsidsCount) + 1)
 					r.Timestamp = int64(ts)
 					r.Value = value
 				}
@@ -98,7 +99,7 @@ func createBenchTable(b *testing.B, path string, startTimestamp int64, rowsPerIn
 				}
 			}
 			wg.Done()
-		}()
+		}(k)
 	}
 
 	wg.Wait()


@@ -27,15 +27,16 @@ func benchmarkTableAddRows(b *testing.B, rowsPerInsert, tsidsCount int) {
 
 	startTimestamp := timestampFromTime(time.Now())
 	timestamp := startTimestamp
 	value := float64(100)
+	rng := rand.New(rand.NewSource(1))
 	for i := 0; i < rowsPerInsert; i++ {
 		r := &rows[i]
 		r.PrecisionBits = defaultPrecisionBits
-		r.TSID.MetricID = uint64(rand.Intn(tsidsCount) + 1)
+		r.TSID.MetricID = uint64(rng.Intn(tsidsCount) + 1)
 		r.Timestamp = timestamp
 		r.Value = value
-		timestamp += 10 + rand.Int63n(2)
-		value += float64(int(rand.NormFloat64() * 5))
+		timestamp += 10 + rng.Int63n(2)
+		value += float64(int(rng.NormFloat64() * 5))
 	}
 
 	timestampDelta := timestamp - startTimestamp