lib/storage: use deterministic random generator in tests

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3683
Aliaksandr Valialkin 2023-01-23 20:10:29 -08:00
parent 4c7062b408
commit 903b2e710c
13 changed files with 141 additions and 105 deletions
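
The pattern applied throughout: each test or benchmark that previously drew from math/rand's global, shared source now owns a *rand.Rand seeded with a constant, so the generated rows are identical on every run. A minimal self-contained sketch of the idea (the helper name is illustrative, not part of this commit):

package example

import "math/rand"

// deterministicValues returns the same slice on every call, because the
// generator is seeded with a constant instead of relying on the global
// rand source.
func deterministicValues(n int) []float64 {
	rng := rand.New(rand.NewSource(1)) // fixed seed => reproducible stream
	a := make([]float64, n)
	for i := range a {
		a[i] = rng.Float64()*1e9 - 5e8 // same value range as the tests below
	}
	return a
}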


@@ -17,11 +17,12 @@ func TestBlockStreamReaderSingleRow(t *testing.T) {
 }

 func TestBlockStreamReaderSingleBlockManyRows(t *testing.T) {
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = defaultPrecisionBits
 	for i := 0; i < maxRowsPerBlock; i++ {
-		r.Value = rand.Float64()*1e9 - 5e8
+		r.Value = rng.Float64()*1e9 - 5e8
 		r.Timestamp = int64(i * 1e9)
 		rows = append(rows, r)
 	}
@@ -29,24 +30,26 @@ func TestBlockStreamReaderSingleBlockManyRows(t *testing.T) {
 }

 func TestBlockStreamReaderSingleTSIDManyBlocks(t *testing.T) {
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = 1
 	for i := 0; i < 5*maxRowsPerBlock; i++ {
-		r.Value = rand.NormFloat64() * 1e4
-		r.Timestamp = int64(rand.NormFloat64() * 1e9)
+		r.Value = rng.NormFloat64() * 1e4
+		r.Timestamp = int64(rng.NormFloat64() * 1e9)
 		rows = append(rows, r)
 	}
 	testBlocksStreamReader(t, rows, 5)
 }

 func TestBlockStreamReaderManyTSIDSingleRow(t *testing.T) {
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = defaultPrecisionBits
 	for i := 0; i < 1000; i++ {
 		r.TSID.MetricID = uint64(i)
-		r.Value = rand.Float64()*1e9 - 5e8
+		r.Value = rng.Float64()*1e9 - 5e8
 		r.Timestamp = int64(i * 1e9)
 		rows = append(rows, r)
 	}
@@ -54,28 +57,30 @@ func TestBlockStreamReaderManyTSIDSingleRow(t *testing.T) {
 }

 func TestBlockStreamReaderManyTSIDManyRows(t *testing.T) {
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = defaultPrecisionBits
 	const blocks = 123
 	for i := 0; i < 3210; i++ {
 		r.TSID.MetricID = uint64((1e9 - i) % blocks)
-		r.Value = rand.Float64()
-		r.Timestamp = int64(rand.Float64() * 1e9)
+		r.Value = rng.Float64()
+		r.Timestamp = int64(rng.Float64() * 1e9)
 		rows = append(rows, r)
 	}
 	testBlocksStreamReader(t, rows, blocks)
 }

 func TestBlockStreamReaderReadConcurrent(t *testing.T) {
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = defaultPrecisionBits
 	const blocks = 123
 	for i := 0; i < 3210; i++ {
 		r.TSID.MetricID = uint64((1e9 - i) % blocks)
-		r.Value = rand.Float64()
-		r.Timestamp = int64(rand.Float64() * 1e9)
+		r.Value = rng.Float64()
+		r.Timestamp = int64(rng.Float64() * 1e9)
 		rows = append(rows, r)
 	}
 	var mp inmemoryPart


@@ -11,13 +11,14 @@ import (
 )

 func TestBlockMarshalUnmarshalPortable(t *testing.T) {
+	rng := rand.New(rand.NewSource(1))
 	var b Block
 	for i := 0; i < 1000; i++ {
 		b.Reset()
-		rowsCount := rand.Intn(maxRowsPerBlock) + 1
+		rowsCount := rng.Intn(maxRowsPerBlock) + 1
 		b.timestamps = getRandTimestamps(rowsCount)
 		b.values = getRandValues(rowsCount)
-		b.bh.Scale = int16(rand.Intn(30) - 15)
+		b.bh.Scale = int16(rng.Intn(30) - 15)
 		b.bh.PrecisionBits = uint8(64 - (i % 64))
 		testBlockMarshalUnmarshalPortable(t, &b)
 	}
@@ -129,19 +130,21 @@ func getValuesForPrecisionBits(values []int64, precisionBits uint8) []int64 {
 }

 func getRandValues(rowsCount int) []int64 {
+	rng := rand.New(rand.NewSource(1))
 	a := make([]int64, rowsCount)
 	for i := 0; i < rowsCount; i++ {
-		a[i] = int64(rand.Intn(1e5) - 0.5e5)
+		a[i] = int64(rng.Intn(1e5) - 0.5e5)
 	}
 	return a
 }

 func getRandTimestamps(rowsCount int) []int64 {
+	rng := rand.New(rand.NewSource(1))
 	a := make([]int64, rowsCount)
-	ts := int64(rand.Intn(1e9))
+	ts := int64(rng.Intn(1e9))
 	for i := 0; i < rowsCount; i++ {
 		a[i] = ts
-		ts += int64(rand.Intn(1e5))
+		ts += int64(rng.Intn(1e5))
 	}
 	return a
 }
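
Worth noting for the two helpers above: getRandValues and getRandTimestamps each construct their own generator from seed 1, so every call returns the same sequence. A standalone sketch of that property of math/rand:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	a := rand.New(rand.NewSource(1))
	b := rand.New(rand.NewSource(1))
	// Equal seeds produce equal streams: prints "true true true".
	fmt.Println(a.Int63() == b.Int63(), a.Int63() == b.Int63(), a.Int63() == b.Int63())
}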


@@ -614,6 +614,8 @@ func TestIndexDB(t *testing.T) {
 }

 func testIndexDBGetOrCreateTSIDByName(db *indexDB, accountsCount, projectsCount, metricGroups int) ([]MetricName, []TSID, []string, error) {
+	r := rand.New(rand.NewSource(1))
+
 	// Create tsids.
 	var mns []MetricName
 	var tsids []TSID
@@ -635,7 +637,7 @@ func testIndexDBGetOrCreateTSIDByName(db *indexDB, accountsCount, projectsCount,
 		mn.MetricGroup = []byte(fmt.Sprintf("metricGroup.%d\x00\x01\x02", i%metricGroups))

 		// Init other tags.
-		tagsCount := rand.Intn(10) + 1
+		tagsCount := r.Intn(10) + 1
 		for j := 0; j < tagsCount; j++ {
 			key := fmt.Sprintf("key\x01\x02\x00_%d_%d", i, j)
 			value := fmt.Sprintf("val\x01_%d\x00_%d\x02", i, j)
@@ -1564,6 +1566,7 @@ func TestMatchTagFilters(t *testing.T) {
 }

 func TestIndexDBRepopulateAfterRotation(t *testing.T) {
+	r := rand.New(rand.NewSource(1))
 	path := "TestIndexRepopulateAfterRotation"
 	s, err := OpenStorage(path, msecsPerMonth, 1e5, 1e5)
 	if err != nil {
@@ -1584,7 +1587,7 @@ func TestIndexDBRepopulateAfterRotation(t *testing.T) {
 	const metricRowsN = 1000
 	// use min-max timestamps of 1month range to create smaller number of partitions
 	timeMin, timeMax := time.Now().Add(-730*time.Hour), time.Now()
-	mrs := testGenerateMetricRows(metricRowsN, timeMin.UnixMilli(), timeMax.UnixMilli())
+	mrs := testGenerateMetricRows(r, metricRowsN, timeMin.UnixMilli(), timeMax.UnixMilli())
 	if err := s.AddRows(mrs, defaultPrecisionBits); err != nil {
 		t.Fatalf("unexpected error when adding mrs: %s", err)
 	}


@@ -19,6 +19,7 @@ func TestInmemoryPartInitFromRows(t *testing.T) {
 		},
 	}, 1)

+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow

@@ -27,8 +28,8 @@ func TestInmemoryPartInitFromRows(t *testing.T) {
 	initTestTSID(&r.TSID)
 	r.PrecisionBits = defaultPrecisionBits
 	for i := uint64(0); i < 1e4; i++ {
-		r.Timestamp = int64(rand.NormFloat64() * 1e7)
-		r.Value = rand.NormFloat64() * 100
+		r.Timestamp = int64(rng.NormFloat64() * 1e7)
+		r.Value = rng.NormFloat64() * 100
 		rows = append(rows, r)
 	}
@@ -39,8 +40,8 @@ func TestInmemoryPartInitFromRows(t *testing.T) {
 	for i := 0; i < 1e4; i++ {
 		initTestTSID(&r.TSID)
 		r.TSID.MetricID = uint64(i)
-		r.Timestamp = int64(rand.NormFloat64() * 1e7)
-		r.Value = rand.NormFloat64() * 100
+		r.Timestamp = int64(rng.NormFloat64() * 1e7)
+		r.Value = rng.NormFloat64() * 100
 		r.PrecisionBits = uint8(i%64) + 1
 		rows = append(rows, r)


@@ -26,12 +26,13 @@ func benchmarkInmemoryPartInitFromRows(b *testing.B, rows []rawRow) {

 // Each row belongs to an unique TSID
 var benchRawRowsWorstCase = func() []rawRow {
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	for i := 0; i < 1e5; i++ {
 		r.TSID.MetricID = uint64(i)
-		r.Timestamp = rand.Int63()
-		r.Value = rand.NormFloat64()
+		r.Timestamp = rng.Int63()
+		r.Value = rng.NormFloat64()
 		r.PrecisionBits = uint8(i%64) + 1
 		rows = append(rows, r)
 	}


@@ -20,14 +20,15 @@ func TestMergeBlockStreamsOneStreamOneRow(t *testing.T) {
 }

 func TestMergeBlockStreamsOneStreamOneBlockManyRows(t *testing.T) {
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = 4
 	minTimestamp := int64(1<<63 - 1)
 	maxTimestamp := int64(-1 << 63)
 	for i := 0; i < maxRowsPerBlock; i++ {
-		r.Timestamp = int64(rand.Intn(1e9))
-		r.Value = rand.NormFloat64() * 2332
+		r.Timestamp = int64(rng.Intn(1e9))
+		r.Value = rng.NormFloat64() * 2332
 		rows = append(rows, r)

 		if r.Timestamp < minTimestamp {
@@ -43,6 +44,7 @@ func TestMergeBlockStreamsOneStreamOneBlockManyRows(t *testing.T) {
 }

 func TestMergeBlockStreamsOneStreamManyBlocksOneRow(t *testing.T) {
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = 4
@@ -52,8 +54,8 @@ func TestMergeBlockStreamsOneStreamManyBlocksOneRow(t *testing.T) {
 	for i := 0; i < blocksCount; i++ {
 		initTestTSID(&r.TSID)
 		r.TSID.MetricID = uint64(i * 123)
-		r.Timestamp = int64(rand.Intn(1e9))
-		r.Value = rand.NormFloat64() * 2332
+		r.Timestamp = int64(rng.Intn(1e9))
+		r.Value = rng.NormFloat64() * 2332
 		rows = append(rows, r)

 		if r.Timestamp < minTimestamp {
@@ -69,6 +71,7 @@ func TestMergeBlockStreamsOneStreamManyBlocksOneRow(t *testing.T) {
 }

 func TestMergeBlockStreamsOneStreamManyBlocksManyRows(t *testing.T) {
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	initTestTSID(&r.TSID)
@@ -79,8 +82,8 @@ func TestMergeBlockStreamsOneStreamManyBlocksManyRows(t *testing.T) {
 	maxTimestamp := int64(-1 << 63)
 	for i := 0; i < rowsCount; i++ {
 		r.TSID.MetricID = uint64(i % blocksCount)
-		r.Timestamp = int64(rand.Intn(1e9))
-		r.Value = rand.NormFloat64() * 2332
+		r.Timestamp = int64(rng.Intn(1e9))
+		r.Value = rng.NormFloat64() * 2332
 		rows = append(rows, r)

 		if r.Timestamp < minTimestamp {
@@ -169,6 +172,7 @@ func TestMergeBlockStreamsTwoStreamsManyBlocksManyRows(t *testing.T) {
 	minTimestamp := int64(1<<63 - 1)
 	maxTimestamp := int64(-1 << 63)

+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	initTestTSID(&r.TSID)
@@ -176,8 +180,8 @@ func TestMergeBlockStreamsTwoStreamsManyBlocksManyRows(t *testing.T) {
 	const rowsCount1 = 4938
 	for i := 0; i < rowsCount1; i++ {
 		r.TSID.MetricID = uint64(i % blocksCount)
-		r.Timestamp = int64(rand.Intn(1e9))
-		r.Value = rand.NormFloat64() * 2332
+		r.Timestamp = int64(rng.Intn(1e9))
+		r.Value = rng.NormFloat64() * 2332
 		rows = append(rows, r)

 		if r.Timestamp < minTimestamp {
@@ -193,8 +197,8 @@ func TestMergeBlockStreamsTwoStreamsManyBlocksManyRows(t *testing.T) {
 	const rowsCount2 = 3281
 	for i := 0; i < rowsCount2; i++ {
 		r.TSID.MetricID = uint64((i + 17) % blocksCount)
-		r.Timestamp = int64(rand.Intn(1e9))
-		r.Value = rand.NormFloat64() * 2332
+		r.Timestamp = int64(rng.Intn(1e9))
+		r.Value = rng.NormFloat64() * 2332
 		rows = append(rows, r)

 		if r.Timestamp < minTimestamp {
@@ -214,13 +218,14 @@ func TestMergeBlockStreamsTwoStreamsBigOverlappingBlocks(t *testing.T) {
 	minTimestamp := int64(1<<63 - 1)
 	maxTimestamp := int64(-1 << 63)

+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = 5
 	const rowsCount1 = maxRowsPerBlock + 234
 	for i := 0; i < rowsCount1; i++ {
 		r.Timestamp = int64(i * 2894)
-		r.Value = float64(int(rand.NormFloat64() * 1e2))
+		r.Value = float64(int(rng.NormFloat64() * 1e2))
 		rows = append(rows, r)

 		if r.Timestamp < minTimestamp {
@@ -236,7 +241,7 @@ func TestMergeBlockStreamsTwoStreamsBigOverlappingBlocks(t *testing.T) {
 	const rowsCount2 = maxRowsPerBlock + 2344
 	for i := 0; i < rowsCount2; i++ {
 		r.Timestamp = int64(i * 2494)
-		r.Value = float64(int(rand.NormFloat64() * 1e2))
+		r.Value = float64(int(rng.NormFloat64() * 1e2))
 		rows = append(rows, r)

 		if r.Timestamp < minTimestamp {
@@ -256,13 +261,14 @@ func TestMergeBlockStreamsTwoStreamsBigSequentialBlocks(t *testing.T) {
 	minTimestamp := int64(1<<63 - 1)
 	maxTimestamp := int64(-1 << 63)

+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = 5
 	const rowsCount1 = maxRowsPerBlock + 234
 	for i := 0; i < rowsCount1; i++ {
 		r.Timestamp = int64(i * 2894)
-		r.Value = float64(int(rand.NormFloat64() * 1e2))
+		r.Value = float64(int(rng.NormFloat64() * 1e2))
 		rows = append(rows, r)

 		if r.Timestamp < minTimestamp {
@@ -279,7 +285,7 @@ func TestMergeBlockStreamsTwoStreamsBigSequentialBlocks(t *testing.T) {
 	const rowsCount2 = maxRowsPerBlock - 233
 	for i := 0; i < rowsCount2; i++ {
 		r.Timestamp = maxTimestampB1 + int64(i*2494)
-		r.Value = float64(int(rand.NormFloat64() * 1e2))
+		r.Value = float64(int(rng.NormFloat64() * 1e2))
 		rows = append(rows, r)

 		if r.Timestamp < minTimestamp {
@@ -303,16 +309,17 @@ func TestMergeBlockStreamsManyStreamsManyBlocksManyRows(t *testing.T) {
 	initTestTSID(&r.TSID)
 	r.PrecisionBits = defaultPrecisionBits

+	rng := rand.New(rand.NewSource(1))
 	rowsCount := 0
 	const blocksCount = 113
 	var bsrs []*blockStreamReader
 	for i := 0; i < 20; i++ {
-		rowsPerStream := rand.Intn(500)
+		rowsPerStream := rng.Intn(500)
 		var rows []rawRow
 		for j := 0; j < rowsPerStream; j++ {
 			r.TSID.MetricID = uint64(j % blocksCount)
-			r.Timestamp = int64(rand.Intn(1e9))
-			r.Value = rand.NormFloat64()
+			r.Timestamp = int64(rng.Intn(1e9))
+			r.Value = rng.NormFloat64()
 			rows = append(rows, r)

 			if r.Timestamp < minTimestamp {
@@ -337,15 +344,16 @@ func TestMergeForciblyStop(t *testing.T) {
 	initTestTSID(&r.TSID)
 	r.PrecisionBits = defaultPrecisionBits

+	rng := rand.New(rand.NewSource(1))
 	const blocksCount = 113
 	var bsrs []*blockStreamReader
 	for i := 0; i < 20; i++ {
-		rowsPerStream := rand.Intn(1000)
+		rowsPerStream := rng.Intn(1000)
 		var rows []rawRow
 		for j := 0; j < rowsPerStream; j++ {
 			r.TSID.MetricID = uint64(j % blocksCount)
-			r.Timestamp = int64(rand.Intn(1e9))
-			r.Value = rand.NormFloat64()
+			r.Timestamp = int64(rng.Intn(1e9))
+			r.Value = rng.NormFloat64()
 			rows = append(rows, r)

 			if r.Timestamp < minTimestamp {


@@ -50,12 +50,13 @@ func benchmarkMergeBlockStreams(b *testing.B, mps []*inmemoryPart, rowsPerLoop i
 }

 var benchTwoSourcesWorstCaseMPS = func() []*inmemoryPart {
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = defaultPrecisionBits
 	for i := 0; i < maxRowsPerBlock/2-1; i++ {
-		r.Value = rand.NormFloat64()
-		r.Timestamp = rand.Int63n(1e12)
+		r.Value = rng.NormFloat64()
+		r.Timestamp = rng.Int63n(1e12)
 		rows = append(rows, r)
 	}
 	mp := newTestInmemoryPart(rows)
@@ -83,12 +84,13 @@ var benchTwoSourcesBestCaseMPS = func() []*inmemoryPart {
 const benchTwoSourcesBestCaseMPSRowsPerLoop = 2 * maxRowsPerBlock

 var benchFourSourcesWorstCaseMPS = func() []*inmemoryPart {
+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = defaultPrecisionBits
 	for i := 0; i < maxRowsPerBlock/2-1; i++ {
-		r.Value = rand.NormFloat64()
-		r.Timestamp = rand.Int63n(1e12)
+		r.Value = rng.NormFloat64()
+		r.Timestamp = rng.Int63n(1e12)
 		rows = append(rows, r)
 	}
 	mp := newTestInmemoryPart(rows)


@@ -1155,13 +1155,14 @@ func TestPartSearchMultiRowsOneTSID(t *testing.T) {
 func testPartSearchMultiRowsOneTSID(t *testing.T, rowsCount int) {
 	t.Helper()

+	rng := rand.New(rand.NewSource(1))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = 24
 	r.TSID.MetricID = 1111
 	for i := 0; i < rowsCount; i++ {
-		r.Timestamp = int64(rand.NormFloat64() * 1e6)
-		r.Value = float64(int(rand.NormFloat64() * 1e5))
+		r.Timestamp = int64(rng.NormFloat64() * 1e6)
+		r.Value = float64(int(rng.NormFloat64() * 1e5))
 		rows = append(rows, r)
 	}
@@ -1191,20 +1192,21 @@ func TestPartSearchMultiRowsMultiTSIDs(t *testing.T) {
 func testPartSearchMultiRowsMultiTSIDs(t *testing.T, rowsCount, tsidsCount int) {
 	t.Helper()

+	rng := rand.New(rand.NewSource(2))
 	var rows []rawRow
 	var r rawRow
 	r.PrecisionBits = 24
 	for i := 0; i < rowsCount; i++ {
-		r.TSID.MetricID = uint64(rand.Intn(tsidsCount))
-		r.Timestamp = int64(rand.NormFloat64() * 1e6)
-		r.Value = float64(int(rand.NormFloat64() * 1e5))
+		r.TSID.MetricID = uint64(rng.Intn(tsidsCount))
+		r.Timestamp = int64(rng.NormFloat64() * 1e6)
+		r.Value = float64(int(rng.NormFloat64() * 1e5))
 		rows = append(rows, r)
 	}

 	var tsids []TSID
 	var tsid TSID
 	for i := 0; i < 100; i++ {
-		tsid.MetricID = uint64(rand.Intn(tsidsCount * 3))
+		tsid.MetricID = uint64(rng.Intn(tsidsCount * 3))
 		tsids = append(tsids, tsid)
 	}
 	sort.Slice(tsids, func(i, j int) bool { return tsids[i].Less(&tsids[j]) })


@@ -114,11 +114,13 @@ func TestPartitionSearch(t *testing.T) {
 func testPartitionSearchEx(t *testing.T, ptt int64, tr TimeRange, partsCount, maxRowsPerPart, tsidsCount int) {
 	t.Helper()

+	rng := rand.New(rand.NewSource(1))
+
 	// Generate tsids to search.
 	var tsids []TSID
 	var tsid TSID
 	for i := 0; i < 25; i++ {
-		tsid.MetricID = uint64(rand.Intn(tsidsCount * 2))
+		tsid.MetricID = uint64(rng.Intn(tsidsCount * 2))
 		tsids = append(tsids, tsid)
 	}
 	sort.Slice(tsids, func(i, j int) bool { return tsids[i].Less(&tsids[j]) })
@@ -135,13 +137,13 @@ func testPartitionSearchEx(t *testing.T, ptt int64, tr TimeRange, partsCount, ma
 	var r rawRow
 	r.PrecisionBits = 30
 	timestamp := ptr.MinTimestamp
-	rowsCount := 1 + rand.Intn(maxRowsPerPart)
+	rowsCount := 1 + rng.Intn(maxRowsPerPart)
 	for j := 0; j < rowsCount; j++ {
-		r.TSID.MetricID = uint64(rand.Intn(tsidsCount))
+		r.TSID.MetricID = uint64(rng.Intn(tsidsCount))
 		r.Timestamp = timestamp
-		r.Value = float64(int(rand.NormFloat64() * 1e5))
-		timestamp += int64(rand.Intn(1e4))
+		r.Value = float64(int(rng.NormFloat64() * 1e5))
+		timestamp += int64(rng.Intn(1e4))

 		if timestamp > ptr.MaxTimestamp {
 			break
 		}


@@ -459,10 +459,10 @@ func TestUpdateCurrHourMetricIDs(t *testing.T) {
 func TestMetricRowMarshalUnmarshal(t *testing.T) {
 	var buf []byte
 	typ := reflect.TypeOf(&MetricRow{})
-	rnd := rand.New(rand.NewSource(1))
+	rng := rand.New(rand.NewSource(1))

 	for i := 0; i < 1000; i++ {
-		v, ok := quick.Value(typ, rnd)
+		v, ok := quick.Value(typ, rng)
 		if !ok {
 			t.Fatalf("cannot create random MetricRow via quick.Value")
 		}
@@ -589,7 +589,7 @@ func testStorageRandTimestamps(s *Storage) error {
 	currentTime := timestampFromTime(time.Now())
 	const rowsPerAdd = 5e3
 	const addsCount = 3
-	rnd := rand.New(rand.NewSource(1))
+	rng := rand.New(rand.NewSource(1))

 	for i := 0; i < addsCount; i++ {
 		var mrs []MetricRow
@@ -599,10 +599,10 @@ func testStorageRandTimestamps(s *Storage) error {
 			{[]byte("instance"), []byte("1.2.3.4")},
 		}
 		for j := 0; j < rowsPerAdd; j++ {
-			mn.MetricGroup = []byte(fmt.Sprintf("metric_%d", rand.Intn(100)))
+			mn.MetricGroup = []byte(fmt.Sprintf("metric_%d", rng.Intn(100)))
 			metricNameRaw := mn.marshalRaw(nil)
-			timestamp := currentTime - int64((rnd.Float64()-0.2)*float64(2*s.retentionMsecs))
-			value := rnd.NormFloat64() * 1e11
+			timestamp := currentTime - int64((rng.Float64()-0.2)*float64(2*s.retentionMsecs))
+			value := rng.NormFloat64() * 1e11

 			mr := MetricRow{
 				MetricNameRaw: metricNameRaw,
@@ -703,6 +703,7 @@ func TestStorageDeleteSeries(t *testing.T) {
 }

 func testStorageDeleteSeries(s *Storage, workerNum int) error {
+	rng := rand.New(rand.NewSource(1))

 	const rowsPerMetric = 100
 	const metricsCount = 30
@@ -731,8 +732,8 @@ func testStorageDeleteSeries(s *Storage, workerNum int) error {
 		metricNameRaw := mn.marshalRaw(nil)

 		for j := 0; j < rowsPerMetric; j++ {
-			timestamp := rand.Int63n(1e10)
-			value := rand.NormFloat64() * 1e6
+			timestamp := rng.Int63n(1e10)
+			value := rng.NormFloat64() * 1e6

 			mr := MetricRow{
 				MetricNameRaw: metricNameRaw,
@@ -1071,13 +1072,14 @@ func testStorageRegisterMetricNames(s *Storage) error {
 }

 func TestStorageAddRowsSerial(t *testing.T) {
+	rng := rand.New(rand.NewSource(1))
 	path := "TestStorageAddRowsSerial"
 	retentionMsecs := int64(msecsPerMonth * 10)
 	s, err := OpenStorage(path, retentionMsecs, 1e5, 1e5)
 	if err != nil {
 		t.Fatalf("cannot open storage: %s", err)
 	}
-	if err := testStorageAddRows(s); err != nil {
+	if err := testStorageAddRows(rng, s); err != nil {
 		t.Fatalf("unexpected error: %s", err)
 	}
 	s.MustClose()
@@ -1095,9 +1097,10 @@ func TestStorageAddRowsConcurrent(t *testing.T) {
 	}
 	ch := make(chan error, 3)
 	for i := 0; i < cap(ch); i++ {
-		go func() {
-			ch <- testStorageAddRows(s)
-		}()
+		go func(n int) {
+			rLocal := rand.New(rand.NewSource(int64(n)))
+			ch <- testStorageAddRows(rLocal, s)
+		}(i)
 	}
 	for i := 0; i < cap(ch); i++ {
 		select {
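
The hunk above stops sharing one generator across goroutines: a *rand.Rand returned by rand.New is not safe for concurrent use (unlike the internally locked top-level rand functions), so each worker gets its own generator, seeded with its worker index to keep runs reproducible. A minimal sketch of the same pattern outside the test:

package main

import (
	"fmt"
	"math/rand"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			// Per-goroutine generator: no race on shared rand.Rand state,
			// and worker n sees the same stream on every run.
			rng := rand.New(rand.NewSource(int64(n)))
			fmt.Printf("worker %d: %d\n", n, rng.Int63n(1000))
		}(i)
	}
	wg.Wait()
}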
@@ -1115,7 +1118,7 @@
 	}
 }

-func testGenerateMetricRows(rows uint64, timestampMin, timestampMax int64) []MetricRow {
+func testGenerateMetricRows(rng *rand.Rand, rows uint64, timestampMin, timestampMax int64) []MetricRow {
 	var mrs []MetricRow
 	var mn MetricName
 	mn.Tags = []Tag{
@@ -1127,8 +1130,8 @@ func testGenerateMetricRows(rows uint64, timestampMin, timestampMax int64) []Met
 		mn.ProjectID = uint32(rand.Intn(3))
 		mn.MetricGroup = []byte(fmt.Sprintf("metric_%d", i))
 		metricNameRaw := mn.marshalRaw(nil)
-		timestamp := rand.Int63n(timestampMax-timestampMin) + timestampMin
-		value := rand.NormFloat64() * 1e6
+		timestamp := rng.Int63n(timestampMax-timestampMin) + timestampMin
+		value := rng.NormFloat64() * 1e6

 		mr := MetricRow{
 			MetricNameRaw: metricNameRaw,
@@ -1140,14 +1143,14 @@ func testGenerateMetricRows(rows uint64, timestampMin, timestampMax int64) []Met
 	return mrs
 }

-func testStorageAddRows(s *Storage) error {
+func testStorageAddRows(rng *rand.Rand, s *Storage) error {
 	const rowsPerAdd = 1e3
 	const addsCount = 10
 	maxTimestamp := timestampFromTime(time.Now())
 	minTimestamp := maxTimestamp - s.retentionMsecs

 	for i := 0; i < addsCount; i++ {
-		mrs := testGenerateMetricRows(rowsPerAdd, minTimestamp, maxTimestamp)
+		mrs := testGenerateMetricRows(rng, rowsPerAdd, minTimestamp, maxTimestamp)
 		if err := s.AddRows(mrs, defaultPrecisionBits); err != nil {
 			return fmt.Errorf("unexpected error when adding mrs: %w", err)
 		}
@@ -1274,6 +1277,7 @@ func TestStorageRotateIndexDB(t *testing.T) {
 }

 func testStorageAddMetrics(s *Storage, workerNum int) error {
+	rng := rand.New(rand.NewSource(1))
 	const rowsCount = 1e3

 	var mn MetricName
@@ -1284,10 +1288,10 @@ func testStorageAddMetrics(s *Storage, workerNum int) error {
 	for i := 0; i < rowsCount; i++ {
 		mn.AccountID = 123
 		mn.ProjectID = uint32(i % 3)
-		mn.MetricGroup = []byte(fmt.Sprintf("metric_%d_%d", workerNum, rand.Intn(10)))
+		mn.MetricGroup = []byte(fmt.Sprintf("metric_%d_%d", workerNum, rng.Intn(10)))
 		metricNameRaw := mn.marshalRaw(nil)
-		timestamp := rand.Int63n(1e10)
-		value := rand.NormFloat64() * 1e6
+		timestamp := rng.Int63n(1e10)
+		value := rng.NormFloat64() * 1e6

 		mr := MetricRow{
 			MetricNameRaw: metricNameRaw,
@@ -1310,6 +1314,7 @@ func testStorageAddMetrics(s *Storage, workerNum int) error {
 }

 func TestStorageDeleteStaleSnapshots(t *testing.T) {
+	rng := rand.New(rand.NewSource(1))
 	path := "TestStorageDeleteStaleSnapshots"
 	retentionMsecs := int64(msecsPerMonth * 10)
 	s, err := OpenStorage(path, retentionMsecs, 1e5, 1e5)
@@ -1321,7 +1326,7 @@ func TestStorageDeleteStaleSnapshots(t *testing.T) {
 	maxTimestamp := timestampFromTime(time.Now())
 	minTimestamp := maxTimestamp - s.retentionMsecs
 	for i := 0; i < addsCount; i++ {
-		mrs := testGenerateMetricRows(rowsPerAdd, minTimestamp, maxTimestamp)
+		mrs := testGenerateMetricRows(rng, rowsPerAdd, minTimestamp, maxTimestamp)
 		if err := s.AddRows(mrs, defaultPrecisionBits); err != nil {
 			t.Fatalf("unexpected error when adding mrs: %s", err)
 		}


@@ -10,6 +10,8 @@ import (
 )

 func TestTableSearch(t *testing.T) {
+	rng := rand.New(rand.NewSource(1))
+
 	var trData TimeRange
 	trData.fromPartitionTime(time.Now())
 	trData.MinTimestamp -= 5 * 365 * 24 * 3600 * 1000
@@ -19,7 +21,7 @@
 			MinTimestamp: trData.MinTimestamp + 4e3,
 			MaxTimestamp: trData.MaxTimestamp - 4e3,
 		}
-		testTableSearchEx(t, trData, trSearch, 1, 10, 1000, 10)
+		testTableSearchEx(t, rng, trData, trSearch, 1, 10, 1000, 10)
 	})

 	t.Run("SinglePartPerPartition", func(t *testing.T) {
@@ -27,7 +29,7 @@
 			MinTimestamp: trData.MinTimestamp + 4e3,
 			MaxTimestamp: trData.MaxTimestamp - 4e3,
 		}
-		testTableSearchEx(t, trData, trSearch, 12, 1, 1000, 10)
+		testTableSearchEx(t, rng, trData, trSearch, 12, 1, 1000, 10)
 	})

 	t.Run("SingleRowPerPartition", func(t *testing.T) {
@@ -35,7 +37,7 @@
 			MinTimestamp: trData.MinTimestamp + 4e3,
 			MaxTimestamp: trData.MaxTimestamp - 4e3,
 		}
-		testTableSearchEx(t, trData, trSearch, 12, 20, 1, 10)
+		testTableSearchEx(t, rng, trData, trSearch, 12, 20, 1, 10)
 	})

 	t.Run("SingleTSID", func(t *testing.T) {
@@ -43,7 +45,7 @@
 			MinTimestamp: trData.MinTimestamp + 4e3,
 			MaxTimestamp: trData.MaxTimestamp - 4e3,
 		}
-		testTableSearchEx(t, trData, trSearch, 12, 5, 1000, 1)
+		testTableSearchEx(t, rng, trData, trSearch, 12, 5, 1000, 1)
 	})

 	t.Run("ManyPartitions", func(t *testing.T) {
@@ -51,7 +53,7 @@
 			MinTimestamp: trData.MinTimestamp + 4e3,
 			MaxTimestamp: trData.MaxTimestamp - 4e3,
 		}
-		testTableSearchEx(t, trData, trSearch, 20, 10, 30, 20)
+		testTableSearchEx(t, rng, trData, trSearch, 20, 10, 30, 20)
 	})

 	t.Run("ManyTSIDs", func(t *testing.T) {
@@ -59,7 +61,7 @@
 			MinTimestamp: trData.MinTimestamp + 4e3,
 			MaxTimestamp: trData.MaxTimestamp - 4e3,
 		}
-		testTableSearchEx(t, trData, trSearch, 2, 5, 5000, 1000)
+		testTableSearchEx(t, rng, trData, trSearch, 2, 5, 5000, 1000)
 	})

 	t.Run("ExactTimeRange", func(t *testing.T) {
@@ -67,7 +69,7 @@
 			MinTimestamp: trData.MinTimestamp,
 			MaxTimestamp: trData.MaxTimestamp,
 		}
-		testTableSearchEx(t, trData, trSearch, 2, 5, 1000, 10)
+		testTableSearchEx(t, rng, trData, trSearch, 2, 5, 1000, 10)
 	})

 	t.Run("InnerTimeRange", func(t *testing.T) {
@@ -75,7 +77,7 @@
 			MinTimestamp: trData.MinTimestamp + 4e3,
 			MaxTimestamp: trData.MaxTimestamp - 4e3,
 		}
-		testTableSearchEx(t, trData, trSearch, 2, 5, 1000, 10)
+		testTableSearchEx(t, rng, trData, trSearch, 2, 5, 1000, 10)
 	})

 	t.Run("OuterTimeRange", func(t *testing.T) {
@@ -83,7 +85,7 @@
 			MinTimestamp: trData.MinTimestamp - 1e6,
 			MaxTimestamp: trData.MaxTimestamp + 1e6,
 		}
-		testTableSearchEx(t, trData, trSearch, 2, 5, 1000, 10)
+		testTableSearchEx(t, rng, trData, trSearch, 2, 5, 1000, 10)
 	})

 	t.Run("LowTimeRange", func(t *testing.T) {
@@ -91,7 +93,7 @@
 			MinTimestamp: trData.MinTimestamp - 2e6,
 			MaxTimestamp: trData.MinTimestamp - 1e6,
 		}
-		testTableSearchEx(t, trData, trSearch, 2, 5, 1000, 10)
+		testTableSearchEx(t, rng, trData, trSearch, 2, 5, 1000, 10)
 	})

 	t.Run("HighTimeRange", func(t *testing.T) {
@@ -99,7 +101,7 @@
 			MinTimestamp: trData.MaxTimestamp + 1e6,
 			MaxTimestamp: trData.MaxTimestamp + 2e6,
 		}
-		testTableSearchEx(t, trData, trSearch, 2, 5, 1000, 10)
+		testTableSearchEx(t, rng, trData, trSearch, 2, 5, 1000, 10)
 	})

 	t.Run("LowerEndTimeRange", func(t *testing.T) {
@@ -107,7 +109,7 @@
 			MinTimestamp: trData.MinTimestamp - 1e6,
 			MaxTimestamp: trData.MaxTimestamp - 4e3,
 		}
-		testTableSearchEx(t, trData, trSearch, 2, 5, 1000, 10)
+		testTableSearchEx(t, rng, trData, trSearch, 2, 5, 1000, 10)
 	})

 	t.Run("HigherEndTimeRange", func(t *testing.T) {
@@ -115,18 +117,18 @@
 			MinTimestamp: trData.MinTimestamp + 4e3,
 			MaxTimestamp: trData.MaxTimestamp + 1e6,
 		}
-		testTableSearchEx(t, trData, trSearch, 2, 5, 1000, 10)
+		testTableSearchEx(t, rng, trData, trSearch, 2, 5, 1000, 10)
 	})
 }

-func testTableSearchEx(t *testing.T, trData, trSearch TimeRange, partitionsCount, maxPartsPerPartition, maxRowsPerPart, tsidsCount int) {
+func testTableSearchEx(t *testing.T, rng *rand.Rand, trData, trSearch TimeRange, partitionsCount, maxPartsPerPartition, maxRowsPerPart, tsidsCount int) {
 	t.Helper()

 	// Generate tsids to search.
 	var tsids []TSID
 	var tsid TSID
 	for i := 0; i < 25; i++ {
-		tsid.MetricID = uint64(rand.Intn(tsidsCount * 2))
+		tsid.MetricID = uint64(rng.Intn(tsidsCount * 2))
 		tsids = append(tsids, tsid)
 	}
 	sort.Slice(tsids, func(i, j int) bool { return tsids[i].Less(&tsids[j]) })
@@ -142,17 +144,17 @@ func testTableSearchEx(t *testing.T, trData, trSearch TimeRange, partitionsCount
 	ptr.fromPartitionTimestamp(trData.MinTimestamp)
 	var rowss [][]rawRow
 	for i := 0; i < partitionsCount; i++ {
-		partsCount := rand.Intn(maxPartsPerPartition) + 1
+		partsCount := rng.Intn(maxPartsPerPartition) + 1
 		for j := 0; j < partsCount; j++ {
 			var rows []rawRow
 			timestamp := ptr.MinTimestamp
-			rowsCount := rand.Intn(maxRowsPerPart) + 1
+			rowsCount := rng.Intn(maxRowsPerPart) + 1
 			for k := 0; k < rowsCount; k++ {
-				r.TSID.MetricID = uint64(rand.Intn(tsidsCount))
+				r.TSID.MetricID = uint64(rng.Intn(tsidsCount))
 				r.Timestamp = timestamp
-				r.Value = float64(int(rand.NormFloat64() * 1e5))
-				timestamp += int64(rand.Intn(1e4)) + 1
+				r.Value = float64(int(rng.NormFloat64() * 1e5))
+				timestamp += int64(rng.Intn(1e4)) + 1

 				if timestamp > ptr.MaxTimestamp {
 					break
 				}


@@ -79,17 +79,18 @@ func createBenchTable(b *testing.B, path string, startTimestamp int64, rowsPerIn
 	var wg sync.WaitGroup
 	for k := 0; k < cgroup.AvailableCPUs(); k++ {
 		wg.Add(1)
-		go func() {
+		go func(n int) {
+			rng := rand.New(rand.NewSource(int64(n)))
 			rows := make([]rawRow, rowsPerInsert)
 			value := float64(100)

 			for int(atomic.AddUint64(&insertsCount, ^uint64(0))) >= 0 {
 				for j := 0; j < rowsPerInsert; j++ {
-					ts := atomic.AddUint64(&timestamp, uint64(10+rand.Int63n(2)))
-					value += float64(int(rand.NormFloat64() * 5))
+					ts := atomic.AddUint64(&timestamp, uint64(10+rng.Int63n(2)))
+					value += float64(int(rng.NormFloat64() * 5))
 					r := &rows[j]
 					r.PrecisionBits = defaultPrecisionBits
-					r.TSID.MetricID = uint64(rand.Intn(tsidsCount) + 1)
+					r.TSID.MetricID = uint64(rng.Intn(tsidsCount) + 1)
 					r.Timestamp = int64(ts)
 					r.Value = value
 				}
@@ -98,7 +99,7 @@ func createBenchTable(b *testing.B, path string, startTimestamp int64, rowsPerIn
 				}
 			}
 			wg.Done()
-		}()
+		}(k)
 	}

 	wg.Wait()


@@ -27,15 +27,16 @@ func benchmarkTableAddRows(b *testing.B, rowsPerInsert, tsidsCount int) {
 	startTimestamp := timestampFromTime(time.Now())
 	timestamp := startTimestamp
 	value := float64(100)

+	rng := rand.New(rand.NewSource(1))
 	for i := 0; i < rowsPerInsert; i++ {
 		r := &rows[i]
 		r.PrecisionBits = defaultPrecisionBits
-		r.TSID.MetricID = uint64(rand.Intn(tsidsCount) + 1)
+		r.TSID.MetricID = uint64(rng.Intn(tsidsCount) + 1)
 		r.Timestamp = timestamp
 		r.Value = value
-		timestamp += 10 + rand.Int63n(2)
-		value += float64(int(rand.NormFloat64() * 5))
+		timestamp += 10 + rng.Int63n(2)
+		value += float64(int(rng.NormFloat64() * 5))
 	}

 	timestampDelta := timestamp - startTimestamp