lib/storage: properly free up resources from newTestStorage() by calling stopTestStorage()
commit 443661a5da
parent 71f3898f84
9 changed files with 15 additions and 8 deletions
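
The pattern this commit applies across the lib/storage tests, shown as a minimal sketch below: every test or benchmark that creates a Storage through newTestStorage() now releases it by calling stopTestStorage() when it is done. The test name and body are hypothetical illustrations; they assume the snippet lives in the same test package as those helpers and that newTestStorage() takes no arguments and returns the *Storage value that stopTestStorage() expects. Only the helper names themselves come from this diff.

func TestStorageLifecyclePattern(t *testing.T) {
	// Hypothetical test: create the test Storage the same way the touched
	// tests do. Before this commit its resources were never released.
	strg := newTestStorage()

	// ... exercise strg here: open tables or partitions, run searches,
	// then close them with MustClose(), as the hunks below do ...

	// Free up the resources held by the test Storage, mirroring the
	// stopTestStorage(strg) calls added throughout this commit.
	stopTestStorage(strg)
}
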
@@ -303,14 +303,12 @@ func BenchmarkIndexDBGetTSIDs(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		var genTSIDLocal generationTSID
		var metricNameLocal []byte
		var metricNameLocalRaw []byte
		mnLocal := mn
		is := db.getIndexSearch(noDeadline)
		for pb.Next() {
			for i := 0; i < recordsPerLoop; i++ {
				mnLocal.sortTags()
				metricNameLocal = mnLocal.Marshal(metricNameLocal[:0])
				metricNameLocalRaw = mnLocal.marshalRaw(metricNameLocalRaw[:0])
				if !is.getTSIDByMetricName(&genTSIDLocal, metricNameLocal, date) {
					panic(fmt.Errorf("cannot obtain tsid for row %d", i))
				}
@@ -384,6 +384,7 @@ func TestMergeForciblyStop(t *testing.T) {
	if rowsDeleted != 0 {
		t.Fatalf("unexpected rowsDeleted; got %d; want %d", rowsDeleted, 0)
	}
+	stopTestStorage(strg)
}

func testMergeBlockStreams(t *testing.T, bsrs []*blockStreamReader, expectedBlocksCount, expectedRowsCount int, expectedMinTimestamp, expectedMaxTimestamp int64) {
@@ -399,6 +400,7 @@ func testMergeBlockStreams(t *testing.T, bsrs []*blockStreamReader, expectedBloc
	if err := mergeBlockStreams(&mp.ph, &bsw, bsrs, nil, strg, 0, &rowsMerged, &rowsDeleted); err != nil {
		t.Fatalf("unexpected error in mergeBlockStreams: %s", err)
	}
+	stopTestStorage(strg)

	// Verify written data.
	if mp.ph.RowsCount != uint64(expectedRowsCount) {
@@ -47,6 +47,8 @@ func benchmarkMergeBlockStreams(b *testing.B, mps []*inmemoryPart, rowsPerLoop i
			}
		}
	})
+
+	stopTestStorage(strg)
}

var benchTwoSourcesWorstCaseMPS = func() []*inmemoryPart {
@@ -186,6 +186,7 @@ func testPartitionSearchEx(t *testing.T, ptt int64, tr TimeRange, partsCount, ma
	pt = mustOpenPartition(smallPartsPath, bigPartsPath, strg)
	testPartitionSearch(t, pt, tsids, tr, rbsExpected, rowsCountExpected)
	pt.MustClose()
+	stopTestStorage(strg)

	if err := os.RemoveAll("small-table"); err != nil {
		t.Fatalf("cannot remove small parts directory: %s", err)
@@ -1845,7 +1845,7 @@ func SetLogNewSeries(ok bool) {

var logNewSeries = false

-func (s *Storage) createAllIndexesForMetricName(is *indexSearch, mn *MetricName, metricNameRaw []byte, genTSID *generationTSID, date uint64) error {
+func (s *Storage) createAllIndexesForMetricName(is *indexSearch, mn *MetricName, metricNameRaw []byte, genTSID *generationTSID, date uint64) {
	is.createGlobalIndexes(&genTSID.TSID, mn)
	is.createPerDayIndexes(date, &genTSID.TSID, mn)
@@ -1856,8 +1856,6 @@ func (s *Storage) createAllIndexesForMetricName(is *indexSearch, mn *MetricName,
	// Register the (date, metricID) entry in the cache,
	// so next time the entry is found there instead of searching for it in the indexdb.
	s.dateMetricIDCache.Set(date, genTSID.TSID.MetricID)
-
-	return nil
}

func (s *Storage) registerSeriesCardinality(metricID uint64, metricNameRaw []byte) bool {
@@ -203,6 +203,7 @@ func testTableSearchEx(t *testing.T, rng *rand.Rand, trData, trSearch TimeRange,
	tb = mustOpenTable("test-table", strg)
	testTableSearch(t, tb, tsids, trSearch, rbsExpected, rowsCountExpected)
	tb.MustClose()
+	stopTestStorage(strg)
}

func testTableSearch(t *testing.T, tb *table, tsids []TSID, tr TimeRange, rbsExpected []rawBlock, rowsCountExpected int64) {
@@ -38,7 +38,7 @@ func BenchmarkTableSearch(b *testing.B) {
	}
}

-func openBenchTable(b *testing.B, startTimestamp int64, rowsPerInsert, rowsCount, tsidsCount int) *table {
+func openBenchTable(b *testing.B, startTimestamp int64, rowsPerInsert, rowsCount, tsidsCount int) (*table, *Storage) {
	b.Helper()

	path := filepath.Join("benchmarkTableSearch", fmt.Sprintf("rows%d_tsids%d", rowsCount, tsidsCount))
@@ -58,7 +58,7 @@ func openBenchTable(b *testing.B, startTimestamp int64, rowsPerInsert, rowsCount
		b.Fatalf("unexpected rows count in the table %q; got %d; want %d", path, rowsCount, rowsCountExpected)
	}

-	return tb
+	return tb, strg
}

var createdBenchTables = make(map[string]bool)
@@ -98,13 +98,14 @@ func createBenchTable(b *testing.B, path string, startTimestamp int64, rowsPerIn
	wg.Wait()

	tb.MustClose()
+	stopTestStorage(strg)
}

func benchmarkTableSearch(b *testing.B, rowsCount, tsidsCount, tsidsSearch int) {
	startTimestamp := timestampFromTime(time.Now()) - 365*24*3600*1000
	rowsPerInsert := getMaxRawRowsPerShard()

-	tb := openBenchTable(b, startTimestamp, rowsPerInsert, rowsCount, tsidsCount)
+	tb, strg := openBenchTable(b, startTimestamp, rowsPerInsert, rowsCount, tsidsCount)
	tr := TimeRange{
		MinTimestamp: startTimestamp,
		MaxTimestamp: (1 << 63) - 1,
@@ -135,4 +136,5 @@ func benchmarkTableSearch(b *testing.B, rowsCount, tsidsCount, tsidsSearch int)
	b.StopTimer()

	tb.MustClose()
+	stopTestStorage(strg)
}
@@ -29,4 +29,6 @@ func TestTableOpenClose(t *testing.T) {
		tb := mustOpenTable(path, strg)
		tb.MustClose()
	}
+
+	stopTestStorage(strg)
}
@@ -104,4 +104,5 @@ func benchmarkTableAddRows(b *testing.B, rowsPerInsert, tsidsCount int) {
			b.Fatalf("cannot remove table %q: %s", tablePath, err)
		}
	}
+	stopTestStorage(strg)
}