Revert "lib/storage: remove unused fetchData arg from BlockRef.MustReadBlock"

This reverts commit bab6a15ae0.

Reason for revert: the `fetchData` arg is used in the cluster branch.
Keeping this arg in the master branch makes the diff with the cluster branch smaller.
Aliaksandr Valialkin 2020-09-24 22:44:23 +03:00
parent bab6a15ae0
commit 82973f8ae7
7 changed files with 18 additions and 9 deletions

@@ -386,7 +386,7 @@ func (sb *sortBlock) reset() {
 func (sb *sortBlock) unpackFrom(tmpBlock *storage.Block, br storage.BlockRef, tr storage.TimeRange) error {
     tmpBlock.Reset()
-    br.MustReadBlock(tmpBlock)
+    br.MustReadBlock(tmpBlock, true)
     if err := tmpBlock.UnmarshalData(); err != nil {
         return fmt.Errorf("cannot unmarshal block: %w", err)
     }

@@ -1251,7 +1251,7 @@ func testPartSearchSerial(p *part, tsids []TSID, tr TimeRange, expectedRawBlocks
     var bs []Block
     for ps.NextBlock() {
         var b Block
-        ps.BlockRef.MustReadBlock(&b)
+        ps.BlockRef.MustReadBlock(&b, true)
         bs = append(bs, b)
     }
     if err := ps.Error(); err != nil {

@@ -243,7 +243,7 @@ func testPartitionSearchSerial(pt *partition, tsids []TSID, tr TimeRange, rbsExp
     pts.Init(pt, tsids, tr)
     for pts.NextBlock() {
         var b Block
-        pts.BlockRef.MustReadBlock(&b)
+        pts.BlockRef.MustReadBlock(&b, true)
         bs = append(bs, b)
     }
     if err := pts.Error(); err != nil {

@@ -31,9 +31,14 @@ func (br *BlockRef) init(p *part, bh *blockHeader) {
 }
 
 // MustReadBlock reads block from br to dst.
-func (br *BlockRef) MustReadBlock(dst *Block) {
+//
+// if fetchData is false, then only block header is read, otherwise all the data is read.
+func (br *BlockRef) MustReadBlock(dst *Block, fetchData bool) {
     dst.Reset()
     dst.bh = br.bh
+    if !fetchData {
+        return
+    }
 
     dst.timestampsData = bytesutil.Resize(dst.timestampsData[:0], int(br.bh.TimestampsBlockSize))
     br.p.timestampsFile.MustReadAt(dst.timestampsData, int64(br.bh.TimestampsBlockOffset))
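The restored argument only matters to callers that do not need the block payload. Below is a minimal sketch of such a header-only caller, written in the style of the test loops in this diff; the helper name and the RowsCount accessor are illustrative assumptions, not code taken from the cluster branch.

    // countRowsHeaderOnly sums the row counts of the blocks matched by ps
    // without reading or decompressing timestamps and values: with
    // fetchData=false, MustReadBlock copies only the block header into b.
    func countRowsHeaderOnly(ps *partSearch) (int, error) {
        rows := 0
        for ps.NextBlock() {
            var b Block
            ps.BlockRef.MustReadBlock(&b, false)
            rows += b.RowsCount()
        }
        return rows, ps.Error()
    }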

@@ -222,7 +222,7 @@ func testSearchInternal(st *Storage, tr TimeRange, mrs []MetricRow, accountsCoun
     var mbs []metricBlock
     for s.NextMetricBlock() {
         var b Block
-        s.MetricBlockRef.BlockRef.MustReadBlock(&b)
+        s.MetricBlockRef.BlockRef.MustReadBlock(&b, true)
         var mb metricBlock
         mb.MetricName = append(mb.MetricName, s.MetricBlockRef.MetricName...)

@@ -254,7 +254,7 @@ func testTableSearchSerial(tb *table, tsids []TSID, tr TimeRange, rbsExpected []
     ts.Init(tb, tsids, tr)
     for ts.NextBlock() {
         var b Block
-        ts.BlockRef.MustReadBlock(&b)
+        ts.BlockRef.MustReadBlock(&b, true)
         bs = append(bs, b)
     }
     if err := ts.Error(); err != nil {

@@ -26,7 +26,11 @@ func BenchmarkTableSearch(b *testing.B) {
     b.Run(fmt.Sprintf("tsidsCount_%d", tsidsCount), func(b *testing.B) {
         for _, tsidsSearch := range []int{1, 1e1, 1e2, 1e3, 1e4} {
             b.Run(fmt.Sprintf("tsidsSearch_%d", tsidsSearch), func(b *testing.B) {
-                benchmarkTableSearch(b, rowsCount, tsidsCount, tsidsSearch)
+                for _, fetchData := range []bool{true, false} {
+                    b.Run(fmt.Sprintf("fetchData_%v", fetchData), func(b *testing.B) {
+                        benchmarkTableSearch(b, rowsCount, tsidsCount, tsidsSearch, fetchData)
+                    })
+                }
             })
         }
     })
@@ -103,7 +107,7 @@ func createBenchTable(b *testing.B, path string, startTimestamp int64, rowsPerIn
     tb.MustClose()
 }
 
-func benchmarkTableSearch(b *testing.B, rowsCount, tsidsCount, tsidsSearch int) {
+func benchmarkTableSearch(b *testing.B, rowsCount, tsidsCount, tsidsSearch int, fetchData bool) {
     startTimestamp := timestampFromTime(time.Now()) - 365*24*3600*1000
     rowsPerInsert := getMaxRawRowsPerPartition()
@@ -130,7 +134,7 @@ func benchmarkTableSearch(b *testing.B, rowsCount, tsidsCount, tsidsSearch int)
         }
         ts.Init(tb, tsids, tr)
         for ts.NextBlock() {
-            ts.BlockRef.MustReadBlock(&tmpBlock)
+            ts.BlockRef.MustReadBlock(&tmpBlock, fetchData)
         }
         ts.MustClose()
     }
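With the new fetchData_true and fetchData_false sub-benchmarks in place, the header-only path can be compared against a full block read using the standard Go tooling, for example:

    go test -run=NONE -bench=BenchmarkTableSearch ./lib/storage/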