Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git, synced 2024-11-21 14:44:00 +00:00
app/vmselect: optimize /api/v1/series a bit for time ranges smaller than one day
parent bffd72e9a9
commit e578549b8a
11 changed files with 67 additions and 159 deletions
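In short, this commit drops the fetchData flag from the vmselect search path: /api/v1/series now always goes through netstorage.SearchMetricNames, while ProcessSearchQuery, packedTimeseries.Unpack and BlockRef.MustReadBlock lose their fetchData argument. A rough sketch of the resulting SeriesHandler flow (a handler fragment with error handling trimmed; every name is taken from the diff below, nothing new is introduced):

    sq := storage.NewSearchQuery(cp.start, cp.end, cp.filterss, *maxSeriesLimit)
    // Fetch only the metric names; no timestamps or values are read for /api/v1/series.
    mns, err := netstorage.SearchMetricNames(qt, sq, cp.deadline)
    if err != nil {
        return fmt.Errorf("cannot fetch time series for %q: %w", sq, err)
    }
    w.Header().Set("Content-Type", "application/json")
    bw := bufferedwriter.Get(w)
    defer bufferedwriter.Put(bw)
    qtDone := func() {
        qt.Donef("start=%d, end=%d", cp.start, cp.end)
    }
    // WriteSeriesResponse now receives the metric names directly instead of a channel of rendered buffers.
    WriteSeriesResponse(bw, mns, qt, qtDone)
    return bw.Flush()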
@@ -51,9 +51,8 @@ func (r *Result) reset() {
 // Results holds results returned from ProcessSearchQuery.
 type Results struct {
-	tr        storage.TimeRange
-	fetchData bool
-	deadline  searchutils.Deadline
+	tr       storage.TimeRange
+	deadline searchutils.Deadline
 
 	packedTimeseries []packedTimeseries
 	sr               *storage.Search

@@ -146,11 +145,11 @@ func (tsw *timeseriesWork) do(r *Result, workerID uint) error {
 		atomic.StoreUint32(tsw.mustStop, 1)
 		return fmt.Errorf("timeout exceeded during query execution: %s", rss.deadline.String())
 	}
-	if err := tsw.pts.Unpack(r, rss.tbf, rss.tr, rss.fetchData); err != nil {
+	if err := tsw.pts.Unpack(r, rss.tbf, rss.tr); err != nil {
 		atomic.StoreUint32(tsw.mustStop, 1)
 		return fmt.Errorf("error during time series unpacking: %w", err)
 	}
-	if len(r.Timestamps) > 0 || !rss.fetchData {
+	if len(r.Timestamps) > 0 {
 		if err := tsw.f(r, workerID); err != nil {
 			atomic.StoreUint32(tsw.mustStop, 1)
 			return err

@@ -377,15 +376,11 @@ var tmpBlockPool sync.Pool
 var unpackBatchSize = 5000
 
 // Unpack unpacks pts to dst.
-func (pts *packedTimeseries) Unpack(dst *Result, tbf *tmpBlocksFile, tr storage.TimeRange, fetchData bool) error {
+func (pts *packedTimeseries) Unpack(dst *Result, tbf *tmpBlocksFile, tr storage.TimeRange) error {
 	dst.reset()
 	if err := dst.MetricName.Unmarshal(bytesutil.ToUnsafeBytes(pts.metricName)); err != nil {
 		return fmt.Errorf("cannot unmarshal metricName %q: %w", pts.metricName, err)
 	}
-	if !fetchData {
-		// Do not spend resources on data reading and unpacking.
-		return nil
-	}
 
 	// Spin up local workers.
 	// Do not use global workers pool, since it increases inter-CPU memory ping-poing,

@@ -559,7 +554,7 @@ func (sb *sortBlock) reset() {
 func (sb *sortBlock) unpackFrom(tmpBlock *storage.Block, tbf *tmpBlocksFile, br blockRef, tr storage.TimeRange) error {
 	tmpBlock.Reset()
 	brReal := tbf.MustReadBlockRefAt(br.partRef, br.addr)
-	brReal.MustReadBlock(tmpBlock, true)
+	brReal.MustReadBlock(tmpBlock)
 	if err := tmpBlock.UnmarshalData(); err != nil {
 		return fmt.Errorf("cannot unmarshal block: %w", err)
 	}

@@ -895,7 +890,7 @@ func ExportBlocks(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline sear
 			return fmt.Errorf("cannot unmarshal metricName for block #%d: %w", blocksRead, err)
 		}
 		br := sr.MetricBlockRef.BlockRef
-		br.MustReadBlock(&xw.b, true)
+		br.MustReadBlock(&xw.b)
 		samples += br.RowsCount()
 		workCh <- xw
 	}

@@ -966,8 +961,8 @@ func SearchMetricNames(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline
 // ProcessSearchQuery performs sq until the given deadline.
 //
 // Results.RunParallel or Results.Cancel must be called on the returned Results.
-func ProcessSearchQuery(qt *querytracer.Tracer, sq *storage.SearchQuery, fetchData bool, deadline searchutils.Deadline) (*Results, error) {
-	qt = qt.NewChild("fetch matching series: %s, fetchData=%v", sq, fetchData)
+func ProcessSearchQuery(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline searchutils.Deadline) (*Results, error) {
+	qt = qt.NewChild("fetch matching series: %s", sq)
 	defer qt.Done()
 	if deadline.Exceeded() {
 		return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())

@@ -1052,7 +1047,6 @@ func ProcessSearchQuery(qt *querytracer.Tracer, sq *storage.SearchQuery, fetchDa
 	var rss Results
 	rss.tr = tr
-	rss.fetchData = fetchData
 	rss.deadline = deadline
 	pts := make([]packedTimeseries, len(orderedMetricNames))
 	for i, metricName := range orderedMetricNames {
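For reference, a minimal sketch of the narrowed ProcessSearchQuery call pattern together with Results.RunParallel, mirroring the call sites updated below (fragment, error handling trimmed):

    rss, err := netstorage.ProcessSearchQuery(qt, sq, deadline) // the fetchData argument is gone
    if err != nil {
        return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
    }
    err = rss.RunParallel(qt, func(rs *netstorage.Result, workerID uint) error {
        // rs always carries unpacked timestamps and values now, since header-only fetches were removed.
        return nil
    })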
@@ -73,7 +73,7 @@ func FederateHandler(startTime time.Time, w http.ResponseWriter, r *http.Request
 		cp.start = cp.end - lookbackDelta
 	}
 	sq := storage.NewSearchQuery(cp.start, cp.end, cp.filterss, *maxFederateSeries)
-	rss, err := netstorage.ProcessSearchQuery(nil, sq, true, cp.deadline)
+	rss, err := netstorage.ProcessSearchQuery(nil, sq, cp.deadline)
 	if err != nil {
 		return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
 	}

@@ -134,7 +134,7 @@ func ExportCSVHandler(startTime time.Time, w http.ResponseWriter, r *http.Reques
 	}
 	doneCh := make(chan error, 1)
 	if !reduceMemUsage {
-		rss, err := netstorage.ProcessSearchQuery(nil, sq, true, cp.deadline)
+		rss, err := netstorage.ProcessSearchQuery(nil, sq, cp.deadline)
 		if err != nil {
 			return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
 		}

@@ -336,7 +336,7 @@ func exportHandler(qt *querytracer.Tracer, w http.ResponseWriter, cp *commonPara
 	resultsCh := make(chan *quicktemplate.ByteBuffer, cgroup.AvailableCPUs())
 	doneCh := make(chan error, 1)
 	if !reduceMemUsage {
-		rss, err := netstorage.ProcessSearchQuery(qt, sq, true, cp.deadline)
+		rss, err := netstorage.ProcessSearchQuery(qt, sq, cp.deadline)
 		if err != nil {
 			return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
 		}
@@ -610,66 +610,19 @@ func SeriesHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseW
 		cp.start = cp.end - defaultStep
 	}
 	sq := storage.NewSearchQuery(cp.start, cp.end, cp.filterss, *maxSeriesLimit)
-	qtDone := func() {
-		qt.Donef("start=%d, end=%d", cp.start, cp.end)
-	}
-	if cp.end-cp.start > 24*3600*1000 {
-		// It is cheaper to call SearchMetricNames on time ranges exceeding a day.
-		mns, err := netstorage.SearchMetricNames(qt, sq, cp.deadline)
-		if err != nil {
-			return fmt.Errorf("cannot fetch time series for %q: %w", sq, err)
-		}
-		w.Header().Set("Content-Type", "application/json")
-		bw := bufferedwriter.Get(w)
-		defer bufferedwriter.Put(bw)
-		resultsCh := make(chan *quicktemplate.ByteBuffer)
-		go func() {
-			for i := range mns {
-				bb := quicktemplate.AcquireByteBuffer()
-				writemetricNameObject(bb, &mns[i])
-				resultsCh <- bb
-			}
-			close(resultsCh)
-		}()
-		// WriteSeriesResponse must consume all the data from resultsCh.
-		WriteSeriesResponse(bw, resultsCh, qt, qtDone)
-		if err := bw.Flush(); err != nil {
-			return err
-		}
-		seriesDuration.UpdateDuration(startTime)
-		return nil
-	}
-	rss, err := netstorage.ProcessSearchQuery(qt, sq, false, cp.deadline)
+	mns, err := netstorage.SearchMetricNames(qt, sq, cp.deadline)
 	if err != nil {
-		return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
+		return fmt.Errorf("cannot fetch time series for %q: %w", sq, err)
 	}
-
 	w.Header().Set("Content-Type", "application/json")
 	bw := bufferedwriter.Get(w)
 	defer bufferedwriter.Put(bw)
-	resultsCh := make(chan *quicktemplate.ByteBuffer)
-	doneCh := make(chan error)
-	go func() {
-		err := rss.RunParallel(qt, func(rs *netstorage.Result, workerID uint) error {
-			if err := bw.Error(); err != nil {
-				return err
-			}
-			bb := quicktemplate.AcquireByteBuffer()
-			writemetricNameObject(bb, &rs.MetricName)
-			resultsCh <- bb
-			return nil
-		})
-		close(resultsCh)
-		doneCh <- err
-	}()
-	// WriteSeriesResponse must consume all the data from resultsCh.
-	WriteSeriesResponse(bw, resultsCh, qt, qtDone)
-	if err := bw.Flush(); err != nil {
-		return fmt.Errorf("cannot flush series response to remote client: %w", err)
+	qtDone := func() {
+		qt.Donef("start=%d, end=%d", cp.start, cp.end)
 	}
-	err = <-doneCh
-	if err != nil {
-		return fmt.Errorf("cannot send series response to remote client: %w", err)
+	WriteSeriesResponse(bw, mns, qt, qtDone)
+	if err := bw.Flush(); err != nil {
+		return err
 	}
 	return nil
 }
@@ -1,34 +1,22 @@
 {% import (
-	"github.com/valyala/quicktemplate"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
 ) %}
 
 {% stripspace %}
 SeriesResponse generates response for /api/v1/series.
 See https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers
-{% func SeriesResponse(resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer, qtDone func()) %}
+{% func SeriesResponse(mns []storage.MetricName, qt *querytracer.Tracer, qtDone func()) %}
 {
-	{% code seriesCount := 0 %}
 	"status":"success",
 	"data":[
-		{% code bb, ok := <-resultsCh %}
-		{% if ok %}
-			{%z= bb.B %}
-			{% code
-				quicktemplate.ReleaseByteBuffer(bb)
-				seriesCount++
-			%}
-			{% for bb := range resultsCh %}
-				,{%z= bb.B %}
-				{% code
-					quicktemplate.ReleaseByteBuffer(bb)
-					seriesCount++
-				%}
-			{% endfor %}
-		{% endif %}
+		{% for i := range mns %}
+			{%= metricNameObject(&mns[i]) %}
+			{% if i+1 < len(mns) %},{% endif %}
+		{% endfor %}
 	]
 	{% code
-		qt.Printf("generate response: series=%d", seriesCount)
+		qt.Printf("generate response: series=%d", len(mns))
 		qtDone()
 	%}
 	{%= dumpQueryTrace(qt) %}
@@ -7,7 +7,7 @@ package prometheus
 //line app/vmselect/prometheus/series_response.qtpl:1
 import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
-	"github.com/valyala/quicktemplate"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
 )
 
 // SeriesResponse generates response for /api/v1/series.See https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers
@@ -26,74 +26,56 @@ var (
 )
 
 //line app/vmselect/prometheus/series_response.qtpl:9
-func StreamSeriesResponse(qw422016 *qt422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer, qtDone func()) {
+func StreamSeriesResponse(qw422016 *qt422016.Writer, mns []storage.MetricName, qt *querytracer.Tracer, qtDone func()) {
 //line app/vmselect/prometheus/series_response.qtpl:9
-	qw422016.N().S(`{`)
-//line app/vmselect/prometheus/series_response.qtpl:11
-	seriesCount := 0
-
-//line app/vmselect/prometheus/series_response.qtpl:11
-	qw422016.N().S(`"status":"success","data":[`)
+	qw422016.N().S(`{"status":"success","data":[`)
+//line app/vmselect/prometheus/series_response.qtpl:13
+	for i := range mns {
 //line app/vmselect/prometheus/series_response.qtpl:14
-	bb, ok := <-resultsCh
-
+		streammetricNameObject(qw422016, &mns[i])
+//line app/vmselect/prometheus/series_response.qtpl:15
+		if i+1 < len(mns) {
+//line app/vmselect/prometheus/series_response.qtpl:15
-	if ok {
-//line app/vmselect/prometheus/series_response.qtpl:16
-		qw422016.N().Z(bb.B)
-//line app/vmselect/prometheus/series_response.qtpl:18
-		quicktemplate.ReleaseByteBuffer(bb)
-		seriesCount++
-
-//line app/vmselect/prometheus/series_response.qtpl:21
-		for bb := range resultsCh {
-//line app/vmselect/prometheus/series_response.qtpl:21
 			qw422016.N().S(`,`)
-//line app/vmselect/prometheus/series_response.qtpl:22
-			qw422016.N().Z(bb.B)
-//line app/vmselect/prometheus/series_response.qtpl:24
-			quicktemplate.ReleaseByteBuffer(bb)
-			seriesCount++
-
-//line app/vmselect/prometheus/series_response.qtpl:27
+//line app/vmselect/prometheus/series_response.qtpl:15
 		}
-//line app/vmselect/prometheus/series_response.qtpl:28
+//line app/vmselect/prometheus/series_response.qtpl:16
 	}
-//line app/vmselect/prometheus/series_response.qtpl:28
+//line app/vmselect/prometheus/series_response.qtpl:16
 	qw422016.N().S(`]`)
-//line app/vmselect/prometheus/series_response.qtpl:31
-	qt.Printf("generate response: series=%d", seriesCount)
+//line app/vmselect/prometheus/series_response.qtpl:19
+	qt.Printf("generate response: series=%d", len(mns))
 	qtDone()
 
-//line app/vmselect/prometheus/series_response.qtpl:34
+//line app/vmselect/prometheus/series_response.qtpl:22
 	streamdumpQueryTrace(qw422016, qt)
-//line app/vmselect/prometheus/series_response.qtpl:34
+//line app/vmselect/prometheus/series_response.qtpl:22
 	qw422016.N().S(`}`)
-//line app/vmselect/prometheus/series_response.qtpl:36
+//line app/vmselect/prometheus/series_response.qtpl:24
 }
 
-//line app/vmselect/prometheus/series_response.qtpl:36
-func WriteSeriesResponse(qq422016 qtio422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer, qtDone func()) {
-//line app/vmselect/prometheus/series_response.qtpl:36
+//line app/vmselect/prometheus/series_response.qtpl:24
+func WriteSeriesResponse(qq422016 qtio422016.Writer, mns []storage.MetricName, qt *querytracer.Tracer, qtDone func()) {
+//line app/vmselect/prometheus/series_response.qtpl:24
 	qw422016 := qt422016.AcquireWriter(qq422016)
-//line app/vmselect/prometheus/series_response.qtpl:36
-	StreamSeriesResponse(qw422016, resultsCh, qt, qtDone)
-//line app/vmselect/prometheus/series_response.qtpl:36
+//line app/vmselect/prometheus/series_response.qtpl:24
+	StreamSeriesResponse(qw422016, mns, qt, qtDone)
+//line app/vmselect/prometheus/series_response.qtpl:24
 	qt422016.ReleaseWriter(qw422016)
-//line app/vmselect/prometheus/series_response.qtpl:36
+//line app/vmselect/prometheus/series_response.qtpl:24
 }
 
-//line app/vmselect/prometheus/series_response.qtpl:36
-func SeriesResponse(resultsCh <-chan *quicktemplate.ByteBuffer, qt *querytracer.Tracer, qtDone func()) string {
-//line app/vmselect/prometheus/series_response.qtpl:36
+//line app/vmselect/prometheus/series_response.qtpl:24
+func SeriesResponse(mns []storage.MetricName, qt *querytracer.Tracer, qtDone func()) string {
+//line app/vmselect/prometheus/series_response.qtpl:24
 	qb422016 := qt422016.AcquireByteBuffer()
-//line app/vmselect/prometheus/series_response.qtpl:36
-	WriteSeriesResponse(qb422016, resultsCh, qt, qtDone)
-//line app/vmselect/prometheus/series_response.qtpl:36
+//line app/vmselect/prometheus/series_response.qtpl:24
+	WriteSeriesResponse(qb422016, mns, qt, qtDone)
+//line app/vmselect/prometheus/series_response.qtpl:24
 	qs422016 := string(qb422016.B)
-//line app/vmselect/prometheus/series_response.qtpl:36
+//line app/vmselect/prometheus/series_response.qtpl:24
 	qt422016.ReleaseByteBuffer(qb422016)
-//line app/vmselect/prometheus/series_response.qtpl:36
+//line app/vmselect/prometheus/series_response.qtpl:24
 	return qs422016
-//line app/vmselect/prometheus/series_response.qtpl:36
+//line app/vmselect/prometheus/series_response.qtpl:24
 }
@@ -903,7 +903,7 @@ func evalRollupFuncWithMetricExpr(qt *querytracer.Tracer, ec *EvalConfig, funcNa
 		minTimestamp -= ec.Step
 	}
 	sq := storage.NewSearchQuery(minTimestamp, ec.End, tfss, ec.MaxSeries)
-	rss, err := netstorage.ProcessSearchQuery(qt, sq, true, ec.Deadline)
+	rss, err := netstorage.ProcessSearchQuery(qt, sq, ec.Deadline)
 	if err != nil {
 		return nil, err
 	}
@@ -1251,7 +1251,7 @@ func testPartSearchSerial(p *part, tsids []TSID, tr TimeRange, expectedRawBlocks
 	var bs []Block
 	for ps.NextBlock() {
 		var b Block
-		ps.BlockRef.MustReadBlock(&b, true)
+		ps.BlockRef.MustReadBlock(&b)
 		bs = append(bs, b)
 	}
 	if err := ps.Error(); err != nil {
@@ -245,7 +245,7 @@ func testPartitionSearchSerial(pt *partition, tsids []TSID, tr TimeRange, rbsExp
 	pts.Init(pt, tsids, tr)
 	for pts.NextBlock() {
 		var b Block
-		pts.BlockRef.MustReadBlock(&b, true)
+		pts.BlockRef.MustReadBlock(&b)
 		bs = append(bs, b)
 	}
 	if err := pts.Error(); err != nil {
@@ -68,14 +68,9 @@ type PartRef struct {
 }
 
 // MustReadBlock reads block from br to dst.
-//
-// if fetchData is false, then only block header is read, otherwise all the data is read.
-func (br *BlockRef) MustReadBlock(dst *Block, fetchData bool) {
+func (br *BlockRef) MustReadBlock(dst *Block) {
 	dst.Reset()
 	dst.bh = br.bh
-	if !fetchData {
-		return
-	}
-
 	dst.timestampsData = bytesutil.ResizeNoCopyMayOverallocate(dst.timestampsData, int(br.bh.TimestampsBlockSize))
 	br.p.timestampsFile.MustReadAt(dst.timestampsData, int64(br.bh.TimestampsBlockOffset))
|
|||
var mbs []metricBlock
|
||||
for s.NextMetricBlock() {
|
||||
var b Block
|
||||
s.MetricBlockRef.BlockRef.MustReadBlock(&b, true)
|
||||
s.MetricBlockRef.BlockRef.MustReadBlock(&b)
|
||||
|
||||
var mb metricBlock
|
||||
mb.MetricName = append(mb.MetricName, s.MetricBlockRef.MetricName...)
|
||||
|
|
|
@@ -255,7 +255,7 @@ func testTableSearchSerial(tb *table, tsids []TSID, tr TimeRange, rbsExpected []
 	ts.Init(tb, tsids, tr)
 	for ts.NextBlock() {
 		var b Block
-		ts.BlockRef.MustReadBlock(&b, true)
+		ts.BlockRef.MustReadBlock(&b)
 		bs = append(bs, b)
 	}
 	if err := ts.Error(); err != nil {
@@ -27,11 +27,7 @@ func BenchmarkTableSearch(b *testing.B) {
 		b.Run(fmt.Sprintf("tsidsCount_%d", tsidsCount), func(b *testing.B) {
 			for _, tsidsSearch := range []int{1, 1e1, 1e2, 1e3, 1e4} {
 				b.Run(fmt.Sprintf("tsidsSearch_%d", tsidsSearch), func(b *testing.B) {
-					for _, fetchData := range []bool{true, false} {
-						b.Run(fmt.Sprintf("fetchData_%v", fetchData), func(b *testing.B) {
-							benchmarkTableSearch(b, rowsCount, tsidsCount, tsidsSearch, fetchData)
-						})
-					}
+					benchmarkTableSearch(b, rowsCount, tsidsCount, tsidsSearch)
 				})
 			}
 		})

@@ -110,7 +106,7 @@ func createBenchTable(b *testing.B, path string, startTimestamp int64, rowsPerIn
 	tb.MustClose()
 }
 
-func benchmarkTableSearch(b *testing.B, rowsCount, tsidsCount, tsidsSearch int, fetchData bool) {
+func benchmarkTableSearch(b *testing.B, rowsCount, tsidsCount, tsidsSearch int) {
 	startTimestamp := timestampFromTime(time.Now()) - 365*24*3600*1000
 	rowsPerInsert := getMaxRawRowsPerShard()
 

@@ -137,7 +133,7 @@ func benchmarkTableSearch(b *testing.B, rowsCount, tsidsCount, tsidsSearch int,
 			}
 			ts.Init(tb, tsids, tr)
 			for ts.NextBlock() {
-				ts.BlockRef.MustReadBlock(&tmpBlock, fetchData)
+				ts.BlockRef.MustReadBlock(&tmpBlock)
 			}
 			ts.MustClose()
 		}