app/vmselect: optimize /api/v1/series by skipping storage data

Fetch and process only time series metainfo.
Parent: 7f5afae1e3
Commit: 880b1d80b1

14 changed files with 120 additions and 44 deletions
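At a glance, the commit threads a new fetchData flag from vmselect's HTTP handlers all the way down to the block reader in vmstorage. A rough map of the call chain touched by the hunks below (assembled from this diff; file names were lost in extraction):

// SeriesHandler (vmselect) passes fetchData=false; data handlers pass true.
//   netstorage.ProcessSearchQuery(at, sq, fetchData, deadline)
//     storageNode.processSearchQuery            // "search_v3" RPC
//       processSearchQueryOnConn                // writeBool(fetchData) after requestData
//         Server.processVMSelectSearchQuery     // vmstorage: readBool()
//           Search.Init -> tableSearch.Init -> partitionSearch.Init -> partSearch.Init
//             partSearch.readBlock              // skips timestamps/values files if !fetchData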
@@ -45,9 +45,10 @@ func (r *Result) reset() {

 // Results holds results returned from ProcessSearchQuery.
 type Results struct {
 	at        *auth.Token
 	tr        storage.TimeRange
+	fetchData bool
 	deadline  Deadline

 	tbf *tmpBlocksFile
@@ -100,10 +101,10 @@ func (rss *Results) RunParallel(f func(rs *Result, workerID uint)) error {
 			err = fmt.Errorf("timeout exceeded during query execution: %s", rss.deadline.Timeout)
 			break
 		}
-		if err = pts.Unpack(rss.tbf, rs, rss.tr, rss.at, maxWorkersCount); err != nil {
+		if err = pts.Unpack(rss.tbf, rs, rss.tr, rss.fetchData, rss.at, maxWorkersCount); err != nil {
 			break
 		}
-		if len(rs.Timestamps) == 0 {
+		if len(rs.Timestamps) == 0 && rss.fetchData {
 			// Skip empty blocks.
 			continue
 		}
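The added `&& rss.fetchData` guard is easy to miss but essential: with fetchData=false every unpacked result has zero timestamps, so treating such results as "empty" would drop every series from the /api/v1/series response. A minimal consumer sketch, assuming the RunParallel callback shape shown above (the printing is illustrative only):

// With fetchData=false only rs.MetricName is meaningful;
// rs.Timestamps and rs.Values stay empty by design.
err := rss.RunParallel(func(rs *netstorage.Result, workerID uint) {
	fmt.Printf("series: %s\n", rs.MetricName.String())
})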
@@ -146,7 +147,7 @@ type packedTimeseries struct {
 }

 // Unpack unpacks pts to dst.
-func (pts *packedTimeseries) Unpack(tbf *tmpBlocksFile, dst *Result, tr storage.TimeRange, at *auth.Token, maxWorkersCount int) error {
+func (pts *packedTimeseries) Unpack(tbf *tmpBlocksFile, dst *Result, tr storage.TimeRange, fetchData bool, at *auth.Token, maxWorkersCount int) error {
 	dst.reset()

 	if err := dst.MetricName.Unmarshal(bytesutil.ToUnsafeBytes(pts.metricName)); err != nil {
@@ -173,7 +174,7 @@ func (pts *packedTimeseries) Unpack(tbf *tmpBlocksFile, dst *Result, tr storage.
 		var err error
 		for addr := range workCh {
 			sb := getSortBlock()
-			if err = sb.unpackFrom(tbf, addr, tr, at); err != nil {
+			if err = sb.unpackFrom(tbf, addr, tr, fetchData, at); err != nil {
 				break
 			}
@@ -292,10 +293,12 @@ func (sb *sortBlock) reset() {
 	sb.NextIdx = 0
 }

-func (sb *sortBlock) unpackFrom(tbf *tmpBlocksFile, addr tmpBlockAddr, tr storage.TimeRange, at *auth.Token) error {
+func (sb *sortBlock) unpackFrom(tbf *tmpBlocksFile, addr tmpBlockAddr, tr storage.TimeRange, fetchData bool, at *auth.Token) error {
 	tbf.MustReadBlockAt(&sb.b, addr)
-	if err := sb.b.UnmarshalData(); err != nil {
-		return fmt.Errorf("cannot unmarshal block: %s", err)
+	if fetchData {
+		if err := sb.b.UnmarshalData(); err != nil {
+			return fmt.Errorf("cannot unmarshal block: %s", err)
+		}
 	}
 	timestamps := sb.b.Timestamps()
@@ -692,7 +695,7 @@ func GetSeriesCount(at *auth.Token, deadline Deadline) (uint64, bool, error) {
 }

 // ProcessSearchQuery performs sq on storage nodes until the given deadline.
-func ProcessSearchQuery(at *auth.Token, sq *storage.SearchQuery, deadline Deadline) (*Results, bool, error) {
+func ProcessSearchQuery(at *auth.Token, sq *storage.SearchQuery, fetchData bool, deadline Deadline) (*Results, bool, error) {
 	requestData := sq.Marshal(nil)

 	// Send the query to all the storage nodes in parallel.
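ProcessSearchQuery now makes every caller state whether sample data is required. The call sites updated later in this diff reduce to a simple split:

// FederateHandler                -> true   (needs samples)
// exportHandler                  -> true   (needs samples)
// SeriesHandler (/api/v1/series) -> false  (metainfo only)
// evalRollupFuncWithMetricExpr   -> true   (query evaluation needs samples)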
@@ -708,7 +711,7 @@ func ProcessSearchQuery(at *auth.Token, sq *storage.SearchQuery, deadline Deadli
 	for _, sn := range storageNodes {
 		go func(sn *storageNode) {
 			sn.searchRequests.Inc()
-			results, err := sn.processSearchQuery(requestData, tr, deadline)
+			results, err := sn.processSearchQuery(requestData, tr, fetchData, deadline)
 			if err != nil {
 				sn.searchRequestErrors.Inc()
 				err = fmt.Errorf("cannot perform search on vmstorage %s: %s", sn.connPool.Addr(), err)
@@ -769,6 +772,7 @@ func ProcessSearchQuery(at *auth.Token, sq *storage.SearchQuery, deadline Deadli
 	rss.packedTimeseries = make([]packedTimeseries, len(m))
 	rss.at = at
 	rss.tr = tr
+	rss.fetchData = fetchData
 	rss.deadline = deadline
 	rss.tbf = tbf
 	i := 0
@@ -931,20 +935,20 @@ func (sn *storageNode) getSeriesCount(accountID, projectID uint32, deadline Dead
 	return n, nil
 }

-func (sn *storageNode) processSearchQuery(requestData []byte, tr storage.TimeRange, deadline Deadline) ([]*storage.MetricBlock, error) {
+func (sn *storageNode) processSearchQuery(requestData []byte, tr storage.TimeRange, fetchData bool, deadline Deadline) ([]*storage.MetricBlock, error) {
 	var results []*storage.MetricBlock
 	f := func(bc *handshake.BufferedConn) error {
-		rs, err := sn.processSearchQueryOnConn(bc, requestData, tr)
+		rs, err := sn.processSearchQueryOnConn(bc, requestData, tr, fetchData)
 		if err != nil {
 			return err
 		}
 		results = rs
 		return nil
 	}
-	if err := sn.execOnConn("search_v2", f, deadline); err != nil {
+	if err := sn.execOnConn("search_v3", f, deadline); err != nil {
 		// Try again before giving up.
 		results = nil
-		if err = sn.execOnConn("search_v2", f, deadline); err != nil {
+		if err = sn.execOnConn("search_v3", f, deadline); err != nil {
 			return nil, err
 		}
 	}
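Bumping the RPC name from search_v2 to search_v3 is the wire-compatibility mechanism: the request layout gains a byte, so a mixed-version vmselect/vmstorage pair fails fast on an unknown request name instead of desynchronizing mid-stream. The retry above guards against transient connection errors, not version mismatches. The framing both sides must now agree on, sketched from the hunks below:

// search_v3 request framing, in order:
//   1. uint64 length + marshaled SearchQuery   (writeBytes / readDataBufBytes)
//   2. one raw byte: fetchData, 0 or 1         (writeBool / readBool)
// search_v2 carried only item 1, hence the new name.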
@@ -1197,11 +1201,14 @@ const maxMetricBlockSize = 1024 * 1024
 // from vmstorage.
 const maxErrorMessageSize = 64 * 1024

-func (sn *storageNode) processSearchQueryOnConn(bc *handshake.BufferedConn, requestData []byte, tr storage.TimeRange) ([]*storage.MetricBlock, error) {
+func (sn *storageNode) processSearchQueryOnConn(bc *handshake.BufferedConn, requestData []byte, tr storage.TimeRange, fetchData bool) ([]*storage.MetricBlock, error) {
 	// Send the request to sn.
 	if err := writeBytes(bc, requestData); err != nil {
 		return nil, fmt.Errorf("cannot write requestData: %s", err)
 	}
+	if err := writeBool(bc, fetchData); err != nil {
+		return nil, fmt.Errorf("cannot write fetchData=%v: %s", fetchData, err)
+	}
 	if err := bc.Flush(); err != nil {
 		return nil, fmt.Errorf("cannot flush requestData to conn: %s", err)
 	}
@@ -1265,6 +1272,17 @@ func writeUint32(bc *handshake.BufferedConn, n uint32) error {
 	return nil
 }

+func writeBool(bc *handshake.BufferedConn, b bool) error {
+	var buf [1]byte
+	if b {
+		buf[0] = 1
+	}
+	if _, err := bc.Write(buf[:]); err != nil {
+		return err
+	}
+	return nil
+}
+
 func readBytes(buf []byte, bc *handshake.BufferedConn, maxDataSize int) ([]byte, error) {
 	buf = bytesutil.Resize(buf, 8)
 	if _, err := io.ReadFull(bc, buf); err != nil {
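writeBool is the entire client half of the new field: a single raw byte, 1 for true, 0 for false, with no length prefix. A self-contained round-trip of the same encoding over plain io interfaces (names here are illustrative, not the repo's API):

package main

import (
	"bytes"
	"fmt"
	"io"
)

// writeBoolTo mirrors writeBool above: one byte, 0 or 1.
func writeBoolTo(w io.Writer, b bool) error {
	var buf [1]byte
	if b {
		buf[0] = 1
	}
	_, err := w.Write(buf[:])
	return err
}

// readBoolFrom mirrors vmstorage's readBool later in this diff.
func readBoolFrom(r io.Reader) (bool, error) {
	var buf [1]byte
	if _, err := io.ReadFull(r, buf[:]); err != nil {
		return false, fmt.Errorf("cannot read bool: %s", err)
	}
	return buf[0] != 0, nil
}

func main() {
	var conn bytes.Buffer // stands in for the buffered connection
	_ = writeBoolTo(&conn, true)
	v, _ := readBoolFrom(&conn)
	fmt.Println(v) // true
}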
@@ -72,7 +72,7 @@ func FederateHandler(at *auth.Token, w http.ResponseWriter, r *http.Request) err
 		MaxTimestamp: end,
 		TagFilterss:  tagFilterss,
 	}
-	rss, isPartial, err := netstorage.ProcessSearchQuery(at, sq, deadline)
+	rss, isPartial, err := netstorage.ProcessSearchQuery(at, sq, true, deadline)
 	if err != nil {
 		return fmt.Errorf("cannot fetch data for %q: %s", sq, err)
 	}
@@ -169,7 +169,7 @@ func exportHandler(at *auth.Token, w http.ResponseWriter, matches []string, star
 		MaxTimestamp: end,
 		TagFilterss:  tagFilterss,
 	}
-	rss, isPartial, err := netstorage.ProcessSearchQuery(at, sq, deadline)
+	rss, isPartial, err := netstorage.ProcessSearchQuery(at, sq, true, deadline)
 	if err != nil {
 		return fmt.Errorf("cannot fetch data for %q: %s", sq, err)
 	}
@@ -405,7 +405,7 @@ func SeriesHandler(at *auth.Token, w http.ResponseWriter, r *http.Request) error
 		MaxTimestamp: end,
 		TagFilterss:  tagFilterss,
 	}
-	rss, isPartial, err := netstorage.ProcessSearchQuery(at, sq, deadline)
+	rss, isPartial, err := netstorage.ProcessSearchQuery(at, sq, false, deadline)
 	if err != nil {
 		return fmt.Errorf("cannot fetch data for %q: %s", sq, err)
 	}
@@ -578,8 +578,7 @@ func evalRollupFuncWithMetricExpr(ec *EvalConfig, name string, rf rollupFunc, me
 		MaxTimestamp: ec.End + ec.Step,
 		TagFilterss:  [][]storage.TagFilter{me.TagFilters},
 	}
-	rss, isPartial, err := netstorage.ProcessSearchQuery(ec.AuthToken, sq, ec.Deadline)
-
+	rss, isPartial, err := netstorage.ProcessSearchQuery(ec.AuthToken, sq, true, ec.Deadline)
 	if err != nil {
 		return nil, err
 	}
@@ -392,6 +392,18 @@ func (ctx *vmselectRequestCtx) readDataBufBytes(maxDataSize int) error {
 	return nil
 }

+func (ctx *vmselectRequestCtx) readBool() (bool, error) {
+	ctx.dataBuf = bytesutil.Resize(ctx.dataBuf, 1)
+	if _, err := io.ReadFull(ctx.bc, ctx.dataBuf); err != nil {
+		if err == io.EOF {
+			return false, err
+		}
+		return false, fmt.Errorf("cannot read bool: %s", err)
+	}
+	v := ctx.dataBuf[0] != 0
+	return v, nil
+}
+
 func (ctx *vmselectRequestCtx) writeDataBufBytes() error {
 	if err := ctx.writeUint64(uint64(len(ctx.dataBuf))); err != nil {
 		return fmt.Errorf("cannot write data size: %s", err)
@@ -443,7 +455,7 @@ func (s *Server) processVMSelectRequest(ctx *vmselectRequestCtx) error {
 	}()

 	switch string(ctx.dataBuf) {
-	case "search_v2":
+	case "search_v3":
 		return s.processVMSelectSearchQuery(ctx)
 	case "labelValues":
 		return s.processVMSelectLabelValues(ctx)
@@ -714,6 +726,10 @@ func (s *Server) processVMSelectSearchQuery(ctx *vmselectRequestCtx) error {
 	if len(tail) > 0 {
 		return fmt.Errorf("unexpected non-zero tail left after unmarshaling SearchQuery: (len=%d) %q", len(tail), tail)
 	}
+	fetchData, err := ctx.readBool()
+	if err != nil {
+		return fmt.Errorf("cannot read `fetchData` bool: %s", err)
+	}

 	// Setup search.
 	if err := ctx.setupTfss(); err != nil {
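On the vmstorage side the bool is read at exactly the position the client wrote it: after the length-prefixed SearchQuery and before tag-filter setup. Reading it anywhere else would shift every subsequent byte of the stream, which is why the protocol name had to change rather than reuse search_v2. The server-side order, sketched from the code above:

// Server-side read order for search_v3 (sketch):
// 1. ctx.readDataBufBytes(...)  -> marshaled SearchQuery
// 2. ctx.readBool()             -> fetchData
// 3. ctx.setupTfss()            -> tag filters parsed from the SearchQuery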
@@ -728,7 +744,7 @@ func (s *Server) processVMSelectSearchQuery(ctx *vmselectRequestCtx) error {
 		MinTimestamp: ctx.sq.MinTimestamp,
 		MaxTimestamp: ctx.sq.MaxTimestamp,
 	}
-	ctx.sr.Init(s.storage, ctx.tfss, tr, *maxMetricsPerSearch)
+	ctx.sr.Init(s.storage, ctx.tfss, tr, fetchData, *maxMetricsPerSearch)
 	defer ctx.sr.MustClose()
 	if err := ctx.sr.Error(); err != nil {
 		// Send the error message to vmselect.
@@ -28,6 +28,9 @@ type partSearch struct {
 	// tr is a time range to search.
 	tr TimeRange

+	// Skip populating timestampsData and valuesData in Block if fetchData=false.
+	fetchData bool
+
 	metaindex []metaindexRow

 	ibCache *indexBlockCache
@@ -61,7 +64,7 @@ func (ps *partSearch) reset() {
 }

 // Init initializes the ps with the given p, tsids and tr.
-func (ps *partSearch) Init(p *part, tsids []TSID, tr TimeRange) {
+func (ps *partSearch) Init(p *part, tsids []TSID, tr TimeRange, fetchData bool) {
 	ps.reset()
 	ps.p = p
@@ -72,6 +75,7 @@
 		ps.tsids = append(ps.tsids[:0], tsids...)
 	}
 	ps.tr = tr
+	ps.fetchData = fetchData
 	ps.metaindex = p.metaindex
 	ps.ibCache = &p.ibCache
@@ -281,11 +285,14 @@ func (ps *partSearch) searchBHS() bool {

 func (ps *partSearch) readBlock(bh *blockHeader) {
 	ps.Block.Reset()
+	ps.Block.bh = *bh
+	if !ps.fetchData {
+		return
+	}

 	ps.Block.timestampsData = bytesutil.Resize(ps.Block.timestampsData[:0], int(bh.TimestampsBlockSize))
 	ps.p.timestampsFile.ReadAt(ps.Block.timestampsData, int64(bh.TimestampsBlockOffset))

 	ps.Block.valuesData = bytesutil.Resize(ps.Block.valuesData[:0], int(bh.ValuesBlockSize))
 	ps.p.valuesFile.ReadAt(ps.Block.valuesData, int64(bh.ValuesBlockOffset))
-
-	ps.Block.bh = *bh
 }
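This hunk is where the saving materializes: with fetchData=false neither timestampsFile nor valuesFile is touched, so a series query costs only index lookups and block headers. Note the header assignment moved above the early return, so metainfo survives either way. The resulting block state, sketched (field names per this diff):

// After readBlock with fetchData=false:
//   ps.Block.bh             == *bh   // header kept: TSID, row count, offsets
//   ps.Block.timestampsData -- left empty by Reset(); no disk read happens
//   ps.Block.valuesData     -- left empty by Reset(); no disk read happens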
@@ -1247,7 +1247,7 @@ func testPartSearch(t *testing.T, p *part, tsids []TSID, tr TimeRange, expectedR

 func testPartSearchSerial(p *part, tsids []TSID, tr TimeRange, expectedRawBlocks []rawBlock) error {
 	var ps partSearch
-	ps.Init(p, tsids, tr)
+	ps.Init(p, tsids, tr, true)
 	var bs []Block
 	for ps.NextBlock() {
 		var b Block
@@ -56,7 +56,7 @@ func (pts *partitionSearch) reset() {
 // Init initializes the search in the given partition for the given tsid and tr.
 //
 // MustClose must be called when partition search is done.
-func (pts *partitionSearch) Init(pt *partition, tsids []TSID, tr TimeRange) {
+func (pts *partitionSearch) Init(pt *partition, tsids []TSID, tr TimeRange, fetchData bool) {
 	if pts.needClosing {
 		logger.Panicf("BUG: missing partitionSearch.MustClose call before the next call to Init")
 	}
@@ -85,7 +85,7 @@ func (pts *partitionSearch) Init(pt *partition, tsids []TSID, tr TimeRange) {
 	}
 	pts.psPool = pts.psPool[:len(pts.pws)]
 	for i, pw := range pts.pws {
-		pts.psPool[i].Init(pw.p, tsids, tr)
+		pts.psPool[i].Init(pw.p, tsids, tr, fetchData)
 	}

 	// Initialize the psHeap.
@@ -238,7 +238,7 @@ func testPartitionSearchSerial(pt *partition, tsids []TSID, tr TimeRange, rbsExp
 	bs := []Block{}
 	var pts partitionSearch
-	pts.Init(pt, tsids, tr)
+	pts.Init(pt, tsids, tr, true)
 	for pts.NextBlock() {
 		var b Block
 		b.CopyFrom(pts.Block)
@@ -263,7 +263,7 @@ func testPartitionSearchSerial(pt *partition, tsids []TSID, tr TimeRange, rbsExp
 	}

 	// verify that empty tsids returns empty result
-	pts.Init(pt, []TSID{}, tr)
+	pts.Init(pt, []TSID{}, tr, true)
 	if pts.NextBlock() {
 		return fmt.Errorf("unexpected block got for an empty tsids list: %+v", pts.Block)
 	}
@@ -271,6 +271,16 @@ func testPartitionSearchSerial(pt *partition, tsids []TSID, tr TimeRange, rbsExp
 		return fmt.Errorf("unexpected error on empty tsids list: %s", err)
 	}
 	pts.MustClose()

+	pts.Init(pt, []TSID{}, tr, false)
+	if pts.NextBlock() {
+		return fmt.Errorf("unexpected block got for an empty tsids list: %+v", pts.Block)
+	}
+	if err := pts.Error(); err != nil {
+		return fmt.Errorf("unexpected error on empty tsids list: %s", err)
+	}
+	pts.MustClose()
+
 	return nil
 }
@@ -110,7 +110,7 @@ func (s *Search) reset() {
 // Init initializes s from the given storage, tfss and tr.
 //
 // MustClose must be called when the search is done.
-func (s *Search) Init(storage *Storage, tfss []*TagFilters, tr TimeRange, maxMetrics int) {
+func (s *Search) Init(storage *Storage, tfss []*TagFilters, tr TimeRange, fetchData bool, maxMetrics int) {
 	if s.needClosing {
 		logger.Panicf("BUG: missing MustClose call before the next call to Init")
 	}
@@ -123,7 +123,7 @@ func (s *Search) Init(storage *Storage, tfss []*TagFilters, tr TimeRange, maxMet
 	// It is ok to call Init on error from storage.searchTSIDs.
 	// Init must be called before returning because it will fail
 	// on Seach.MustClose otherwise.
-	s.ts.Init(storage.tb, tsids, tr)
+	s.ts.Init(storage.tb, tsids, tr, fetchData)

 	if err != nil {
 		s.err = err
@@ -200,7 +200,7 @@ func testSearch(st *Storage, tr TimeRange, mrs []MetricRow, accountsCount int) e
 	}

 	// Search
-	s.Init(st, []*TagFilters{tfs}, tr, 1e5)
+	s.Init(st, []*TagFilters{tfs}, tr, true, 1e5)
 	var mbs []MetricBlock
 	for s.NextMetricBlock() {
 		var b Block
@@ -506,12 +506,24 @@ func testStorageDeleteMetrics(s *Storage, workerNum int) error {
 		MaxTimestamp: 2e10,
 	}
 	metricBlocksCount := func(tfs *TagFilters) int {
+		// Verify the number of blocks with fetchData=true
 		n := 0
-		sr.Init(s, []*TagFilters{tfs}, tr, 1e5)
+		sr.Init(s, []*TagFilters{tfs}, tr, true, 1e5)
 		for sr.NextMetricBlock() {
 			n++
 		}
 		sr.MustClose()
+
+		// Make sure the number of blocks with fetchData=false is the same.
+		m := 0
+		sr.Init(s, []*TagFilters{tfs}, tr, false, 1e5)
+		for sr.NextMetricBlock() {
+			m++
+		}
+		sr.MustClose()
+		if n != m {
+			return -1
+		}
 		return n
 	}
 	for i := 0; i < metricsCount; i++ {
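The doubled loop asserts a useful invariant: fetchData changes what each block carries, never which blocks match, so both passes must count the same. An equivalent helper-shaped sketch (countMetricBlocks is a hypothetical refactoring of the closure above; Init, NextMetricBlock and MustClose are the calls it already uses):

// countMetricBlocks returns the number of blocks matching tfs for the
// given fetchData mode; the test requires identical results for true
// and false.
func countMetricBlocks(sr *Search, s *Storage, tfs *TagFilters, tr TimeRange, fetchData bool) int {
	n := 0
	sr.Init(s, []*TagFilters{tfs}, tr, fetchData, 1e5)
	for sr.NextMetricBlock() {
		n++
	}
	sr.MustClose()
	return n
}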
@@ -55,7 +55,7 @@ func (ts *tableSearch) reset() {
 // Init initializes the ts.
 //
 // MustClose must be called then the tableSearch is done.
-func (ts *tableSearch) Init(tb *table, tsids []TSID, tr TimeRange) {
+func (ts *tableSearch) Init(tb *table, tsids []TSID, tr TimeRange, fetchData bool) {
 	if ts.needClosing {
 		logger.Panicf("BUG: missing MustClose call before the next call to Init")
 	}
@@ -86,7 +86,7 @@ func (ts *tableSearch) Init(tb *table, tsids []TSID, tr TimeRange) {
 	}
 	ts.ptsPool = ts.ptsPool[:len(ts.ptws)]
 	for i, ptw := range ts.ptws {
-		ts.ptsPool[i].Init(ptw.pt, tsids, tr)
+		ts.ptsPool[i].Init(ptw.pt, tsids, tr, fetchData)
 	}

 	// Initialize the ptsHeap.
@@ -251,7 +251,7 @@ func testTableSearchSerial(tb *table, tsids []TSID, tr TimeRange, rbsExpected []
 	bs := []Block{}
 	var ts tableSearch
-	ts.Init(tb, tsids, tr)
+	ts.Init(tb, tsids, tr, true)
 	for ts.NextBlock() {
 		var b Block
 		b.CopyFrom(ts.Block)
@@ -276,7 +276,7 @@ func testTableSearchSerial(tb *table, tsids []TSID, tr TimeRange, rbsExpected []
 	}

 	// verify that empty tsids returns empty result
-	ts.Init(tb, []TSID{}, tr)
+	ts.Init(tb, []TSID{}, tr, true)
 	if ts.NextBlock() {
 		return fmt.Errorf("unexpected block got for an empty tsids list: %+v", ts.Block)
 	}
@@ -284,5 +284,15 @@ func testTableSearchSerial(tb *table, tsids []TSID, tr TimeRange, rbsExpected []
 		return fmt.Errorf("unexpected error on empty tsids list: %s", err)
 	}
 	ts.MustClose()

+	ts.Init(tb, []TSID{}, tr, false)
+	if ts.NextBlock() {
+		return fmt.Errorf("unexpected block got for an empty tsids list with fetchData=false: %+v", ts.Block)
+	}
+	if err := ts.Error(); err != nil {
+		return fmt.Errorf("unexpected error on empty tsids list with fetchData=false: %s", err)
+	}
+	ts.MustClose()
+
 	return nil
 }
@@ -26,7 +26,11 @@ func BenchmarkTableSearch(b *testing.B) {
 		b.Run(fmt.Sprintf("tsidsCount_%d", tsidsCount), func(b *testing.B) {
 			for _, tsidsSearch := range []int{1, 1e1, 1e2, 1e3, 1e4} {
 				b.Run(fmt.Sprintf("tsidsSearch_%d", tsidsSearch), func(b *testing.B) {
-					benchmarkTableSearch(b, rowsCount, tsidsCount, tsidsSearch)
+					for _, fetchData := range []bool{true, false} {
+						b.Run(fmt.Sprintf("fetchData_%v", fetchData), func(b *testing.B) {
+							benchmarkTableSearch(b, rowsCount, tsidsCount, tsidsSearch, fetchData)
+						})
+					}
 				})
 			}
 		})
@@ -103,9 +107,9 @@ func createBenchTable(b *testing.B, path string, startTimestamp int64, rowsPerIn
 	tb.MustClose()
 }

-func benchmarkTableSearch(b *testing.B, rowsCount, tsidsCount, tsidsSearch int) {
+func benchmarkTableSearch(b *testing.B, rowsCount, tsidsCount, tsidsSearch int, fetchData bool) {
 	startTimestamp := timestampFromTime(time.Now()) - 365*24*3600*1000
-	rowsPerInsert := int(maxRawRowsPerPartition)
+	rowsPerInsert := getMaxRawRowsPerPartition()

 	tb := openBenchTable(b, startTimestamp, rowsPerInsert, rowsCount, tsidsCount)
 	tr := TimeRange{
@@ -127,7 +131,7 @@ func benchmarkTableSearch(b *testing.B, rowsCount, tsidsCount, tsidsSearch int)
 	for i := range tsids {
 		tsids[i].MetricID = 1 + uint64(i)
 	}
-	ts.Init(tb, tsids, tr)
+	ts.Init(tb, tsids, tr, fetchData)
 	for ts.NextBlock() {
 	}
 	ts.MustClose()