Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git
all: readability improvements for query traces
- show dates in human-readable format, e.g. 2022-05-07, instead of a numeric value
- limit the maximum length of queries and filters shown in trace messages
parent ffd88d0b76
commit 4fb0f15322

8 changed files with 66 additions and 34 deletions
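
As a purely illustrative example (the filter and day number below are hypothetical), a per-day index trace message that previously read

    search for label names: filters={job="node-exporter"}, date=19119

is now emitted as

    search for label names: filters={job="node-exporter"}, date=2022-05-07

and long query and filter strings are additionally truncated (300 characters for queries, 60 for tag filter values), as the hunks below show.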

@@ -218,7 +218,8 @@ func getTimestamps(start, end, step int64) []int64 {
 func evalExpr(qt *querytracer.Tracer, ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
     if qt.Enabled() {
-        query := e.AppendString(nil)
+        query := string(e.AppendString(nil))
+        query = bytesutil.LimitStringLen(query, 300)
         mayCache := ec.mayCache()
         qt = qt.NewChild("eval: query=%s, timeRange=%s, step=%d, mayCache=%v", query, ec.timeRangeString(), ec.Step, mayCache)
     }
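
The pattern above (also used in the rollup cache Get/Put hunks below) builds the potentially expensive query string only when tracing is enabled. A minimal self-contained sketch of the idea, using a hypothetical tracer type rather than the real querytracer package:

package main

import "fmt"

// tracer is a hypothetical stand-in for querytracer.Tracer.
type tracer struct{ enabled bool }

func (t *tracer) Enabled() bool { return t != nil && t.enabled }

func (t *tracer) NewChild(format string, args ...interface{}) *tracer {
    fmt.Printf("trace: "+format+"\n", args...)
    return t
}

// limitStringLen mirrors the LimitStringLen helper added in this commit.
func limitStringLen(s string, maxLen int) string {
    if maxLen <= 4 || len(s) <= maxLen {
        return s
    }
    n := maxLen/2 - 1
    return s[:n] + ".." + s[len(s)-n:]
}

func main() {
    qt := &tracer{enabled: true}
    longQuery := `sum(rate(vm_http_requests_total{path="/api/v1/query_range"}[5m])) by (path)`
    if qt.Enabled() {
        // The string is built and truncated only when tracing is on,
        // so disabled tracing pays no formatting or allocation cost.
        query := limitStringLen(longQuery, 40)
        qt = qt.NewChild("eval: query=%s", query)
    }
}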

@@ -155,7 +155,8 @@ func ResetRollupResultCache() {
 func (rrc *rollupResultCache) Get(qt *querytracer.Tracer, ec *EvalConfig, expr metricsql.Expr, window int64) (tss []*timeseries, newStart int64) {
     if qt.Enabled() {
-        query := expr.AppendString(nil)
+        query := string(expr.AppendString(nil))
+        query = bytesutil.LimitStringLen(query, 300)
         qt = qt.NewChild("rollup cache get: query=%s, timeRange=%s, step=%d, window=%d", query, ec.timeRangeString(), ec.Step, window)
         defer qt.Done()
     }

@@ -255,7 +256,8 @@ var resultBufPool bytesutil.ByteBufferPool
 func (rrc *rollupResultCache) Put(qt *querytracer.Tracer, ec *EvalConfig, expr metricsql.Expr, window int64, tss []*timeseries) {
     if qt.Enabled() {
-        query := expr.AppendString(nil)
+        query := string(expr.AppendString(nil))
+        query = bytesutil.LimitStringLen(query, 300)
         qt = qt.NewChild("rollup cache put: query=%s, timeRange=%s, step=%d, window=%d, series=%d", query, ec.timeRangeString(), ec.Step, window, len(tss))
         defer qt.Done()
     }

@@ -79,3 +79,14 @@ func ToUnsafeBytes(s string) (b []byte) {
     slh.Cap = sh.Len
     return b
 }
+
+// LimitStringLen limits the length of s to maxLen.
+//
+// If len(s) > maxLen, then the function concatenates s prefix with s suffix.
+func LimitStringLen(s string, maxLen int) string {
+    if maxLen <= 4 || len(s) <= maxLen {
+        return s
+    }
+    n := maxLen/2 - 1
+    return s[:n] + ".." + s[len(s)-n:]
+}
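
A quick illustration of how LimitStringLen elides the middle of an over-long string (the inputs here are made up for the example):

package main

import "fmt"

// Copy of LimitStringLen from the hunk above, for a self-contained demo.
func LimitStringLen(s string, maxLen int) string {
    if maxLen <= 4 || len(s) <= maxLen {
        return s
    }
    n := maxLen/2 - 1
    return s[:n] + ".." + s[len(s)-n:]
}

func main() {
    fmt.Println(LimitStringLen("short", 10))                      // "short" (already within the limit)
    fmt.Println(LimitStringLen("this_is_a_long_metric_name", 10)) // "this..name" (4 leading + ".." + 4 trailing chars)
    fmt.Println(LimitStringLen("abcdefghij", 4))                  // "abcdefghij" (maxLen <= 4 disables truncation)
}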

@@ -422,7 +422,7 @@ func (is *indexSearch) maybeCreateIndexes(tsid *TSID, metricNameRaw []byte, date
         return false, fmt.Errorf("cannot create global indexes: %w", err)
     }
     if err := is.createPerDayIndexes(date, tsid.MetricID, mn); err != nil {
-        return false, fmt.Errorf("cannot create per-day indexes for date=%d: %w", date, err)
+        return false, fmt.Errorf("cannot create per-day indexes for date=%s: %w", dateToString(date), err)
     }
     PutMetricName(mn)
     atomic.AddUint64(&is.db.timeseriesRepopulated, 1)

@@ -615,7 +615,7 @@ func (is *indexSearch) createTSIDByName(dst *TSID, metricName, metricNameRaw []b
         return fmt.Errorf("cannot create global indexes: %w", err)
     }
     if err := is.createPerDayIndexes(date, dst.MetricID, mn); err != nil {
-        return fmt.Errorf("cannot create per-day indexes for date=%d: %w", date, err)
+        return fmt.Errorf("cannot create per-day indexes for date=%s: %w", dateToString(date), err)
     }

     // There is no need in invalidating tag cache, since it is invalidated

@@ -800,7 +800,7 @@ func (is *indexSearch) searchLabelNamesWithFiltersOnTimeRange(qt *querytracer.Tr
     qt = qt.NewChild("parallel search for label names: filters=%s, timeRange=%s", tfss, &tr)
     for date := minDate; date <= maxDate; date++ {
         wg.Add(1)
-        qtChild := qt.NewChild("search for label names: filters=%s, date=%d", tfss, date)
+        qtChild := qt.NewChild("search for label names: filters=%s, date=%s", tfss, dateToString(date))
         go func(date uint64) {
             defer func() {
                 qtChild.Done()

@@ -956,7 +956,7 @@ func (is *indexSearch) searchLabelValuesWithFiltersOnTimeRange(qt *querytracer.T
     qt = qt.NewChild("parallel search for label values: labelName=%q, filters=%s, timeRange=%s", labelName, tfss, &tr)
     for date := minDate; date <= maxDate; date++ {
         wg.Add(1)
-        qtChild := qt.NewChild("search for label names: filters=%s, date=%d", tfss, date)
+        qtChild := qt.NewChild("search for label names: filters=%s, date=%s", tfss, dateToString(date))
         go func(date uint64) {
             defer func() {
                 qtChild.Done()

@@ -2486,7 +2486,7 @@ func (is *indexSearch) tryUpdatingMetricIDsForDateRange(qt *querytracer.Tracer,
     var errGlobal error
     var mu sync.Mutex // protects metricIDs + errGlobal vars from concurrent access below
     for minDate <= maxDate {
-        qtChild := qt.NewChild("parallel thread for date=%d", minDate)
+        qtChild := qt.NewChild("parallel thread for date=%s", dateToString(minDate))
         wg.Add(1)
         go func(date uint64) {
             defer func() {
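
The context in the three hunks above follows the same fan-out shape: one goroutine per day in the range, a WaitGroup to join them, and a mutex guarding the shared result and error. A minimal sketch of that shape under assumed types (searchDate here is a stand-in that fabricates results):

package main

import (
    "fmt"
    "sync"
)

// searchDate stands in for the real per-day index search.
func searchDate(date uint64) ([]uint64, error) {
    return []uint64{date * 10, date*10 + 1}, nil
}

func searchDateRange(minDate, maxDate uint64) ([]uint64, error) {
    var (
        wg        sync.WaitGroup
        mu        sync.Mutex // protects metricIDs and errGlobal below
        metricIDs []uint64
        errGlobal error
    )
    for date := minDate; date <= maxDate; date++ {
        wg.Add(1)
        go func(date uint64) {
            defer wg.Done()
            ids, err := searchDate(date)
            mu.Lock()
            defer mu.Unlock()
            if err != nil {
                errGlobal = err
                return
            }
            metricIDs = append(metricIDs, ids...)
        }(date)
    }
    wg.Wait()
    return metricIDs, errGlobal
}

func main() {
    ids, err := searchDateRange(19119, 19121) // hypothetical days since the Unix epoch
    fmt.Println(ids, err)
}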

@@ -2522,8 +2522,10 @@ func (is *indexSearch) tryUpdatingMetricIDsForDateRange(qt *querytracer.Tracer,
 }

 func (is *indexSearch) getMetricIDsForDateAndFilters(qt *querytracer.Tracer, date uint64, tfs *TagFilters, maxMetrics int) (*uint64set.Set, error) {
-    qt = qt.NewChild("search for metric ids on a particular day: filters=%s, date=%d, maxMetrics=%d", tfs, date, maxMetrics)
-    defer qt.Done()
+    if qt.Enabled() {
+        qt = qt.NewChild("search for metric ids on a particular day: filters=%s, date=%s, maxMetrics=%d", tfs, dateToString(date), maxMetrics)
+        defer qt.Done()
+    }
     // Sort tfs by loopsCount needed for performing each filter.
     // This stats is usually collected from the previous queries.
     // This way we limit the amount of work below by applying fast filters at first.

@@ -2869,18 +2871,20 @@ func (is *indexSearch) hasDateMetricID(date, metricID uint64, accountID, project
         if err == io.EOF {
             return false, nil
         }
-        return false, fmt.Errorf("error when searching for (date=%d, metricID=%d) entry: %w", date, metricID, err)
+        return false, fmt.Errorf("error when searching for (date=%s, metricID=%d) entry: %w", dateToString(date), metricID, err)
     }
     if string(ts.Item) != string(kb.B) {
-        return false, fmt.Errorf("unexpected entry for (date=%d, metricID=%d); got %q; want %q", date, metricID, ts.Item, kb.B)
+        return false, fmt.Errorf("unexpected entry for (date=%s, metricID=%d); got %q; want %q", dateToString(date), metricID, ts.Item, kb.B)
     }
     return true, nil
 }

 func (is *indexSearch) getMetricIDsForDateTagFilter(qt *querytracer.Tracer, tf *tagFilter, date uint64, commonPrefix []byte,
     maxMetrics int, maxLoopsCount int64) (*uint64set.Set, int64, error) {
-    qt = qt.NewChild("get metric ids for filter and date: filter={%s}, date=%d, maxMetrics=%d, maxLoopsCount=%d", tf, date, maxMetrics, maxLoopsCount)
-    defer qt.Done()
+    if qt.Enabled() {
+        qt = qt.NewChild("get metric ids for filter and date: filter={%s}, date=%s, maxMetrics=%d, maxLoopsCount=%d", tf, dateToString(date), maxMetrics, maxLoopsCount)
+        defer qt.Done()
+    }
     if !bytes.HasPrefix(tf.prefix, commonPrefix) {
         logger.Panicf("BUG: unexpected tf.prefix %q; must start with commonPrefix %q", tf.prefix, commonPrefix)
     }

@@ -295,10 +295,11 @@ type TagFilter struct {
 // String returns string representation of tf.
 func (tf *TagFilter) String() string {
     op := tf.getOp()
+    value := bytesutil.LimitStringLen(string(tf.Value), 60)
     if len(tf.Key) == 0 {
-        return fmt.Sprintf("__name__%s%q", op, tf.Value)
+        return fmt.Sprintf("__name__%s%q", op, value)
     }
-    return fmt.Sprintf("%s%s%q", tf.Key, op, tf.Value)
+    return fmt.Sprintf("%s%s%q", tf.Key, op, value)
 }

 func (tf *TagFilter) getOp() string {
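
To make the effect concrete, here is a small standalone sketch of how a filter with an over-long value would now render. The TagFilter struct below is a simplified stand-in for the real one, and the 60-character limit matches the hunk above:

package main

import "fmt"

// TagFilter is a simplified stand-in with only the fields used by String.
type TagFilter struct {
    Key, Value []byte
    IsRegexp   bool
}

func limitStringLen(s string, maxLen int) string {
    if maxLen <= 4 || len(s) <= maxLen {
        return s
    }
    n := maxLen/2 - 1
    return s[:n] + ".." + s[len(s)-n:]
}

func (tf *TagFilter) String() string {
    op := "="
    if tf.IsRegexp {
        op = "=~"
    }
    value := limitStringLen(string(tf.Value), 60)
    if len(tf.Key) == 0 {
        return fmt.Sprintf("__name__%s%q", op, value)
    }
    return fmt.Sprintf("%s%s%q", tf.Key, op, value)
}

func main() {
    tf := &TagFilter{
        Key:      []byte("instance"),
        Value:    []byte("(host-001|host-002|host-003|host-004|host-005|host-006|host-007)"),
        IsRegexp: true,
    }
    // The 64-char regexp is elided in the middle so trace output stays short:
    // instance=~"(host-001|host-002|host-003|h..4|host-005|host-006|host-007)"
    fmt.Println(tf.String())
}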

@@ -2235,8 +2235,8 @@ func (s *Storage) updatePerDateData(rows []rawRow, mrs []*MetricRow) error {
         ok, err := is.hasDateMetricID(date, metricID, dmid.accountID, dmid.projectID)
         if err != nil {
             if firstError == nil {
-                firstError = fmt.Errorf("error when locating (date=%d, metricID=%d, accountID=%d, projectID=%d) in database: %w",
-                    date, metricID, dmid.accountID, dmid.projectID, err)
+                firstError = fmt.Errorf("error when locating (date=%s, metricID=%d, accountID=%d, projectID=%d) in database: %w",
+                    dateToString(date), metricID, dmid.accountID, dmid.projectID, err)
             }
             continue
         }

@@ -2253,7 +2253,7 @@ func (s *Storage) updatePerDateData(rows []rawRow, mrs []*MetricRow) error {
         mn.sortTags()
         if err := is.createPerDayIndexes(date, metricID, mn); err != nil {
             if firstError == nil {
-                firstError = fmt.Errorf("error when storing per-date inverted index for (date=%d, metricID=%d): %w", date, metricID, err)
+                firstError = fmt.Errorf("error when storing per-date inverted index for (date=%s, metricID=%d): %w", dateToString(date), metricID, err)
             }
             continue
         }

@@ -299,30 +299,35 @@ func (tf *tagFilter) Less(other *tagFilter) bool {

 // String returns human-readable tf value.
 func (tf *tagFilter) String() string {
-    op := "="
-    if tf.isNegative {
-        op = "!="
-        if tf.isRegexp {
-            op = "!~"
-        }
-    } else if tf.isRegexp {
-        op = "=~"
-    }
+    op := tf.getOp()
+    value := bytesutil.LimitStringLen(string(tf.value), 60)
     if bytes.Equal(tf.key, graphiteReverseTagKey) {
-        return fmt.Sprintf("__graphite_reverse__%s%q", op, tf.value)
+        return fmt.Sprintf("__graphite_reverse__%s%q", op, value)
     }
     if tf.isComposite() {
         metricName, key, err := unmarshalCompositeTagKey(tf.key)
         if err != nil {
             logger.Panicf("BUG: cannot unmarshal composite tag key: %s", err)
         }
-        return fmt.Sprintf("composite(%s,%s)%s%q", metricName, key, op, tf.value)
+        return fmt.Sprintf("composite(%s,%s)%s%q", metricName, key, op, value)
     }
-    key := tf.key
-    if len(key) == 0 {
-        key = []byte("__name__")
+    if len(tf.key) == 0 {
+        return fmt.Sprintf("__name__%s%q", op, value)
     }
-    return fmt.Sprintf("%s%s%q", key, op, tf.value)
+    return fmt.Sprintf("%s%s%q", tf.key, op, value)
 }

+func (tf *tagFilter) getOp() string {
+    if tf.isNegative {
+        if tf.isRegexp {
+            return "!~"
+        }
+        return "!="
+    }
+    if tf.isRegexp {
+        return "=~"
+    }
+    return "="
+}
+
 func (tf *tagFilter) Marshal(dst []byte, accountID, projectID uint32) []byte {
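
For reference, the extracted getOp helper maps the two boolean flags to the four matching operators. A tiny self-contained check of that mapping (using a local copy of the logic, since tagFilter itself is unexported):

package main

import "fmt"

// getOp reproduces the mapping from the hunk above.
func getOp(isNegative, isRegexp bool) string {
    if isNegative {
        if isRegexp {
            return "!~"
        }
        return "!="
    }
    if isRegexp {
        return "=~"
    }
    return "="
}

func main() {
    fmt.Println(getOp(false, false)) // "="
    fmt.Println(getOp(true, false))  // "!="
    fmt.Println(getOp(false, true))  // "=~"
    fmt.Println(getOp(true, true))   // "!~"
}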

@@ -5,6 +5,14 @@ import (
     "time"
 )

+func dateToString(date uint64) string {
+    if date == 0 {
+        return "1970-01-01"
+    }
+    t := time.Unix(int64(date*24*3600), 0).UTC()
+    return t.Format("2006-01-02")
+}
+
 // timestampToTime returns time representation of the given timestamp.
 //
 // The returned time is in UTC timezone.
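
Since dates in the per-day index messages are stored as whole days since the Unix epoch, dateToString simply scales the day count to seconds and formats it in UTC. A small self-contained demo of the conversion (the day values below are just examples):

package main

import (
    "fmt"
    "time"
)

// dateToString is copied from the hunk above; date counts whole days since the Unix epoch.
func dateToString(date uint64) string {
    if date == 0 {
        return "1970-01-01"
    }
    t := time.Unix(int64(date*24*3600), 0).UTC()
    return t.Format("2006-01-02")
}

func main() {
    fmt.Println(dateToString(0))     // 1970-01-01 (zero date handled explicitly)
    fmt.Println(dateToString(19119)) // 2022-05-07, instead of the raw value 19119 in trace and error messages
}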