From d6415b2572d03f85ac5eed9cc3417d0a2900d3fe Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin
Date: Wed, 10 Jul 2024 00:14:15 +0200
Subject: [PATCH] all: consistently use 'any' instead of 'interface{}'

The 'any' type is supported starting from Go1.18. Let's consistently use it
instead of 'interface{}' across the code base, since 'any' is easier to read
than 'interface{}'.
---
 app/vmagent/remotewrite/relabel.go | 2 +-
 app/vmagent/remotewrite/remotewrite.go | 2 +-
 app/vmalert/config/config.go | 12 ++---
 app/vmalert/config/log/logger.go | 8 +--
 app/vmalert/config/types.go | 6 +--
 app/vmalert/datasource/vm_prom_api.go | 4 +-
 app/vmalert/notifier/config.go | 4 +-
 app/vmalert/rule/alerting.go | 2 +-
 app/vmalert/rule/group.go | 2 +-
 app/vmalert/templates/template.go | 18 +++----
 app/vmalert/templates/template_test.go | 6 +--
 app/vmalert/web_test.go | 4 +-
 app/vmalert/web_types.go | 2 +-
 app/vmauth/auth_config.go | 22 ++++----
 app/vmctl/influx/parser.go | 6 +--
 app/vmctl/influx/parser_test.go | 2 +-
 app/vmctl/opentsdb/opentsdb.go | 7 ---
 app/vmctl/vm/timeseries.go | 2 +-
 app/vmctl/vm_native.go | 4 +-
 app/vmselect/graphite/aggr.go | 2 +-
 app/vmselect/graphite/functions_api.go | 4 +-
 app/vmselect/graphite/transform.go | 8 +--
 app/vmselect/netstorage/netstorage.go | 54 +++++++++----------
 app/vmselect/prometheus/prometheus.go | 4 +-
 app/vmselect/promql/eval.go | 4 +-
 app/vmselect/promql/rollup.go | 54 +++++++++----------
 app/vmselect/promql/rollup_test.go | 52 +++++++++---------
 lib/blockcache/blockcache.go | 14 ++---
 lib/blockcache/blockcache_test.go | 4 +-
 lib/bytesutil/fast_string_matcher.go | 2 +-
 lib/bytesutil/fast_string_transformer.go | 2 +-
 lib/httpserver/httpserver.go | 4 +-
 lib/logger/logger.go | 22 ++++----
 lib/logger/logger_test.go | 6 +--
 lib/logger/throttler.go | 4 +-
 lib/logstorage/block_stream_merger.go | 4 +-
 lib/lrucache/lrucache.go | 4 +-
 lib/mergeset/merge.go | 6 +--
 lib/mergeset/table_search.go | 4 +-
 lib/promauth/config.go | 4 +-
 lib/promrelabel/config.go | 10 ++--
 lib/promrelabel/graphite.go | 2 +-
 lib/promrelabel/if_expression.go | 16 +++---
 lib/promscrape/config.go | 2 +-
 lib/promscrape/discovery/azure/api.go | 2 +-
 lib/promscrape/discovery/consul/api.go | 2 +-
 lib/promscrape/discovery/consulagent/api.go | 2 +-
 lib/promscrape/discovery/digitalocean/api.go | 2 +-
 lib/promscrape/discovery/docker/api.go | 2 +-
 lib/promscrape/discovery/dockerswarm/api.go | 2 +-
 .../discovery/dockerswarm/services.go | 4 +-
 .../discovery/dockerswarm/services_test.go | 4 +-
 .../discovery/dockerswarm/tasks_test.go | 2 +-
 lib/promscrape/discovery/ec2/api.go | 2 +-
 lib/promscrape/discovery/eureka/api.go | 2 +-
 lib/promscrape/discovery/gce/api.go | 2 +-
 lib/promscrape/discovery/gce/gce.go | 8 +--
 lib/promscrape/discovery/hetzner/api.go | 2 +-
 lib/promscrape/discovery/http/api.go | 2 +-
 .../discovery/kubernetes/api_watcher.go | 28 +++++-----
 .../discovery/kubernetes/api_watcher_test.go | 4 +-
 .../discovery/kubernetes/kubernetes.go | 4 +-
 lib/promscrape/discovery/kuma/api.go | 2 +-
 lib/promscrape/discovery/nomad/api.go | 2 +-
 lib/promscrape/discovery/openstack/api.go | 2 +-
 lib/promscrape/discovery/openstack/auth.go | 30 +++++------
 lib/promscrape/discovery/vultr/api.go | 2 +-
 lib/promscrape/discovery/yandexcloud/api.go | 2 +-
 lib/promscrape/discoveryutils/config_map.go | 8 +--
 lib/promscrape/targetstatus.go | 2 +-
 lib/promutils/duration.go | 4 +-
 lib/promutils/duration_test.go | 2 +-
 lib/promutils/labels.go | 4 +-
 lib/protoparser/native/stream/streamparser.go | 2 +-
 lib/proxy/proxy.go | 4 +-
lib/querytracer/tracer.go | 8 +-- lib/storage/block_stream_merger.go | 4 +- lib/storage/index_db.go | 4 +- lib/storage/merge.go | 2 +- lib/storage/partition_search.go | 4 +- lib/storage/table_search.go | 4 +- lib/streamaggr/avg.go | 2 +- lib/streamaggr/count_samples.go | 2 +- lib/streamaggr/count_series.go | 2 +- lib/streamaggr/histogram_bucket.go | 4 +- lib/streamaggr/last.go | 2 +- lib/streamaggr/max.go | 2 +- lib/streamaggr/min.go | 2 +- lib/streamaggr/quantiles.go | 2 +- lib/streamaggr/rate.go | 2 +- lib/streamaggr/stddev.go | 2 +- lib/streamaggr/stdvar.go | 2 +- lib/streamaggr/sum_samples.go | 2 +- lib/streamaggr/total.go | 4 +- lib/streamaggr/unique_samples.go | 2 +- lib/uint64set/uint64set.go | 4 +- 96 files changed, 302 insertions(+), 309 deletions(-) diff --git a/app/vmagent/remotewrite/relabel.go b/app/vmagent/remotewrite/relabel.go index 547f0982a..e909a6fb5 100644 --- a/app/vmagent/remotewrite/relabel.go +++ b/app/vmagent/remotewrite/relabel.go @@ -181,7 +181,7 @@ func (rctx *relabelCtx) reset() { } var relabelCtxPool = &sync.Pool{ - New: func() interface{} { + New: func() any { return &relabelCtx{} }, } diff --git a/app/vmagent/remotewrite/remotewrite.go b/app/vmagent/remotewrite/remotewrite.go index 1d7bacc50..a7cdcf32b 100644 --- a/app/vmagent/remotewrite/remotewrite.go +++ b/app/vmagent/remotewrite/remotewrite.go @@ -1000,7 +1000,7 @@ func (rwctx *remoteWriteCtx) tryPushInternal(tss []prompbmarshal.TimeSeries) boo } var tssPool = &sync.Pool{ - New: func() interface{} { + New: func() any { a := []prompbmarshal.TimeSeries{} return &a }, diff --git a/app/vmalert/config/config.go b/app/vmalert/config/config.go index d2c07469c..0e7ba7d6a 100644 --- a/app/vmalert/config/config.go +++ b/app/vmalert/config/config.go @@ -45,11 +45,11 @@ type Group struct { // EvalAlignment will make the timestamp of group query requests be aligned with interval EvalAlignment *bool `yaml:"eval_alignment,omitempty"` // Catches all undefined fields and must be empty after parsing. - XXX map[string]interface{} `yaml:",inline"` + XXX map[string]any `yaml:",inline"` } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (g *Group) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (g *Group) UnmarshalYAML(unmarshal func(any) error) error { type group Group if err := unmarshal((*group)(g)); err != nil { return err @@ -142,11 +142,11 @@ type Rule struct { UpdateEntriesLimit *int `yaml:"update_entries_limit,omitempty"` // Catches all undefined fields and must be empty after parsing. - XXX map[string]interface{} `yaml:",inline"` + XXX map[string]any `yaml:",inline"` } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (r *Rule) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (r *Rule) UnmarshalYAML(unmarshal func(any) error) error { type rule Rule if err := unmarshal((*rule)(r)); err != nil { return err @@ -301,7 +301,7 @@ func parseConfig(data []byte) ([]Group, error) { g := struct { Groups []Group `yaml:"groups"` // Catches all undefined fields and must be empty after parsing. 
- XXX map[string]interface{} `yaml:",inline"` + XXX map[string]any `yaml:",inline"` }{} err = yaml.Unmarshal(data, &g) if err != nil { @@ -310,7 +310,7 @@ func parseConfig(data []byte) ([]Group, error) { return g.Groups, checkOverflow(g.XXX, "config") } -func checkOverflow(m map[string]interface{}, ctx string) error { +func checkOverflow(m map[string]any, ctx string) error { if len(m) > 0 { var keys []string for k := range m { diff --git a/app/vmalert/config/log/logger.go b/app/vmalert/config/log/logger.go index 8b79e55ac..db5279c27 100644 --- a/app/vmalert/config/log/logger.go +++ b/app/vmalert/config/log/logger.go @@ -29,7 +29,7 @@ func (l *Logger) isDisabled() bool { } // Errorf logs error message. -func (l *Logger) Errorf(format string, args ...interface{}) { +func (l *Logger) Errorf(format string, args ...any) { if l.isDisabled() { return } @@ -37,7 +37,7 @@ func (l *Logger) Errorf(format string, args ...interface{}) { } // Warnf logs warning message. -func (l *Logger) Warnf(format string, args ...interface{}) { +func (l *Logger) Warnf(format string, args ...any) { if l.isDisabled() { return } @@ -45,7 +45,7 @@ func (l *Logger) Warnf(format string, args ...interface{}) { } // Infof logs info message. -func (l *Logger) Infof(format string, args ...interface{}) { +func (l *Logger) Infof(format string, args ...any) { if l.isDisabled() { return } @@ -54,6 +54,6 @@ func (l *Logger) Infof(format string, args ...interface{}) { // Panicf logs panic message and panics. // Panicf can't be suppressed -func (l *Logger) Panicf(format string, args ...interface{}) { +func (l *Logger) Panicf(format string, args ...any) { logger.Panicf(format, args...) } diff --git a/app/vmalert/config/types.go b/app/vmalert/config/types.go index d1c0f80e0..cf16d0d8b 100644 --- a/app/vmalert/config/types.go +++ b/app/vmalert/config/types.go @@ -69,7 +69,7 @@ func (t *Type) ValidateExpr(expr string) error { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (t *Type) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (t *Type) UnmarshalYAML(unmarshal func(any) error) error { var s string if err := unmarshal(&s); err != nil { return err @@ -87,7 +87,7 @@ func (t *Type) UnmarshalYAML(unmarshal func(interface{}) error) error { } // MarshalYAML implements the yaml.Unmarshaler interface. -func (t Type) MarshalYAML() (interface{}, error) { +func (t Type) MarshalYAML() (any, error) { return t.Name, nil } @@ -98,7 +98,7 @@ type Header struct { } // UnmarshalYAML implements the yaml.Unmarshaler interface. 
-func (h *Header) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (h *Header) UnmarshalYAML(unmarshal func(any) error) error { var s string if err := unmarshal(&s); err != nil { return err diff --git a/app/vmalert/datasource/vm_prom_api.go b/app/vmalert/datasource/vm_prom_api.go index 0e61c0362..96eedb024 100644 --- a/app/vmalert/datasource/vm_prom_api.go +++ b/app/vmalert/datasource/vm_prom_api.go @@ -119,7 +119,7 @@ func (pi *promInstant) Unmarshal(b []byte) error { type promRange struct { Result []struct { Labels map[string]string `json:"metric"` - TVs [][2]interface{} `json:"values"` + TVs [][2]any `json:"values"` } `json:"result"` } @@ -147,7 +147,7 @@ func (r promRange) metrics() ([]Metric, error) { return result, nil } -type promScalar [2]interface{} +type promScalar [2]any func (r promScalar) metrics() ([]Metric, error) { var m Metric diff --git a/app/vmalert/notifier/config.go b/app/vmalert/notifier/config.go index 44b22fb79..3826c4df9 100644 --- a/app/vmalert/notifier/config.go +++ b/app/vmalert/notifier/config.go @@ -51,7 +51,7 @@ type Config struct { Checksum string // Catches all undefined fields and must be empty after parsing. - XXX map[string]interface{} `yaml:",inline"` + XXX map[string]any `yaml:",inline"` // This is set to the directory from where the config has been loaded. baseDir string @@ -73,7 +73,7 @@ type StaticConfig struct { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (cfg *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (cfg *Config) UnmarshalYAML(unmarshal func(any) error) error { type config Config if err := unmarshal((*config)(cfg)); err != nil { return err diff --git a/app/vmalert/rule/alerting.go b/app/vmalert/rule/alerting.go index 81fbd2cf2..632ac896e 100644 --- a/app/vmalert/rule/alerting.go +++ b/app/vmalert/rule/alerting.go @@ -183,7 +183,7 @@ func (ar *AlertingRule) GetAlert(id uint64) *notifier.Alert { return ar.alerts[id] } -func (ar *AlertingRule) logDebugf(at time.Time, a *notifier.Alert, format string, args ...interface{}) { +func (ar *AlertingRule) logDebugf(at time.Time, a *notifier.Alert, format string, args ...any) { if !ar.Debug { return } diff --git a/app/vmalert/rule/group.go b/app/vmalert/rule/group.go index 520039d71..e77b9b5f5 100644 --- a/app/vmalert/rule/group.go +++ b/app/vmalert/rule/group.go @@ -475,7 +475,7 @@ func delayBeforeStart(ts time.Time, key uint64, interval time.Duration, offset * return randSleep } -func (g *Group) infof(format string, args ...interface{}) { +func (g *Group) infof(format string, args ...any) { msg := fmt.Sprintf(format, args...) 
logger.Infof("group %q %s; interval=%v; eval_offset=%v; concurrency=%d", g.Name, msg, g.Interval, g.EvalOffset, g.Concurrency) diff --git a/app/vmalert/templates/template.go b/app/vmalert/templates/template.go index 8bbdd4c60..5a6b427bb 100644 --- a/app/vmalert/templates/template.go +++ b/app/vmalert/templates/template.go @@ -316,7 +316,7 @@ func templateFuncs() textTpl.FuncMap { // humanize converts given number to a human readable format // by adding metric prefixes https://en.wikipedia.org/wiki/Metric_prefix - "humanize": func(i interface{}) (string, error) { + "humanize": func(i any) (string, error) { v, err := toFloat64(i) if err != nil { return "", err @@ -347,7 +347,7 @@ func templateFuncs() textTpl.FuncMap { }, // humanize1024 converts given number to a human readable format with 1024 as base - "humanize1024": func(i interface{}) (string, error) { + "humanize1024": func(i any) (string, error) { v, err := toFloat64(i) if err != nil { return "", err @@ -359,7 +359,7 @@ func templateFuncs() textTpl.FuncMap { }, // humanizeDuration converts given seconds to a human-readable duration - "humanizeDuration": func(i interface{}) (string, error) { + "humanizeDuration": func(i any) (string, error) { v, err := toFloat64(i) if err != nil { return "", err @@ -405,7 +405,7 @@ func templateFuncs() textTpl.FuncMap { }, // humanizePercentage converts given ratio value to a fraction of 100 - "humanizePercentage": func(i interface{}) (string, error) { + "humanizePercentage": func(i any) (string, error) { v, err := toFloat64(i) if err != nil { return "", err @@ -414,7 +414,7 @@ func templateFuncs() textTpl.FuncMap { }, // humanizeTimestamp converts given timestamp to a human readable time equivalent - "humanizeTimestamp": func(i interface{}) (string, error) { + "humanizeTimestamp": func(i any) (string, error) { v, err := toFloat64(i) if err != nil { return "", err @@ -427,7 +427,7 @@ func templateFuncs() textTpl.FuncMap { }, // toTime converts given timestamp to a time.Time. - "toTime": func(i interface{}) (time.Time, error) { + "toTime": func(i any) (time.Time, error) { v, err := toFloat64(i) if err != nil { return time.Time{}, err @@ -524,8 +524,8 @@ func templateFuncs() textTpl.FuncMap { // Converts a list of objects to a map with keys arg0, arg1 etc. // This is intended to allow multiple arguments to be passed to templates. 
- "args": func(args ...interface{}) map[string]interface{} { - result := make(map[string]interface{}) + "args": func(args ...any) map[string]any { + result := make(map[string]any) for i, a := range args { result[fmt.Sprintf("arg%d", i)] = a } @@ -565,7 +565,7 @@ func (t Time) Time() time.Time { return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) } -func toFloat64(v interface{}) (float64, error) { +func toFloat64(v any) (float64, error) { switch i := v.(type) { case float64: return i, nil diff --git a/app/vmalert/templates/template_test.go b/app/vmalert/templates/template_test.go index 30bf1e975..56f7719e6 100644 --- a/app/vmalert/templates/template_test.go +++ b/app/vmalert/templates/template_test.go @@ -52,10 +52,10 @@ func TestTemplateFuncs(t *testing.T) { t.Fatalf("unexpected mismatch") } - formatting := func(funcName string, p interface{}, resultExpected string) { + formatting := func(funcName string, p any, resultExpected string) { t.Helper() v := funcs[funcName] - fLocal := v.(func(s interface{}) (string, error)) + fLocal := v.(func(s any) (string, error)) result, err := fLocal(p) if err != nil { t.Fatalf("unexpected error for %s(%f): %s", funcName, p, err) @@ -92,7 +92,7 @@ func TestTemplateFuncs(t *testing.T) { formatting("humanizeTimestamp", 1679055557, "2023-03-17 12:19:17 +0000 UTC") } -func mkTemplate(current, replacement interface{}) textTemplate { +func mkTemplate(current, replacement any) textTemplate { tmpl := textTemplate{} if current != nil { switch val := current.(type) { diff --git a/app/vmalert/web_test.go b/app/vmalert/web_test.go index 30796e3ad..4d78c8039 100644 --- a/app/vmalert/web_test.go +++ b/app/vmalert/web_test.go @@ -36,7 +36,7 @@ func TestHandler(t *testing.T) { }} rh := &requestHandler{m: m} - getResp := func(t *testing.T, url string, to interface{}, code int) { + getResp := func(t *testing.T, url string, to any, code int) { t.Helper() resp, err := http.Get(url) if err != nil { @@ -241,7 +241,7 @@ func TestEmptyResponse(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { rhWithNoGroups.handler(w, r) })) defer ts.Close() - getResp := func(t *testing.T, url string, to interface{}, code int) { + getResp := func(t *testing.T, url string, to any, code int) { t.Helper() resp, err := http.Get(url) if err != nil { diff --git a/app/vmalert/web_types.go b/app/vmalert/web_types.go index 1da1d941d..639236591 100644 --- a/app/vmalert/web_types.go +++ b/app/vmalert/web_types.go @@ -183,7 +183,7 @@ func (ar apiRule) WebLink() string { paramGroupID, ar.GroupID, paramRuleID, ar.ID) } -func ruleToAPI(r interface{}) apiRule { +func ruleToAPI(r any) apiRule { if ar, ok := r.(*rule.AlertingRule); ok { return alertingToAPI(ar) } diff --git a/app/vmauth/auth_config.go b/app/vmauth/auth_config.go index a5fea2785..18788c5c6 100644 --- a/app/vmauth/auth_config.go +++ b/app/vmauth/auth_config.go @@ -129,7 +129,7 @@ type Header struct { } // UnmarshalYAML unmarshals h from f. -func (h *Header) UnmarshalYAML(f func(interface{}) error) error { +func (h *Header) UnmarshalYAML(f func(any) error) error { var s string if err := f(&s); err != nil { return err @@ -146,7 +146,7 @@ func (h *Header) UnmarshalYAML(f func(interface{}) error) error { } // MarshalYAML marshals h to yaml. -func (h *Header) MarshalYAML() (interface{}, error) { +func (h *Header) MarshalYAML() (any, error) { return h.sOriginal, nil } @@ -201,7 +201,7 @@ type QueryArg struct { } // UnmarshalYAML unmarshals qa from yaml. 
-func (qa *QueryArg) UnmarshalYAML(f func(interface{}) error) error { +func (qa *QueryArg) UnmarshalYAML(f func(any) error) error { var s string if err := f(&s); err != nil { return err @@ -230,7 +230,7 @@ func (qa *QueryArg) UnmarshalYAML(f func(interface{}) error) error { } // MarshalYAML marshals qa to yaml. -func (qa *QueryArg) MarshalYAML() (interface{}, error) { +func (qa *QueryArg) MarshalYAML() (any, error) { return qa.sOriginal, nil } @@ -263,7 +263,7 @@ type URLPrefix struct { nextDiscoveryDeadline atomic.Uint64 // vOriginal contains the original yaml value for URLPrefix. - vOriginal interface{} + vOriginal any } func (up *URLPrefix) setLoadBalancingPolicy(loadBalancingPolicy string) error { @@ -497,8 +497,8 @@ func getLeastLoadedBackendURL(bus []*backendURL, atomicCounter *atomic.Uint32) * } // UnmarshalYAML unmarshals up from yaml. -func (up *URLPrefix) UnmarshalYAML(f func(interface{}) error) error { - var v interface{} +func (up *URLPrefix) UnmarshalYAML(f func(any) error) error { + var v any if err := f(&v); err != nil { return err } @@ -508,7 +508,7 @@ func (up *URLPrefix) UnmarshalYAML(f func(interface{}) error) error { switch x := v.(type) { case string: urls = []string{x} - case []interface{}: + case []any: if len(x) == 0 { return fmt.Errorf("`url_prefix` must contain at least a single url") } @@ -538,7 +538,7 @@ func (up *URLPrefix) UnmarshalYAML(f func(interface{}) error) error { } // MarshalYAML marshals up to yaml. -func (up *URLPrefix) MarshalYAML() (interface{}, error) { +func (up *URLPrefix) MarshalYAML() (any, error) { return up.vOriginal, nil } @@ -562,7 +562,7 @@ func (r *Regex) match(s string) bool { } // UnmarshalYAML implements yaml.Unmarshaler -func (r *Regex) UnmarshalYAML(f func(interface{}) error) error { +func (r *Regex) UnmarshalYAML(f func(any) error) error { var s string if err := f(&s); err != nil { return err @@ -579,7 +579,7 @@ func (r *Regex) UnmarshalYAML(f func(interface{}) error) error { } // MarshalYAML implements yaml.Marshaler. 
-func (r *Regex) MarshalYAML() (interface{}, error) { +func (r *Regex) MarshalYAML() (any, error) { return r.sOriginal, nil } diff --git a/app/vmctl/influx/parser.go b/app/vmctl/influx/parser.go index a4b7ab7d6..daec00ecf 100644 --- a/app/vmctl/influx/parser.go +++ b/app/vmctl/influx/parser.go @@ -12,7 +12,7 @@ import ( type queryValues struct { name string - values map[string][]interface{} + values map[string][]any } func parseResult(r influx.Result) ([]queryValues, error) { @@ -21,7 +21,7 @@ func parseResult(r influx.Result) ([]queryValues, error) { } qValues := make([]queryValues, len(r.Series)) for i, row := range r.Series { - values := make(map[string][]interface{}, len(row.Values)) + values := make(map[string][]any, len(row.Values)) for _, value := range row.Values { for idx, v := range value { key := row.Columns[idx] @@ -36,7 +36,7 @@ func parseResult(r influx.Result) ([]queryValues, error) { return qValues, nil } -func toFloat64(v interface{}) (float64, error) { +func toFloat64(v any) (float64, error) { switch i := v.(type) { case json.Number: return i.Float64() diff --git a/app/vmctl/influx/parser_test.go b/app/vmctl/influx/parser_test.go index 70ee424bb..861f379cf 100644 --- a/app/vmctl/influx/parser_test.go +++ b/app/vmctl/influx/parser_test.go @@ -61,7 +61,7 @@ func TestSeries_Unmarshal(t *testing.T) { } func TestToFloat64(t *testing.T) { - f := func(in interface{}, want float64) { + f := func(in any, want float64) { t.Helper() got, err := toFloat64(in) if err != nil { diff --git a/app/vmctl/opentsdb/opentsdb.go b/app/vmctl/opentsdb/opentsdb.go index b75e20b5a..11e5caeb5 100644 --- a/app/vmctl/opentsdb/opentsdb.go +++ b/app/vmctl/opentsdb/opentsdb.go @@ -75,12 +75,6 @@ type TimeRange struct { type MetaResults struct { Type string `json:"type"` Results []Meta `json:"results"` - //metric string - //tags interface{} - //limit int - //time int - //startIndex int - //totalResults int } // Meta A meta object about a metric @@ -88,7 +82,6 @@ type MetaResults struct { type Meta struct { Metric string `json:"metric"` Tags map[string]string `json:"tags"` - //tsuid string } // OtsdbMetric is a single series in OpenTSDB's returned format diff --git a/app/vmctl/vm/timeseries.go b/app/vmctl/vm/timeseries.go index 188d56033..31fc368be 100644 --- a/app/vmctl/vm/timeseries.go +++ b/app/vmctl/vm/timeseries.go @@ -45,7 +45,7 @@ type cWriter struct { err error } -func (cw *cWriter) printf(format string, args ...interface{}) { +func (cw *cWriter) printf(format string, args ...any) { if cw.err != nil { return } diff --git a/app/vmctl/vm_native.go b/app/vmctl/vm_native.go index dea0fdc75..618e1fdec 100644 --- a/app/vmctl/vm_native.go +++ b/app/vmctl/vm_native.go @@ -176,10 +176,10 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string, } initMessage := "Initing import process from %q to %q with filter %s" - initParams := []interface{}{srcURL, dstURL, p.filter.String()} + initParams := []any{srcURL, dstURL, p.filter.String()} if p.interCluster { initMessage = "Initing import process from %q to %q with filter %s for tenant %s" - initParams = []interface{}{srcURL, dstURL, p.filter.String(), tenantID} + initParams = []any{srcURL, dstURL, p.filter.String(), tenantID} } fmt.Println("") // extra line for better output formatting diff --git a/app/vmselect/graphite/aggr.go b/app/vmselect/graphite/aggr.go index 49c51efbf..d1fe5ad3d 100644 --- a/app/vmselect/graphite/aggr.go +++ b/app/vmselect/graphite/aggr.go @@ -253,7 +253,7 @@ func putHistogram(h *histogram.Fast) { } var 
histogramPool = &sync.Pool{ - New: func() interface{} { + New: func() any { return histogram.NewFast() }, } diff --git a/app/vmselect/graphite/functions_api.go b/app/vmselect/graphite/functions_api.go index c7bb7cccc..8c60583bc 100644 --- a/app/vmselect/graphite/functions_api.go +++ b/app/vmselect/graphite/functions_api.go @@ -16,7 +16,7 @@ import ( func FunctionsHandler(w http.ResponseWriter, r *http.Request) error { grouped := httputils.GetBool(r, "grouped") group := r.FormValue("group") - result := make(map[string]interface{}) + result := make(map[string]any) for funcName, fi := range funcs { if group != "" && fi.Group != group { continue @@ -47,7 +47,7 @@ func FunctionDetailsHandler(funcName string, w http.ResponseWriter, r *http.Requ return writeJSON(result, w, r) } -func writeJSON(result interface{}, w http.ResponseWriter, r *http.Request) error { +func writeJSON(result any, w http.ResponseWriter, r *http.Request) error { data, err := json.Marshal(result) if err != nil { return fmt.Errorf("cannot marshal response to JSON: %w", err) diff --git a/app/vmselect/graphite/transform.go b/app/vmselect/graphite/transform.go index 0b3b62658..66ed47d17 100644 --- a/app/vmselect/graphite/transform.go +++ b/app/vmselect/graphite/transform.go @@ -1968,10 +1968,10 @@ func (h *minSeriesHeap) Swap(i, j int) { a := *h a[i], a[j] = a[j], a[i] } -func (h *minSeriesHeap) Push(x interface{}) { +func (h *minSeriesHeap) Push(x any) { *h = append(*h, x.(*seriesWithWeight)) } -func (h *minSeriesHeap) Pop() interface{} { +func (h *minSeriesHeap) Pop() any { a := *h x := a[len(a)-1] *h = a[:len(a)-1] @@ -2499,10 +2499,10 @@ func (h *maxSeriesHeap) Swap(i, j int) { a := *h a[i], a[j] = a[j], a[i] } -func (h *maxSeriesHeap) Push(x interface{}) { +func (h *maxSeriesHeap) Push(x any) { *h = append(*h, x.(*seriesWithWeight)) } -func (h *maxSeriesHeap) Pop() interface{} { +func (h *maxSeriesHeap) Pop() any { a := *h x := a[len(a)-1] *h = a[:len(a)-1] diff --git a/app/vmselect/netstorage/netstorage.go b/app/vmselect/netstorage/netstorage.go index f58da33e6..a279da1f3 100644 --- a/app/vmselect/netstorage/netstorage.go +++ b/app/vmselect/netstorage/netstorage.go @@ -760,11 +760,11 @@ func (sbh *sortBlocksHeap) Swap(i, j int) { sbs[i], sbs[j] = sbs[j], sbs[i] } -func (sbh *sortBlocksHeap) Push(x interface{}) { +func (sbh *sortBlocksHeap) Push(x any) { sbh.sbs = append(sbh.sbs, x.(*sortBlock)) } -func (sbh *sortBlocksHeap) Pop() interface{} { +func (sbh *sortBlocksHeap) Pop() any { sbs := sbh.sbs v := sbs[len(sbs)-1] sbs[len(sbs)-1] = nil @@ -810,7 +810,7 @@ func RegisterMetricNames(qt *querytracer.Tracer, mrs []storage.MetricRow, deadli } // Push mrs to storage nodes in parallel. 
- snr := startStorageNodesRequest(qt, sns, true, func(qt *querytracer.Tracer, workerID uint, sn *storageNode) interface{} { + snr := startStorageNodesRequest(qt, sns, true, func(qt *querytracer.Tracer, workerID uint, sn *storageNode) any { sn.registerMetricNamesRequests.Inc() err := sn.registerMetricNames(qt, mrsPerNode[workerID], deadline) if err != nil { @@ -820,7 +820,7 @@ func RegisterMetricNames(qt *querytracer.Tracer, mrs []storage.MetricRow, deadli }) // Collect results - err := snr.collectAllResults(func(result interface{}) error { + err := snr.collectAllResults(func(result any) error { errP := result.(*error) return *errP }) @@ -842,7 +842,7 @@ func DeleteSeries(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline sear err error } sns := getStorageNodes() - snr := startStorageNodesRequest(qt, sns, true, func(qt *querytracer.Tracer, _ uint, sn *storageNode) interface{} { + snr := startStorageNodesRequest(qt, sns, true, func(qt *querytracer.Tracer, _ uint, sn *storageNode) any { sn.deleteSeriesRequests.Inc() deletedCount, err := sn.deleteSeries(qt, requestData, deadline) if err != nil { @@ -856,7 +856,7 @@ func DeleteSeries(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline sear // Collect results deletedTotal := 0 - err := snr.collectAllResults(func(result interface{}) error { + err := snr.collectAllResults(func(result any) error { nr := result.(*nodeResult) if nr.err != nil { return nr.err @@ -884,7 +884,7 @@ func LabelNames(qt *querytracer.Tracer, denyPartialResponse bool, sq *storage.Se err error } sns := getStorageNodes() - snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, _ uint, sn *storageNode) interface{} { + snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, _ uint, sn *storageNode) any { sn.labelNamesRequests.Inc() labelNames, err := sn.getLabelNames(qt, requestData, maxLabelNames, deadline) if err != nil { @@ -899,7 +899,7 @@ func LabelNames(qt *querytracer.Tracer, denyPartialResponse bool, sq *storage.Se // Collect results var labelNames []string - isPartial, err := snr.collectResults(partialLabelNamesResults, func(result interface{}) error { + isPartial, err := snr.collectResults(partialLabelNamesResults, func(result any) error { nr := result.(*nodeResult) if nr.err != nil { return nr.err @@ -987,7 +987,7 @@ func LabelValues(qt *querytracer.Tracer, denyPartialResponse bool, labelName str err error } sns := getStorageNodes() - snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, _ uint, sn *storageNode) interface{} { + snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, _ uint, sn *storageNode) any { sn.labelValuesRequests.Inc() labelValues, err := sn.getLabelValues(qt, labelName, requestData, maxLabelValues, deadline) if err != nil { @@ -1002,7 +1002,7 @@ func LabelValues(qt *querytracer.Tracer, denyPartialResponse bool, labelName str // Collect results var labelValues []string - isPartial, err := snr.collectResults(partialLabelValuesResults, func(result interface{}) error { + isPartial, err := snr.collectResults(partialLabelValuesResults, func(result any) error { nr := result.(*nodeResult) if nr.err != nil { return nr.err @@ -1042,7 +1042,7 @@ func Tenants(qt *querytracer.Tracer, tr storage.TimeRange, deadline searchutils. } sns := getStorageNodes() // Deny partial responses when obtaining the list of tenants, since partial tenants have little sense. 
- snr := startStorageNodesRequest(qt, sns, true, func(qt *querytracer.Tracer, _ uint, sn *storageNode) interface{} { + snr := startStorageNodesRequest(qt, sns, true, func(qt *querytracer.Tracer, _ uint, sn *storageNode) any { sn.tenantsRequests.Inc() tenants, err := sn.getTenants(qt, tr, deadline) if err != nil { @@ -1057,7 +1057,7 @@ func Tenants(qt *querytracer.Tracer, tr storage.TimeRange, deadline searchutils. // Collect results var tenants []string - _, err := snr.collectResults(partialLabelValuesResults, func(result interface{}) error { + _, err := snr.collectResults(partialLabelValuesResults, func(result any) error { nr := result.(*nodeResult) if nr.err != nil { return nr.err @@ -1122,7 +1122,7 @@ func TagValueSuffixes(qt *querytracer.Tracer, accountID, projectID uint32, denyP err error } sns := getStorageNodes() - snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, _ uint, sn *storageNode) interface{} { + snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, _ uint, sn *storageNode) any { sn.tagValueSuffixesRequests.Inc() suffixes, err := sn.getTagValueSuffixes(qt, accountID, projectID, tr, tagKey, tagValuePrefix, delimiter, maxSuffixes, deadline) if err != nil { @@ -1138,7 +1138,7 @@ func TagValueSuffixes(qt *querytracer.Tracer, accountID, projectID uint32, denyP // Collect results m := make(map[string]struct{}) - isPartial, err := snr.collectResults(partialTagValueSuffixesResults, func(result interface{}) error { + isPartial, err := snr.collectResults(partialTagValueSuffixesResults, func(result any) error { nr := result.(*nodeResult) if nr.err != nil { return nr.err @@ -1187,7 +1187,7 @@ func TSDBStatus(qt *querytracer.Tracer, denyPartialResponse bool, sq *storage.Se err error } sns := getStorageNodes() - snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, _ uint, sn *storageNode) interface{} { + snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, _ uint, sn *storageNode) any { sn.tsdbStatusRequests.Inc() status, err := sn.getTSDBStatus(qt, requestData, focusLabel, topN, deadline) if err != nil { @@ -1202,7 +1202,7 @@ func TSDBStatus(qt *querytracer.Tracer, denyPartialResponse bool, sq *storage.Se // Collect results. 
var statuses []*storage.TSDBStatus - isPartial, err := snr.collectResults(partialTSDBStatusResults, func(result interface{}) error { + isPartial, err := snr.collectResults(partialTSDBStatusResults, func(result any) error { nr := result.(*nodeResult) if nr.err != nil { return nr.err @@ -1293,7 +1293,7 @@ func SeriesCount(qt *querytracer.Tracer, accountID, projectID uint32, denyPartia err error } sns := getStorageNodes() - snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, _ uint, sn *storageNode) interface{} { + snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, _ uint, sn *storageNode) any { sn.seriesCountRequests.Inc() n, err := sn.getSeriesCount(qt, accountID, projectID, deadline) if err != nil { @@ -1308,7 +1308,7 @@ func SeriesCount(qt *querytracer.Tracer, accountID, projectID uint32, denyPartia // Collect results var n uint64 - isPartial, err := snr.collectResults(partialSeriesCountResults, func(result interface{}) error { + isPartial, err := snr.collectResults(partialSeriesCountResults, func(result any) error { nr := result.(*nodeResult) if nr.err != nil { return nr.err @@ -1548,7 +1548,7 @@ func (tbfw *tmpBlocksFileWrapper) getTmpBlockFiles() []*tmpBlocksFile { } var metricNamePool = &sync.Pool{ - New: func() interface{} { + New: func() any { return &storage.MetricName{} }, } @@ -1611,7 +1611,7 @@ func SearchMetricNames(qt *querytracer.Tracer, denyPartialResponse bool, sq *sto err error } sns := getStorageNodes() - snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, _ uint, sn *storageNode) interface{} { + snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, _ uint, sn *storageNode) any { sn.searchMetricNamesRequests.Inc() metricNames, err := sn.processSearchMetricNames(qt, requestData, deadline) if err != nil { @@ -1626,7 +1626,7 @@ func SearchMetricNames(qt *querytracer.Tracer, denyPartialResponse bool, sq *sto // Collect results. metricNamesMap := make(map[string]struct{}) - isPartial, err := snr.collectResults(partialSearchMetricNamesResults, func(result interface{}) error { + isPartial, err := snr.collectResults(partialSearchMetricNamesResults, func(result any) error { nr := result.(*nodeResult) if nr.err != nil { return nr.err @@ -1772,7 +1772,7 @@ func processBlocks(qt *querytracer.Tracer, sns []*storageNode, denyPartialRespon } // Send the query to all the storage nodes in parallel. - snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, workerID uint, sn *storageNode) interface{} { + snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, workerID uint, sn *storageNode) any { sn.searchRequests.Inc() err := sn.processSearchQuery(qt, requestData, f, workerID, deadline) if err != nil { @@ -1783,7 +1783,7 @@ func processBlocks(qt *querytracer.Tracer, sns []*storageNode, denyPartialRespon }) // Collect results. 
- isPartial, err := snr.collectResults(partialSearchResults, func(result interface{}) error { + isPartial, err := snr.collectResults(partialSearchResults, func(result any) error { errP := result.(*error) return *errP }) @@ -1811,13 +1811,13 @@ type storageNodesRequest struct { } type rpcResult struct { - data interface{} + data any qt *querytracer.Tracer group *storageNodesGroup } func startStorageNodesRequest(qt *querytracer.Tracer, sns []*storageNode, denyPartialResponse bool, - f func(qt *querytracer.Tracer, workerID uint, sn *storageNode) interface{}) *storageNodesRequest { + f func(qt *querytracer.Tracer, workerID uint, sn *storageNode) any) *storageNodesRequest { resultsCh := make(chan rpcResult, len(sns)) qts := make(map[*querytracer.Tracer]struct{}, len(sns)) for idx, sn := range sns { @@ -1855,7 +1855,7 @@ func (snr *storageNodesRequest) finishQueryTracer(qt *querytracer.Tracer, msg st delete(snr.qts, qt) } -func (snr *storageNodesRequest) collectAllResults(f func(result interface{}) error) error { +func (snr *storageNodesRequest) collectAllResults(f func(result any) error) error { sns := snr.sns for i := 0; i < len(sns); i++ { result := <-snr.resultsCh @@ -1871,7 +1871,7 @@ func (snr *storageNodesRequest) collectAllResults(f func(result interface{}) err return nil } -func (snr *storageNodesRequest) collectResults(partialResultsCounter *metrics.Counter, f func(result interface{}) error) (bool, error) { +func (snr *storageNodesRequest) collectResults(partialResultsCounter *metrics.Counter, f func(result any) error) (bool, error) { sns := snr.sns if len(sns) == 0 { return false, nil diff --git a/app/vmselect/prometheus/prometheus.go b/app/vmselect/prometheus/prometheus.go index 3a14f70e7..3405ac81e 100644 --- a/app/vmselect/prometheus/prometheus.go +++ b/app/vmselect/prometheus/prometheus.go @@ -475,7 +475,7 @@ func (xb *exportBlock) reset() { } var exportBlockPool = &sync.Pool{ - New: func() interface{} { + New: func() any { return &exportBlock{} }, } @@ -1341,7 +1341,7 @@ func (sw *scalableWriter) maybeFlushBuffer(bb *bytesutil.ByteBuffer) error { } func (sw *scalableWriter) flush() error { - sw.m.Range(func(_, v interface{}) bool { + sw.m.Range(func(_, v any) bool { bb := v.(*bytesutil.ByteBuffer) _, err := sw.bw.Write(bb.B) return err == nil diff --git a/app/vmselect/promql/eval.go b/app/vmselect/promql/eval.go index dbfd124a6..83d44610f 100644 --- a/app/vmselect/promql/eval.go +++ b/app/vmselect/promql/eval.go @@ -757,13 +757,13 @@ func evalExprsInParallel(qt *querytracer.Tracer, ec *EvalConfig, es []metricsql. 
return rvs, nil } -func evalRollupFuncArgs(qt *querytracer.Tracer, ec *EvalConfig, fe *metricsql.FuncExpr) ([]interface{}, *metricsql.RollupExpr, error) { +func evalRollupFuncArgs(qt *querytracer.Tracer, ec *EvalConfig, fe *metricsql.FuncExpr) ([]any, *metricsql.RollupExpr, error) { var re *metricsql.RollupExpr rollupArgIdx := metricsql.GetRollupArgIdx(fe) if len(fe.Args) <= rollupArgIdx { return nil, nil, fmt.Errorf("expecting at least %d args to %q; got %d args; expr: %q", rollupArgIdx+1, fe.Name, len(fe.Args), fe.AppendString(nil)) } - args := make([]interface{}, len(fe.Args)) + args := make([]any, len(fe.Args)) for i, arg := range fe.Args { if i == rollupArgIdx { re = getRollupExprArg(arg) diff --git a/app/vmselect/promql/rollup.go b/app/vmselect/promql/rollup.go index 47af5c3b7..66a0bf11c 100644 --- a/app/vmselect/promql/rollup.go +++ b/app/vmselect/promql/rollup.go @@ -956,10 +956,10 @@ func derivValues(values []float64, timestamps []int64) { values[len(values)-1] = prevDeriv } -type newRollupFunc func(args []interface{}) (rollupFunc, error) +type newRollupFunc func(args []any) (rollupFunc, error) func newRollupFuncOneArg(rf rollupFunc) newRollupFunc { - return func(args []interface{}) (rollupFunc, error) { + return func(args []any) (rollupFunc, error) { if err := expectRollupArgsNum(args, 1); err != nil { return nil, err } @@ -968,7 +968,7 @@ func newRollupFuncOneArg(rf rollupFunc) newRollupFunc { } func newRollupFuncTwoArgs(rf rollupFunc) newRollupFunc { - return func(args []interface{}) (rollupFunc, error) { + return func(args []any) (rollupFunc, error) { if err := expectRollupArgsNum(args, 2); err != nil { return nil, err } @@ -977,7 +977,7 @@ func newRollupFuncTwoArgs(rf rollupFunc) newRollupFunc { } func newRollupFuncOneOrTwoArgs(rf rollupFunc) newRollupFunc { - return func(args []interface{}) (rollupFunc, error) { + return func(args []any) (rollupFunc, error) { if len(args) < 1 || len(args) > 2 { return nil, fmt.Errorf("unexpected number of args; got %d; want 1...2", len(args)) } @@ -985,7 +985,7 @@ func newRollupFuncOneOrTwoArgs(rf rollupFunc) newRollupFunc { } } -func newRollupHoltWinters(args []interface{}) (rollupFunc, error) { +func newRollupHoltWinters(args []any) (rollupFunc, error) { if err := expectRollupArgsNum(args, 3); err != nil { return nil, err } @@ -1035,7 +1035,7 @@ func newRollupHoltWinters(args []interface{}) (rollupFunc, error) { return rf, nil } -func newRollupPredictLinear(args []interface{}) (rollupFunc, error) { +func newRollupPredictLinear(args []any) (rollupFunc, error) { if err := expectRollupArgsNum(args, 2); err != nil { return nil, err } @@ -1106,7 +1106,7 @@ func areConstValues(values []float64) bool { return true } -func newRollupDurationOverTime(args []interface{}) (rollupFunc, error) { +func newRollupDurationOverTime(args []any) (rollupFunc, error) { if err := expectRollupArgsNum(args, 2); err != nil { return nil, err } @@ -1136,7 +1136,7 @@ func newRollupDurationOverTime(args []interface{}) (rollupFunc, error) { return rf, nil } -func newRollupShareLE(args []interface{}) (rollupFunc, error) { +func newRollupShareLE(args []any) (rollupFunc, error) { return newRollupAvgFilter(args, countFilterLE) } @@ -1150,7 +1150,7 @@ func countFilterLE(values []float64, le float64) float64 { return float64(n) } -func newRollupShareGT(args []interface{}) (rollupFunc, error) { +func newRollupShareGT(args []any) (rollupFunc, error) { return newRollupAvgFilter(args, countFilterGT) } @@ -1164,7 +1164,7 @@ func countFilterGT(values []float64, gt float64) float64 { 
return float64(n) } -func newRollupShareEQ(args []interface{}) (rollupFunc, error) { +func newRollupShareEQ(args []any) (rollupFunc, error) { return newRollupAvgFilter(args, countFilterEQ) } @@ -1218,7 +1218,7 @@ func countFilterNE(values []float64, ne float64) float64 { return float64(n) } -func newRollupAvgFilter(args []interface{}, f func(values []float64, limit float64) float64) (rollupFunc, error) { +func newRollupAvgFilter(args []any, f func(values []float64, limit float64) float64) (rollupFunc, error) { rf, err := newRollupFilter(args, f) if err != nil { return nil, err @@ -1229,35 +1229,35 @@ func newRollupAvgFilter(args []interface{}, f func(values []float64, limit float }, nil } -func newRollupCountEQ(args []interface{}) (rollupFunc, error) { +func newRollupCountEQ(args []any) (rollupFunc, error) { return newRollupFilter(args, countFilterEQ) } -func newRollupCountLE(args []interface{}) (rollupFunc, error) { +func newRollupCountLE(args []any) (rollupFunc, error) { return newRollupFilter(args, countFilterLE) } -func newRollupCountGT(args []interface{}) (rollupFunc, error) { +func newRollupCountGT(args []any) (rollupFunc, error) { return newRollupFilter(args, countFilterGT) } -func newRollupCountNE(args []interface{}) (rollupFunc, error) { +func newRollupCountNE(args []any) (rollupFunc, error) { return newRollupFilter(args, countFilterNE) } -func newRollupSumEQ(args []interface{}) (rollupFunc, error) { +func newRollupSumEQ(args []any) (rollupFunc, error) { return newRollupFilter(args, sumFilterEQ) } -func newRollupSumLE(args []interface{}) (rollupFunc, error) { +func newRollupSumLE(args []any) (rollupFunc, error) { return newRollupFilter(args, sumFilterLE) } -func newRollupSumGT(args []interface{}) (rollupFunc, error) { +func newRollupSumGT(args []any) (rollupFunc, error) { return newRollupFilter(args, sumFilterGT) } -func newRollupFilter(args []interface{}, f func(values []float64, limit float64) float64) (rollupFunc, error) { +func newRollupFilter(args []any, f func(values []float64, limit float64) float64) (rollupFunc, error) { if err := expectRollupArgsNum(args, 2); err != nil { return nil, err } @@ -1278,7 +1278,7 @@ func newRollupFilter(args []interface{}, f func(values []float64, limit float64) return rf, nil } -func newRollupHoeffdingBoundLower(args []interface{}) (rollupFunc, error) { +func newRollupHoeffdingBoundLower(args []any) (rollupFunc, error) { if err := expectRollupArgsNum(args, 2); err != nil { return nil, err } @@ -1293,7 +1293,7 @@ func newRollupHoeffdingBoundLower(args []interface{}) (rollupFunc, error) { return rf, nil } -func newRollupHoeffdingBoundUpper(args []interface{}) (rollupFunc, error) { +func newRollupHoeffdingBoundUpper(args []any) (rollupFunc, error) { if err := expectRollupArgsNum(args, 2); err != nil { return nil, err } @@ -1338,7 +1338,7 @@ func rollupHoeffdingBoundInternal(rfa *rollupFuncArg, phis []float64) (float64, return bound, vAvg } -func newRollupQuantiles(args []interface{}) (rollupFunc, error) { +func newRollupQuantiles(args []any) (rollupFunc, error) { if len(args) < 3 { return nil, fmt.Errorf("unexpected number of args: %d; want at least 3 args", len(args)) } @@ -1405,7 +1405,7 @@ func rollupOutlierIQR(rfa *rollupFuncArg) float64 { return nan } -func newRollupQuantile(args []interface{}) (rollupFunc, error) { +func newRollupQuantile(args []any) (rollupFunc, error) { if err := expectRollupArgsNum(args, 2); err != nil { return nil, err } @@ -1445,7 +1445,7 @@ func mad(values []float64) float64 { return v } -func 
newRollupCountValues(args []interface{}) (rollupFunc, error) { +func newRollupCountValues(args []any) (rollupFunc, error) { if err := expectRollupArgsNum(args, 2); err != nil { return nil, err } @@ -2389,7 +2389,7 @@ func rollupFake(_ *rollupFuncArg) float64 { return 0 } -func getScalar(arg interface{}, argNum int) ([]float64, error) { +func getScalar(arg any, argNum int) ([]float64, error) { ts, ok := arg.([]*timeseries) if !ok { return nil, fmt.Errorf(`unexpected type for arg #%d; got %T; want %T`, argNum+1, arg, ts) @@ -2400,7 +2400,7 @@ func getScalar(arg interface{}, argNum int) ([]float64, error) { return ts[0].Values, nil } -func getIntNumber(arg interface{}, argNum int) (int, error) { +func getIntNumber(arg any, argNum int) (int, error) { v, err := getScalar(arg, argNum) if err != nil { return 0, err @@ -2425,7 +2425,7 @@ func getString(tss []*timeseries, argNum int) (string, error) { return string(ts.MetricName.MetricGroup), nil } -func expectRollupArgsNum(args []interface{}, expectedNum int) error { +func expectRollupArgsNum(args []any, expectedNum int) error { if len(args) == expectedNum { return nil } diff --git a/app/vmselect/promql/rollup_test.go b/app/vmselect/promql/rollup_test.go index af0dc2680..d10d2cebf 100644 --- a/app/vmselect/promql/rollup_test.go +++ b/app/vmselect/promql/rollup_test.go @@ -200,7 +200,7 @@ func TestDerivValues(t *testing.T) { testRowsEqual(t, values, timestamps, valuesExpected, timestamps) } -func testRollupFunc(t *testing.T, funcName string, args []interface{}, vExpected float64) { +func testRollupFunc(t *testing.T, funcName string, args []any, vExpected float64) { t.Helper() nrf := getRollupFunc(funcName) if nrf == nil { @@ -245,7 +245,7 @@ func TestRollupDurationOverTime(t *testing.T) { Timestamps: []int64{123}, }} var me metricsql.MetricExpr - args := []interface{}{&metricsql.RollupExpr{Expr: &me}, maxIntervals} + args := []any{&metricsql.RollupExpr{Expr: &me}, maxIntervals} testRollupFunc(t, "duration_over_time", args, dExpected) } f(-123, 0) @@ -266,7 +266,7 @@ func TestRollupShareLEOverTime(t *testing.T) { Timestamps: []int64{123}, }} var me metricsql.MetricExpr - args := []interface{}{&metricsql.RollupExpr{Expr: &me}, les} + args := []any{&metricsql.RollupExpr{Expr: &me}, les} testRollupFunc(t, "share_le_over_time", args, vExpected) } @@ -289,7 +289,7 @@ func TestRollupShareGTOverTime(t *testing.T) { Timestamps: []int64{123}, }} var me metricsql.MetricExpr - args := []interface{}{&metricsql.RollupExpr{Expr: &me}, gts} + args := []any{&metricsql.RollupExpr{Expr: &me}, gts} testRollupFunc(t, "share_gt_over_time", args, vExpected) } @@ -312,7 +312,7 @@ func TestRollupShareEQOverTime(t *testing.T) { Timestamps: []int64{123}, }} var me metricsql.MetricExpr - args := []interface{}{&metricsql.RollupExpr{Expr: &me}, eqs} + args := []any{&metricsql.RollupExpr{Expr: &me}, eqs} testRollupFunc(t, "share_eq_over_time", args, vExpected) } @@ -331,7 +331,7 @@ func TestRollupCountLEOverTime(t *testing.T) { Timestamps: []int64{123}, }} var me metricsql.MetricExpr - args := []interface{}{&metricsql.RollupExpr{Expr: &me}, les} + args := []any{&metricsql.RollupExpr{Expr: &me}, les} testRollupFunc(t, "count_le_over_time", args, vExpected) } @@ -354,7 +354,7 @@ func TestRollupCountGTOverTime(t *testing.T) { Timestamps: []int64{123}, }} var me metricsql.MetricExpr - args := []interface{}{&metricsql.RollupExpr{Expr: &me}, gts} + args := []any{&metricsql.RollupExpr{Expr: &me}, gts} testRollupFunc(t, "count_gt_over_time", args, vExpected) } @@ -377,7 +377,7 @@ func 
TestRollupCountEQOverTime(t *testing.T) { Timestamps: []int64{123}, }} var me metricsql.MetricExpr - args := []interface{}{&metricsql.RollupExpr{Expr: &me}, eqs} + args := []any{&metricsql.RollupExpr{Expr: &me}, eqs} testRollupFunc(t, "count_eq_over_time", args, vExpected) } @@ -396,7 +396,7 @@ func TestRollupCountNEOverTime(t *testing.T) { Timestamps: []int64{123}, }} var me metricsql.MetricExpr - args := []interface{}{&metricsql.RollupExpr{Expr: &me}, nes} + args := []any{&metricsql.RollupExpr{Expr: &me}, nes} testRollupFunc(t, "count_ne_over_time", args, vExpected) } @@ -415,7 +415,7 @@ func TestRollupSumLEOverTime(t *testing.T) { Timestamps: []int64{123}, }} var me metricsql.MetricExpr - args := []interface{}{&metricsql.RollupExpr{Expr: &me}, les} + args := []any{&metricsql.RollupExpr{Expr: &me}, les} testRollupFunc(t, "sum_le_over_time", args, vExpected) } @@ -438,7 +438,7 @@ func TestRollupSumGTOverTime(t *testing.T) { Timestamps: []int64{123}, }} var me metricsql.MetricExpr - args := []interface{}{&metricsql.RollupExpr{Expr: &me}, les} + args := []any{&metricsql.RollupExpr{Expr: &me}, les} testRollupFunc(t, "sum_gt_over_time", args, vExpected) } @@ -461,7 +461,7 @@ func TestRollupSumEQOverTime(t *testing.T) { Timestamps: []int64{123}, }} var me metricsql.MetricExpr - args := []interface{}{&metricsql.RollupExpr{Expr: &me}, les} + args := []any{&metricsql.RollupExpr{Expr: &me}, les} testRollupFunc(t, "sum_eq_over_time", args, vExpected) } @@ -484,7 +484,7 @@ func TestRollupQuantileOverTime(t *testing.T) { Timestamps: []int64{123}, }} var me metricsql.MetricExpr - args := []interface{}{phis, &metricsql.RollupExpr{Expr: &me}} + args := []any{phis, &metricsql.RollupExpr{Expr: &me}} testRollupFunc(t, "quantile_over_time", args, vExpected) } @@ -506,7 +506,7 @@ func TestRollupPredictLinear(t *testing.T) { Timestamps: []int64{123}, }} var me metricsql.MetricExpr - args := []interface{}{&metricsql.RollupExpr{Expr: &me}, secs} + args := []any{&metricsql.RollupExpr{Expr: &me}, secs} testRollupFunc(t, "predict_linear", args, vExpected) } @@ -545,7 +545,7 @@ func TestRollupHoltWinters(t *testing.T) { Timestamps: []int64{123}, }} var me metricsql.MetricExpr - args := []interface{}{&metricsql.RollupExpr{Expr: &me}, sfs, tfs} + args := []any{&metricsql.RollupExpr{Expr: &me}, sfs, tfs} testRollupFunc(t, "holt_winters", args, vExpected) } @@ -573,7 +573,7 @@ func TestRollupHoeffdingBoundLower(t *testing.T) { Timestamps: []int64{123}, }} var me metricsql.MetricExpr - args := []interface{}{phis, &metricsql.RollupExpr{Expr: &me}} + args := []any{phis, &metricsql.RollupExpr{Expr: &me}} testRollupFunc(t, "hoeffding_bound_lower", args, vExpected) } @@ -594,7 +594,7 @@ func TestRollupHoeffdingBoundUpper(t *testing.T) { Timestamps: []int64{123}, }} var me metricsql.MetricExpr - args := []interface{}{phis, &metricsql.RollupExpr{Expr: &me}} + args := []any{phis, &metricsql.RollupExpr{Expr: &me}} testRollupFunc(t, "hoeffding_bound_upper", args, vExpected) } @@ -611,7 +611,7 @@ func TestRollupNewRollupFuncSuccess(t *testing.T) { f := func(funcName string, vExpected float64) { t.Helper() var me metricsql.MetricExpr - args := []interface{}{&metricsql.RollupExpr{Expr: &me}} + args := []any{&metricsql.RollupExpr{Expr: &me}} testRollupFunc(t, funcName, args, vExpected) } @@ -668,7 +668,7 @@ func TestRollupNewRollupFuncError(t *testing.T) { t.Fatalf("expecting nil func; got %p", nrf) } - f := func(funcName string, args []interface{}) { + f := func(funcName string, args []any) { t.Helper() nrf := 
getRollupFunc(funcName) @@ -694,13 +694,13 @@ func TestRollupNewRollupFuncError(t *testing.T) { Timestamps: []int64{123}, }} me := &metricsql.MetricExpr{} - f("holt_winters", []interface{}{123, 123, 321}) - f("holt_winters", []interface{}{me, 123, 321}) - f("holt_winters", []interface{}{me, scalarTs, 321}) - f("predict_linear", []interface{}{123, 123}) - f("predict_linear", []interface{}{me, 123}) - f("quantile_over_time", []interface{}{123, 123}) - f("quantiles_over_time", []interface{}{123, 123}) + f("holt_winters", []any{123, 123, 321}) + f("holt_winters", []any{me, 123, 321}) + f("holt_winters", []any{me, scalarTs, 321}) + f("predict_linear", []any{123, 123}) + f("predict_linear", []any{me, 123}) + f("quantile_over_time", []any{123, 123}) + f("quantiles_over_time", []any{123, 123}) } func TestRollupNoWindowNoPoints(t *testing.T) { diff --git a/lib/blockcache/blockcache.go b/lib/blockcache/blockcache.go index c331f4227..14c8f9ffd 100644 --- a/lib/blockcache/blockcache.go +++ b/lib/blockcache/blockcache.go @@ -65,7 +65,7 @@ func (c *Cache) MustStop() { } // RemoveBlocksForPart removes all the blocks for the given part from the cache. -func (c *Cache) RemoveBlocksForPart(p interface{}) { +func (c *Cache) RemoveBlocksForPart(p any) { for _, shard := range c.shards { shard.RemoveBlocksForPart(p) } @@ -185,7 +185,7 @@ type cache struct { mu sync.Mutex // m contains cached blocks keyed by Key.Part and then by Key.Offset - m map[interface{}]map[uint64]*cacheEntry + m map[any]map[uint64]*cacheEntry // perKeyMisses contains per-block cache misses. // @@ -199,7 +199,7 @@ type cache struct { // Key represents a key, which uniquely identifies the Block. type Key struct { // Part must contain a pointer to part structure where the block belongs to. - Part interface{} + Part any // Offset is the offset of the block in the part. 
Offset uint64 @@ -233,12 +233,12 @@ type cacheEntry struct { func newCache(getMaxSizeBytes func() int) *cache { var c cache c.getMaxSizeBytes = getMaxSizeBytes - c.m = make(map[interface{}]map[uint64]*cacheEntry) + c.m = make(map[any]map[uint64]*cacheEntry) c.perKeyMisses = make(map[Key]int) return &c } -func (c *cache) RemoveBlocksForPart(p interface{}) { +func (c *cache) RemoveBlocksForPart(p any) { c.mu.Lock() defer c.mu.Unlock() @@ -398,13 +398,13 @@ func (lah *lastAccessHeap) Less(i, j int) bool { h := *lah return h[i].lastAccessTime < h[j].lastAccessTime } -func (lah *lastAccessHeap) Push(x interface{}) { +func (lah *lastAccessHeap) Push(x any) { e := x.(*cacheEntry) h := *lah e.heapIdx = len(h) *lah = append(h, e) } -func (lah *lastAccessHeap) Pop() interface{} { +func (lah *lastAccessHeap) Pop() any { h := *lah e := h[len(h)-1] diff --git a/lib/blockcache/blockcache_test.go b/lib/blockcache/blockcache_test.go index 175093028..1356225b1 100644 --- a/lib/blockcache/blockcache_test.go +++ b/lib/blockcache/blockcache_test.go @@ -27,7 +27,7 @@ func TestCache(t *testing.T) { t.Fatalf("unexpected SizeMaxBytes(); got %d; want %d", n, sizeMaxBytes) } offset := uint64(1234) - part := (interface{})("foobar") + part := (any)("foobar") k := Key{ Offset: offset, Part: part, @@ -145,7 +145,7 @@ func TestCacheConcurrentAccess(_ *testing.T) { func testCacheSetGet(c *Cache, worker int) { for i := 0; i < 1000; i++ { - part := (interface{})(i) + part := (any)(i) b := testBlock{} k := Key{ Offset: uint64(worker*1000 + i), diff --git a/lib/bytesutil/fast_string_matcher.go b/lib/bytesutil/fast_string_matcher.go index 5d2b0643e..7320cf13d 100644 --- a/lib/bytesutil/fast_string_matcher.go +++ b/lib/bytesutil/fast_string_matcher.go @@ -71,7 +71,7 @@ func (fsm *FastStringMatcher) Match(s string) bool { // Perform a global cleanup for fsm.m by removing items, which weren't accessed during the last 5 minutes. m := &fsm.m deadline := ct - uint64(cacheExpireDuration.Seconds()) - m.Range(func(k, v interface{}) bool { + m.Range(func(k, v any) bool { e := v.(*fsmEntry) if e.lastAccessTime.Load() < deadline { m.Delete(k) diff --git a/lib/bytesutil/fast_string_transformer.go b/lib/bytesutil/fast_string_transformer.go index 342071cc9..79e4222b6 100644 --- a/lib/bytesutil/fast_string_transformer.go +++ b/lib/bytesutil/fast_string_transformer.go @@ -82,7 +82,7 @@ func (fst *FastStringTransformer) Transform(s string) string { // Perform a global cleanup for fst.m by removing items, which weren't accessed during the last 5 minutes. m := &fst.m deadline := ct - uint64(cacheExpireDuration.Seconds()) - m.Range(func(k, v interface{}) bool { + m.Range(func(k, v any) bool { e := v.(*fstEntry) if e.lastAccessTime.Load() < deadline { m.Delete(k) diff --git a/lib/httpserver/httpserver.go b/lib/httpserver/httpserver.go index b52cd4c10..fe5f08753 100644 --- a/lib/httpserver/httpserver.go +++ b/lib/httpserver/httpserver.go @@ -186,7 +186,7 @@ func whetherToCloseConn(r *http.Request) bool { return ok && fasttime.UnixTimestamp() > *deadline } -var connDeadlineTimeKey = interface{}("connDeadlineSecs") +var connDeadlineTimeKey = any("connDeadlineSecs") // Stop stops the http server on the given addrs, which has been started via Serve func. func Stop(addrs []string) error { @@ -617,7 +617,7 @@ func (rwa *responseWriterWithAbort) abort() { } // Errorf writes formatted error message to w and to logger. 
-func Errorf(w http.ResponseWriter, r *http.Request, format string, args ...interface{}) { +func Errorf(w http.ResponseWriter, r *http.Request, format string, args ...any) { errStr := fmt.Sprintf(format, args...) remoteAddr := GetQuotedRemoteAddr(r) requestURI := GetRequestURI(r) diff --git a/lib/logger/logger.go b/lib/logger/logger.go index 913b87c96..91083a2a9 100644 --- a/lib/logger/logger.go +++ b/lib/logger/logger.go @@ -95,45 +95,45 @@ func StdErrorLogger() *log.Logger { } // Infof logs info message. -func Infof(format string, args ...interface{}) { +func Infof(format string, args ...any) { logLevel("INFO", format, args) } // Warnf logs warn message. -func Warnf(format string, args ...interface{}) { +func Warnf(format string, args ...any) { logLevel("WARN", format, args) } // Errorf logs error message. -func Errorf(format string, args ...interface{}) { +func Errorf(format string, args ...any) { logLevel("ERROR", format, args) } // WarnfSkipframes logs warn message and skips the given number of frames for the caller. -func WarnfSkipframes(skipframes int, format string, args ...interface{}) { +func WarnfSkipframes(skipframes int, format string, args ...any) { logLevelSkipframes(skipframes, "WARN", format, args) } // ErrorfSkipframes logs error message and skips the given number of frames for the caller. -func ErrorfSkipframes(skipframes int, format string, args ...interface{}) { +func ErrorfSkipframes(skipframes int, format string, args ...any) { logLevelSkipframes(skipframes, "ERROR", format, args) } // Fatalf logs fatal message and terminates the app. -func Fatalf(format string, args ...interface{}) { +func Fatalf(format string, args ...any) { logLevel("FATAL", format, args) } // Panicf logs panic message and panics. -func Panicf(format string, args ...interface{}) { +func Panicf(format string, args ...any) { logLevel("PANIC", format, args) } -func logLevel(level, format string, args []interface{}) { +func logLevel(level, format string, args []any) { logLevelSkipframes(1, level, format, args) } -func logLevelSkipframes(skipframes int, level, format string, args []interface{}) { +func logLevelSkipframes(skipframes int, level, format string, args []any) { if shouldSkipLog(level) { return } @@ -141,7 +141,7 @@ func logLevelSkipframes(skipframes int, level, format string, args []interface{} logMessage(level, msg, 3+skipframes) } -func formatLogMessage(maxArgLen int, format string, args []interface{}) string { +func formatLogMessage(maxArgLen int, format string, args []any) string { x := format // Limit the length of every string-like arg in order to prevent from too long log messages for i := range args { @@ -217,7 +217,7 @@ type logWriter struct { } func (lw *logWriter) Write(p []byte) (int, error) { - logLevelSkipframes(2, "ERROR", "%s", []interface{}{p}) + logLevelSkipframes(2, "ERROR", "%s", []any{p}) return len(p), nil } diff --git a/lib/logger/logger_test.go b/lib/logger/logger_test.go index dc1fcc28b..cb5a7b634 100644 --- a/lib/logger/logger_test.go +++ b/lib/logger/logger_test.go @@ -6,7 +6,7 @@ import ( ) func TestFormatLogMessage(t *testing.T) { - f := func(format string, args []interface{}, maxArgLen int, expectedResult string) { + f := func(format string, args []any, maxArgLen int, expectedResult string) { t.Helper() result := formatLogMessage(maxArgLen, format, args) if result != expectedResult { @@ -18,8 +18,8 @@ func TestFormatLogMessage(t *testing.T) { f("foobar", nil, 1, "foobar") // Format args not exceeding the maxArgLen - f("foo: %d, %s, %s, %s", []interface{}{123, 
"bar", []byte("baz"), fmt.Errorf("abc")}, 3, "foo: 123, bar, baz, abc") + f("foo: %d, %s, %s, %s", []any{123, "bar", []byte("baz"), fmt.Errorf("abc")}, 3, "foo: 123, bar, baz, abc") // Format args exceeding the maxArgLen - f("foo: %s, %q, %s", []interface{}{"abcde", fmt.Errorf("foo bar baz"), "xx"}, 4, `foo: a..e, "f..z", xx`) + f("foo: %s, %q, %s", []any{"abcde", fmt.Errorf("foo bar baz"), "xx"}, 4, `foo: a..e, "f..z", xx`) } diff --git a/lib/logger/throttler.go b/lib/logger/throttler.go index 843a337a6..4269c223d 100644 --- a/lib/logger/throttler.go +++ b/lib/logger/throttler.go @@ -49,7 +49,7 @@ func newLogThrottler(throttle time.Duration) *LogThrottler { } // Errorf logs error message. -func (lt *LogThrottler) Errorf(format string, args ...interface{}) { +func (lt *LogThrottler) Errorf(format string, args ...any) { select { case lt.ch <- struct{}{}: ErrorfSkipframes(1, format, args...) @@ -58,7 +58,7 @@ func (lt *LogThrottler) Errorf(format string, args ...interface{}) { } // Warnf logs warn message. -func (lt *LogThrottler) Warnf(format string, args ...interface{}) { +func (lt *LogThrottler) Warnf(format string, args ...any) { select { case lt.ch <- struct{}{}: WarnfSkipframes(1, format, args...) diff --git a/lib/logstorage/block_stream_merger.go b/lib/logstorage/block_stream_merger.go index 1ff7ec69d..7e57dfa66 100644 --- a/lib/logstorage/block_stream_merger.go +++ b/lib/logstorage/block_stream_merger.go @@ -302,12 +302,12 @@ func (h *blockStreamReadersHeap) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (h *blockStreamReadersHeap) Push(v interface{}) { +func (h *blockStreamReadersHeap) Push(v any) { bsr := v.(*blockStreamReader) *h = append(*h, bsr) } -func (h *blockStreamReadersHeap) Pop() interface{} { +func (h *blockStreamReadersHeap) Pop() any { x := *h bsr := x[len(x)-1] x[len(x)-1] = nil diff --git a/lib/lrucache/lrucache.go b/lib/lrucache/lrucache.go index e3199e95e..9104852bc 100644 --- a/lib/lrucache/lrucache.go +++ b/lib/lrucache/lrucache.go @@ -308,13 +308,13 @@ func (lah *lastAccessHeap) Less(i, j int) bool { h := *lah return h[i].lastAccessTime < h[j].lastAccessTime } -func (lah *lastAccessHeap) Push(x interface{}) { +func (lah *lastAccessHeap) Push(x any) { e := x.(*cacheEntry) h := *lah e.heapIdx = len(h) *lah = append(h, e) } -func (lah *lastAccessHeap) Pop() interface{} { +func (lah *lastAccessHeap) Pop() any { h := *lah e := h[len(h)-1] diff --git a/lib/mergeset/merge.go b/lib/mergeset/merge.go index 39c42c2d7..09dd59b6f 100644 --- a/lib/mergeset/merge.go +++ b/lib/mergeset/merge.go @@ -44,7 +44,7 @@ func mergeBlockStreams(ph *partHeader, bsw *blockStreamWriter, bsrs []*blockStre } var bsmPool = &sync.Pool{ - New: func() interface{} { + New: func() any { return &blockStreamMerger{} }, } @@ -238,14 +238,14 @@ func (bh *bsrHeap) Less(i, j int) bool { return x[i].CurrItem() < x[j].CurrItem() } -func (bh *bsrHeap) Pop() interface{} { +func (bh *bsrHeap) Pop() any { a := *bh v := a[len(a)-1] *bh = a[:len(a)-1] return v } -func (bh *bsrHeap) Push(x interface{}) { +func (bh *bsrHeap) Push(x any) { v := x.(*blockStreamReader) *bh = append(*bh, v) } diff --git a/lib/mergeset/table_search.go b/lib/mergeset/table_search.go index 70f741561..2da5f0cca 100644 --- a/lib/mergeset/table_search.go +++ b/lib/mergeset/table_search.go @@ -205,11 +205,11 @@ func (psh *partSearchHeap) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (psh *partSearchHeap) Push(x interface{}) { +func (psh *partSearchHeap) Push(x any) { *psh = append(*psh, x.(*partSearch)) } -func (psh *partSearchHeap) 
Pop() interface{} { +func (psh *partSearchHeap) Pop() any { a := *psh v := a[len(a)-1] *psh = a[:len(a)-1] diff --git a/lib/promauth/config.go b/lib/promauth/config.go index dbbcf0fc5..e067fa2c7 100644 --- a/lib/promauth/config.go +++ b/lib/promauth/config.go @@ -43,12 +43,12 @@ func NewSecret(s string) *Secret { // MarshalYAML implements yaml.Marshaler interface. // // It substitutes the secret with "" string. -func (s *Secret) MarshalYAML() (interface{}, error) { +func (s *Secret) MarshalYAML() (any, error) { return "", nil } // UnmarshalYAML implements yaml.Unmarshaler interface. -func (s *Secret) UnmarshalYAML(f func(interface{}) error) error { +func (s *Secret) UnmarshalYAML(f func(any) error) error { var secret string if err := f(&secret); err != nil { return fmt.Errorf("cannot parse secret: %w", err) diff --git a/lib/promrelabel/config.go b/lib/promrelabel/config.go index 8969da7e5..67b8e33d7 100644 --- a/lib/promrelabel/config.go +++ b/lib/promrelabel/config.go @@ -61,8 +61,8 @@ type MultiLineRegex struct { } // UnmarshalYAML unmarshals mlr from YAML passed to f. -func (mlr *MultiLineRegex) UnmarshalYAML(f func(interface{}) error) error { - var v interface{} +func (mlr *MultiLineRegex) UnmarshalYAML(f func(any) error) error { + var v any if err := f(&v); err != nil { return fmt.Errorf("cannot parse multiline regex: %w", err) } @@ -74,12 +74,12 @@ func (mlr *MultiLineRegex) UnmarshalYAML(f func(interface{}) error) error { return nil } -func stringValue(v interface{}) (string, error) { +func stringValue(v any) (string, error) { if v == nil { return "null", nil } switch x := v.(type) { - case []interface{}: + case []any: a := make([]string, len(x)) for i, xx := range x { s, err := stringValue(xx) @@ -106,7 +106,7 @@ func stringValue(v interface{}) (string, error) { } // MarshalYAML marshals mlr to YAML. -func (mlr *MultiLineRegex) MarshalYAML() (interface{}, error) { +func (mlr *MultiLineRegex) MarshalYAML() (any, error) { if strings.ContainsAny(mlr.S, "([") { // The mlr.S contains groups. Fall back to returning the regexp as is without splitting it into parts. // This fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2928 . diff --git a/lib/promrelabel/graphite.go b/lib/promrelabel/graphite.go index e962dcdea..0ae63afb6 100644 --- a/lib/promrelabel/graphite.go +++ b/lib/promrelabel/graphite.go @@ -8,7 +8,7 @@ import ( ) var graphiteMatchesPool = &sync.Pool{ - New: func() interface{} { + New: func() any { return &graphiteMatches{} }, } diff --git a/lib/promrelabel/if_expression.go b/lib/promrelabel/if_expression.go index 90790c244..e49010ebf 100644 --- a/lib/promrelabel/if_expression.go +++ b/lib/promrelabel/if_expression.go @@ -53,7 +53,7 @@ func (ie *IfExpression) Parse(s string) error { // UnmarshalJSON unmarshals ie from JSON data. func (ie *IfExpression) UnmarshalJSON(data []byte) error { - var v interface{} + var v any if err := json.Unmarshal(data, &v); err != nil { return err } @@ -72,15 +72,15 @@ func (ie *IfExpression) MarshalJSON() ([]byte, error) { } // UnmarshalYAML unmarshals ie from YAML passed to f. 
-func (ie *IfExpression) UnmarshalYAML(f func(interface{}) error) error { - var v interface{} +func (ie *IfExpression) UnmarshalYAML(f func(any) error) error { + var v any if err := f(&v); err != nil { return fmt.Errorf("cannot unmarshal `match` option: %w", err) } return ie.unmarshalFromInterface(v) } -func (ie *IfExpression) unmarshalFromInterface(v interface{}) error { +func (ie *IfExpression) unmarshalFromInterface(v any) error { ies := ie.ies[:0] switch t := v.(type) { case string: @@ -89,7 +89,7 @@ func (ie *IfExpression) unmarshalFromInterface(v interface{}) error { return fmt.Errorf("unexpected `match` option: %w", err) } ies = append(ies, ieLocal) - case []interface{}: + case []any: for _, x := range t { s, ok := x.(string) if !ok { @@ -109,7 +109,7 @@ func (ie *IfExpression) unmarshalFromInterface(v interface{}) error { } // MarshalYAML marshals ie to YAML -func (ie *IfExpression) MarshalYAML() (interface{}, error) { +func (ie *IfExpression) MarshalYAML() (any, error) { if ie == nil || len(ie.ies) == 0 { return nil, nil } @@ -198,7 +198,7 @@ func (ie *ifExpression) MarshalJSON() ([]byte, error) { } // UnmarshalYAML unmarshals ie from YAML passed to f. -func (ie *ifExpression) UnmarshalYAML(f func(interface{}) error) error { +func (ie *ifExpression) UnmarshalYAML(f func(any) error) error { var s string if err := f(&s); err != nil { return fmt.Errorf("cannot unmarshal `if` option: %w", err) @@ -210,7 +210,7 @@ func (ie *ifExpression) UnmarshalYAML(f func(interface{}) error) error { } // MarshalYAML marshals ie to YAML. -func (ie *ifExpression) MarshalYAML() (interface{}, error) { +func (ie *ifExpression) MarshalYAML() (any, error) { return ie.s, nil } diff --git a/lib/promscrape/config.go b/lib/promscrape/config.go index 69b5d2c6d..2c1ac2051 100644 --- a/lib/promscrape/config.go +++ b/lib/promscrape/config.go @@ -331,7 +331,7 @@ type ScrapeConfig struct { } func (sc *ScrapeConfig) mustStart(baseDir string) { - swosFunc := func(metaLabels *promutils.Labels) interface{} { + swosFunc := func(metaLabels *promutils.Labels) any { target := metaLabels.Get("__address__") sw, err := sc.swc.getScrapeWork(target, nil, metaLabels) if err != nil { diff --git a/lib/promscrape/discovery/azure/api.go b/lib/promscrape/discovery/azure/api.go index 90a590e6b..76eb2667d 100644 --- a/lib/promscrape/discovery/azure/api.go +++ b/lib/promscrape/discovery/azure/api.go @@ -72,7 +72,7 @@ type apiConfig struct { type refreshTokenFunc func() (string, time.Duration, error) func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { - v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) + v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) }) if err != nil { return nil, err } diff --git a/lib/promscrape/discovery/consul/api.go b/lib/promscrape/discovery/consul/api.go index aea1f15b1..ba1dc9ba8 100644 --- a/lib/promscrape/discovery/consul/api.go +++ b/lib/promscrape/discovery/consul/api.go @@ -30,7 +30,7 @@ func (ac *apiConfig) mustStop() { var configMap = discoveryutils.NewConfigMap() func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { - v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) + v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) }) if err != nil { return nil, err } diff --git a/lib/promscrape/discovery/consulagent/api.go b/lib/promscrape/discovery/consulagent/api.go index ba773ac18..2f0bbe653 100644 --- 
a/lib/promscrape/discovery/consulagent/api.go +++ b/lib/promscrape/discovery/consulagent/api.go @@ -24,7 +24,7 @@ func (ac *apiConfig) mustStop() { var configMap = discoveryutils.NewConfigMap() func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { - v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) + v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) }) if err != nil { return nil, err } diff --git a/lib/promscrape/discovery/digitalocean/api.go b/lib/promscrape/discovery/digitalocean/api.go index 69e60b8f9..094e5fab8 100644 --- a/lib/promscrape/discovery/digitalocean/api.go +++ b/lib/promscrape/discovery/digitalocean/api.go @@ -51,7 +51,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { } func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { - v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) + v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) }) if err != nil { return nil, err } diff --git a/lib/promscrape/discovery/docker/api.go b/lib/promscrape/discovery/docker/api.go index 4821261dc..b2ea0529f 100644 --- a/lib/promscrape/discovery/docker/api.go +++ b/lib/promscrape/discovery/docker/api.go @@ -22,7 +22,7 @@ type apiConfig struct { } func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { - v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) + v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) }) if err != nil { return nil, err } diff --git a/lib/promscrape/discovery/dockerswarm/api.go b/lib/promscrape/discovery/dockerswarm/api.go index 15a7ef6e0..8fbbf0e12 100644 --- a/lib/promscrape/discovery/dockerswarm/api.go +++ b/lib/promscrape/discovery/dockerswarm/api.go @@ -26,7 +26,7 @@ type apiConfig struct { } func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { - v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) + v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) }) if err != nil { return nil, err } diff --git a/lib/promscrape/discovery/dockerswarm/services.go b/lib/promscrape/discovery/dockerswarm/services.go index f82fadb13..0e46f9187 100644 --- a/lib/promscrape/discovery/dockerswarm/services.go +++ b/lib/promscrape/discovery/dockerswarm/services.go @@ -35,8 +35,8 @@ type containerSpec struct { } type serviceSpecMode struct { - Global interface{} - Replicated interface{} + Global any + Replicated any } type serviceUpdateStatus struct { diff --git a/lib/promscrape/discovery/dockerswarm/services_test.go b/lib/promscrape/discovery/dockerswarm/services_test.go index 12d804365..153f8ab12 100644 --- a/lib/promscrape/discovery/dockerswarm/services_test.go +++ b/lib/promscrape/discovery/dockerswarm/services_test.go @@ -102,7 +102,7 @@ func TestParseServicesResponse(t *testing.T) { }, }, Mode: serviceSpecMode{ - Replicated: map[string]interface{}{}, + Replicated: map[string]any{}, }, }, Endpoint: serviceEndpoint{ @@ -147,7 +147,7 @@ func TestAddServicesLabels(t *testing.T) { }, }, Mode: serviceSpecMode{ - Replicated: map[string]interface{}{}, + Replicated: map[string]any{}, }, }, Endpoint: serviceEndpoint{ diff --git a/lib/promscrape/discovery/dockerswarm/tasks_test.go b/lib/promscrape/discovery/dockerswarm/tasks_test.go index 3641d6024..9f3bf6b34 100644 --- 
a/lib/promscrape/discovery/dockerswarm/tasks_test.go +++ b/lib/promscrape/discovery/dockerswarm/tasks_test.go @@ -228,7 +228,7 @@ func TestAddTasksLabels(t *testing.T) { }, }, Mode: serviceSpecMode{ - Replicated: map[string]interface{}{}, + Replicated: map[string]any{}, }, }, Endpoint: serviceEndpoint{ diff --git a/lib/promscrape/discovery/ec2/api.go b/lib/promscrape/discovery/ec2/api.go index def9d0a17..033f3c5a3 100644 --- a/lib/promscrape/discovery/ec2/api.go +++ b/lib/promscrape/discovery/ec2/api.go @@ -21,7 +21,7 @@ type apiConfig struct { var configMap = discoveryutils.NewConfigMap() func getAPIConfig(sdc *SDConfig) (*apiConfig, error) { - v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc) }) + v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc) }) if err != nil { return nil, err } diff --git a/lib/promscrape/discovery/eureka/api.go b/lib/promscrape/discovery/eureka/api.go index 5c6832c6c..cdbb8b68f 100644 --- a/lib/promscrape/discovery/eureka/api.go +++ b/lib/promscrape/discovery/eureka/api.go @@ -45,7 +45,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { } func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { - v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) + v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) }) if err != nil { return nil, err } diff --git a/lib/promscrape/discovery/gce/api.go b/lib/promscrape/discovery/gce/api.go index c2953ebfc..b462dbec0 100644 --- a/lib/promscrape/discovery/gce/api.go +++ b/lib/promscrape/discovery/gce/api.go @@ -25,7 +25,7 @@ type apiConfig struct { var configMap = discoveryutils.NewConfigMap() func getAPIConfig(sdc *SDConfig) (*apiConfig, error) { - v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc) }) + v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc) }) if err != nil { return nil, err } diff --git a/lib/promscrape/discovery/gce/gce.go b/lib/promscrape/discovery/gce/gce.go index 1e24e90fb..4a6f17887 100644 --- a/lib/promscrape/discovery/gce/gce.go +++ b/lib/promscrape/discovery/gce/gce.go @@ -32,8 +32,8 @@ type ZoneYAML struct { } // UnmarshalYAML implements yaml.Unmarshaler -func (z *ZoneYAML) UnmarshalYAML(unmarshal func(interface{}) error) error { - var v interface{} +func (z *ZoneYAML) UnmarshalYAML(unmarshal func(any) error) error { + var v any if err := unmarshal(&v); err != nil { return err } @@ -41,7 +41,7 @@ func (z *ZoneYAML) UnmarshalYAML(unmarshal func(interface{}) error) error { switch t := v.(type) { case string: zones = []string{t} - case []interface{}: + case []any: for _, vv := range t { zone, ok := vv.(string) if !ok { @@ -57,7 +57,7 @@ func (z *ZoneYAML) UnmarshalYAML(unmarshal func(interface{}) error) error { } // MarshalYAML implements yaml.Marshaler -func (z ZoneYAML) MarshalYAML() (interface{}, error) { +func (z ZoneYAML) MarshalYAML() (any, error) { return z.Zones, nil } diff --git a/lib/promscrape/discovery/hetzner/api.go b/lib/promscrape/discovery/hetzner/api.go index e34b29cfb..7023015a3 100644 --- a/lib/promscrape/discovery/hetzner/api.go +++ b/lib/promscrape/discovery/hetzner/api.go @@ -15,7 +15,7 @@ type apiConfig struct { } func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { - v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) + v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, 
baseDir) }) if err != nil { return nil, err } diff --git a/lib/promscrape/discovery/http/api.go b/lib/promscrape/discovery/http/api.go index 5233e4801..50842c327 100644 --- a/lib/promscrape/discovery/http/api.go +++ b/lib/promscrape/discovery/http/api.go @@ -58,7 +58,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { } func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { - v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) + v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) }) if err != nil { return nil, err } diff --git a/lib/promscrape/discovery/kubernetes/api_watcher.go b/lib/promscrape/discovery/kubernetes/api_watcher.go index 1160f409d..67b858058 100644 --- a/lib/promscrape/discovery/kubernetes/api_watcher.go +++ b/lib/promscrape/discovery/kubernetes/api_watcher.go @@ -65,7 +65,7 @@ type apiWatcher struct { gw *groupWatcher // swosByURLWatcher contains per-urlWatcher maps of ScrapeWork objects for the given apiWatcher - swosByURLWatcher map[*urlWatcher]map[string][]interface{} + swosByURLWatcher map[*urlWatcher]map[string][]any swosByURLWatcherLock sync.Mutex swosCount *metrics.Counter @@ -94,7 +94,7 @@ func newAPIWatcher(apiServer string, ac *promauth.Config, sdc *SDConfig, swcFunc role: role, swcFunc: swcFunc, gw: gw, - swosByURLWatcher: make(map[*urlWatcher]map[string][]interface{}), + swosByURLWatcher: make(map[*urlWatcher]map[string][]any), swosCount: metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_scrape_works{role=%q}`, role)), } return aw, nil @@ -106,7 +106,7 @@ func (aw *apiWatcher) mustStart() { aw.gw.apiWatcherInflightStartCalls.Add(-1) } -func (aw *apiWatcher) updateSwosCount(multiplier int, swosByKey map[string][]interface{}) { +func (aw *apiWatcher) updateSwosCount(multiplier int, swosByKey map[string][]any) { n := 0 for _, swos := range swosByKey { n += len(swos) @@ -121,11 +121,11 @@ func (aw *apiWatcher) mustStop() { for _, swosByKey := range aw.swosByURLWatcher { aw.updateSwosCount(-1, swosByKey) } - aw.swosByURLWatcher = make(map[*urlWatcher]map[string][]interface{}) + aw.swosByURLWatcher = make(map[*urlWatcher]map[string][]any) aw.swosByURLWatcherLock.Unlock() } -func (aw *apiWatcher) replaceScrapeWorks(uw *urlWatcher, swosByKey map[string][]interface{}) { +func (aw *apiWatcher) replaceScrapeWorks(uw *urlWatcher, swosByKey map[string][]any) { aw.swosByURLWatcherLock.Lock() aw.updateSwosCount(-1, aw.swosByURLWatcher[uw]) aw.updateSwosCount(1, swosByKey) @@ -133,11 +133,11 @@ func (aw *apiWatcher) replaceScrapeWorks(uw *urlWatcher, swosByKey map[string][] aw.swosByURLWatcherLock.Unlock() } -func (aw *apiWatcher) updateScrapeWorks(uw *urlWatcher, swosByKey map[string][]interface{}) { +func (aw *apiWatcher) updateScrapeWorks(uw *urlWatcher, swosByKey map[string][]any) { aw.swosByURLWatcherLock.Lock() dst := aw.swosByURLWatcher[uw] if dst == nil { - dst = make(map[string][]interface{}) + dst = make(map[string][]any) aw.swosByURLWatcher[uw] = dst } for key, swos := range swosByKey { @@ -156,7 +156,7 @@ func (aw *apiWatcher) setScrapeWorks(uw *urlWatcher, key string, labelss []*prom aw.swosByURLWatcherLock.Lock() swosByKey := aw.swosByURLWatcher[uw] if swosByKey == nil { - swosByKey = make(map[string][]interface{}) + swosByKey = make(map[string][]any) aw.swosByURLWatcher[uw] = swosByKey } aw.swosCount.Add(len(swos) - len(swosByKey[key])) @@ -178,9 +178,9 @@ func (aw *apiWatcher) removeScrapeWorks(uw *urlWatcher, key string) 
{ aw.swosByURLWatcherLock.Unlock() } -func getScrapeWorkObjectsForLabels(swcFunc ScrapeWorkConstructorFunc, labelss []*promutils.Labels) []interface{} { +func getScrapeWorkObjectsForLabels(swcFunc ScrapeWorkConstructorFunc, labelss []*promutils.Labels) []any { // Do not pre-allocate swos, since it is likely the swos will be empty because of relabeling - var swos []interface{} + var swos []any for _, labels := range labelss { swo := swcFunc(labels) // The reflect check is needed because of https://mangatmodi.medium.com/go-check-nil-interface-the-right-way-d142776edef1 @@ -192,10 +192,10 @@ func getScrapeWorkObjectsForLabels(swcFunc ScrapeWorkConstructorFunc, labelss [] } // getScrapeWorkObjects returns all the ScrapeWork objects for the given aw. -func (aw *apiWatcher) getScrapeWorkObjects() []interface{} { +func (aw *apiWatcher) getScrapeWorkObjects() []any { aw.gw.registerPendingAPIWatchers() - swos := make([]interface{}, 0, aw.swosCount.Get()) + swos := make([]any, 0, aw.swosCount.Get()) aw.swosByURLWatcherLock.Lock() for _, swosByKey := range aw.swosByURLWatcher { for _, swosLocal := range swosByKey { @@ -352,7 +352,7 @@ func groupWatchersCleaner() { type swosByKeyWithLock struct { mu sync.Mutex - swosByKey map[string][]interface{} + swosByKey map[string][]any } func (gw *groupWatcher) getScrapeWorkObjectsByAPIWatcherLocked(objectsByKey map[string]object, awsMap map[*apiWatcher]struct{}) map[*apiWatcher]*swosByKeyWithLock { @@ -362,7 +362,7 @@ func (gw *groupWatcher) getScrapeWorkObjectsByAPIWatcherLocked(objectsByKey map[ swosByAPIWatcher := make(map[*apiWatcher]*swosByKeyWithLock, len(awsMap)) for aw := range awsMap { swosByAPIWatcher[aw] = &swosByKeyWithLock{ - swosByKey: make(map[string][]interface{}), + swosByKey: make(map[string][]any), } } diff --git a/lib/promscrape/discovery/kubernetes/api_watcher_test.go b/lib/promscrape/discovery/kubernetes/api_watcher_test.go index e767c3a8f..f12715cbe 100644 --- a/lib/promscrape/discovery/kubernetes/api_watcher_test.go +++ b/lib/promscrape/discovery/kubernetes/api_watcher_test.go @@ -921,8 +921,8 @@ func TestGetScrapeWorkObjects(t *testing.T) { } testAPIServer := httptest.NewServer(mux) tc.sdc.APIServer = testAPIServer.URL - ac, err := newAPIConfig(tc.sdc, "", func(metaLabels *promutils.Labels) interface{} { - var res []interface{} + ac, err := newAPIConfig(tc.sdc, "", func(metaLabels *promutils.Labels) any { + var res []any for _, label := range metaLabels.Labels { res = append(res, label.Name) } diff --git a/lib/promscrape/discovery/kubernetes/kubernetes.go b/lib/promscrape/discovery/kubernetes/kubernetes.go index 0026838ec..108c831af 100644 --- a/lib/promscrape/discovery/kubernetes/kubernetes.go +++ b/lib/promscrape/discovery/kubernetes/kubernetes.go @@ -70,12 +70,12 @@ type Selector struct { } // ScrapeWorkConstructorFunc must construct ScrapeWork object for the given metaLabels. -type ScrapeWorkConstructorFunc func(metaLabels *promutils.Labels) interface{} +type ScrapeWorkConstructorFunc func(metaLabels *promutils.Labels) any // GetScrapeWorkObjects returns ScrapeWork objects for the given sdc. // // This function must be called after MustStart call. 
-func (sdc *SDConfig) GetScrapeWorkObjects() ([]interface{}, error) { +func (sdc *SDConfig) GetScrapeWorkObjects() ([]any, error) { if sdc.cfg == nil { return nil, sdc.startErr } diff --git a/lib/promscrape/discovery/kuma/api.go b/lib/promscrape/discovery/kuma/api.go index 3695dba24..73ca19318 100644 --- a/lib/promscrape/discovery/kuma/api.go +++ b/lib/promscrape/discovery/kuma/api.go @@ -42,7 +42,7 @@ type apiConfig struct { } func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { - v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) + v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) }) if err != nil { return nil, err } diff --git a/lib/promscrape/discovery/nomad/api.go b/lib/promscrape/discovery/nomad/api.go index a34eb1a07..a72700248 100644 --- a/lib/promscrape/discovery/nomad/api.go +++ b/lib/promscrape/discovery/nomad/api.go @@ -30,7 +30,7 @@ func (ac *apiConfig) mustStop() { var configMap = discoveryutils.NewConfigMap() func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { - v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) + v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) }) if err != nil { return nil, err } diff --git a/lib/promscrape/discovery/openstack/api.go b/lib/promscrape/discovery/openstack/api.go index 14b6bb6a4..7bc1d5c16 100644 --- a/lib/promscrape/discovery/openstack/api.go +++ b/lib/promscrape/discovery/openstack/api.go @@ -61,7 +61,7 @@ func (cfg *apiConfig) getFreshAPICredentials() (*apiCredentials, error) { } func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { - v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) + v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) }) if err != nil { return nil, err } diff --git a/lib/promscrape/discovery/openstack/auth.go b/lib/promscrape/discovery/openstack/auth.go index b26afbf91..09a6ea2bb 100644 --- a/lib/promscrape/discovery/openstack/auth.go +++ b/lib/promscrape/discovery/openstack/auth.go @@ -90,8 +90,8 @@ func buildAuthRequestBody(sdc *SDConfig) ([]byte, error) { ApplicationCredential *applicationCredentialReq `json:"application_credential,omitempty"` } type authReq struct { - Identity identityReq `json:"identity"` - Scope map[string]interface{} `json:"scope,omitempty"` + Identity identityReq `json:"identity"` + Scope map[string]any `json:"scope,omitempty"` } type request struct { Auth authReq `json:"auth"` @@ -233,7 +233,7 @@ func buildAuthRequestBody(sdc *SDConfig) ([]byte, error) { // buildScope adds scope information into auth request // // See https://docs.openstack.org/api-ref/identity/v3/#password-authentication-with-unscoped-authorization -func buildScope(sdc *SDConfig) (map[string]interface{}, error) { +func buildScope(sdc *SDConfig) (map[string]any, error) { if len(sdc.ProjectName) == 0 && len(sdc.ProjectID) == 0 && len(sdc.DomainID) == 0 && len(sdc.DomainName) == 0 { return nil, nil } @@ -244,24 +244,24 @@ func buildScope(sdc *SDConfig) (map[string]interface{}, error) { return nil, fmt.Errorf("domain_id or domain_name must present") } if len(sdc.DomainID) > 0 { - return map[string]interface{}{ - "project": map[string]interface{}{ + return map[string]any{ + "project": map[string]any{ "name": &sdc.ProjectName, - "domain": map[string]interface{}{"id": &sdc.DomainID}, + "domain": map[string]any{"id": &sdc.DomainID}, }, }, nil } if 
len(sdc.DomainName) > 0 { - return map[string]interface{}{ - "project": map[string]interface{}{ + return map[string]any{ + "project": map[string]any{ "name": &sdc.ProjectName, - "domain": map[string]interface{}{"name": &sdc.DomainName}, + "domain": map[string]any{"name": &sdc.DomainName}, }, }, nil } } else if len(sdc.ProjectID) > 0 { - return map[string]interface{}{ - "project": map[string]interface{}{ + return map[string]any{ + "project": map[string]any{ "id": &sdc.ProjectID, }, }, nil @@ -269,14 +269,14 @@ func buildScope(sdc *SDConfig) (map[string]interface{}, error) { if len(sdc.DomainName) > 0 { return nil, fmt.Errorf("both domain_id and domain_name present") } - return map[string]interface{}{ - "domain": map[string]interface{}{ + return map[string]any{ + "domain": map[string]any{ "id": &sdc.DomainID, }, }, nil } else if len(sdc.DomainName) > 0 { - return map[string]interface{}{ - "domain": map[string]interface{}{ + return map[string]any{ + "domain": map[string]any{ "name": &sdc.DomainName, }, }, nil diff --git a/lib/promscrape/discovery/vultr/api.go b/lib/promscrape/discovery/vultr/api.go index fd32e54ce..0a9af09e4 100644 --- a/lib/promscrape/discovery/vultr/api.go +++ b/lib/promscrape/discovery/vultr/api.go @@ -17,7 +17,7 @@ type apiConfig struct { // getAPIConfig get or create API config from configMap. func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { - v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) + v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) }) if err != nil { return nil, err } diff --git a/lib/promscrape/discovery/yandexcloud/api.go b/lib/promscrape/discovery/yandexcloud/api.go index dbe91400b..3d5d81341 100644 --- a/lib/promscrape/discovery/yandexcloud/api.go +++ b/lib/promscrape/discovery/yandexcloud/api.go @@ -44,7 +44,7 @@ type apiConfig struct { } func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { - v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) + v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) }) if err != nil { return nil, err } diff --git a/lib/promscrape/discoveryutils/config_map.go b/lib/promscrape/discoveryutils/config_map.go index d4c772f34..ca9dc98df 100644 --- a/lib/promscrape/discoveryutils/config_map.go +++ b/lib/promscrape/discoveryutils/config_map.go @@ -11,7 +11,7 @@ import ( // It automatically removes old configs which weren't accessed recently. type ConfigMap struct { mu sync.Mutex - m map[interface{}]interface{} + m map[any]any entriesCount *metrics.Counter } @@ -19,7 +19,7 @@ type ConfigMap struct { // NewConfigMap creates ConfigMap func NewConfigMap() *ConfigMap { return &ConfigMap{ - m: make(map[interface{}]interface{}), + m: make(map[any]any), entriesCount: metrics.GetOrCreateCounter(`vm_promscrape_discoveryutils_configmap_entries_count`), } } @@ -29,7 +29,7 @@ func NewConfigMap() *ConfigMap { // Key must be a pointer. // // It creates new config map with newConfig() call if cm doesn't contain config under the given key. -func (cm *ConfigMap) Get(key interface{}, newConfig func() (interface{}, error)) (interface{}, error) { +func (cm *ConfigMap) Get(key any, newConfig func() (any, error)) (any, error) { cm.mu.Lock() defer cm.mu.Unlock() @@ -47,7 +47,7 @@ func (cm *ConfigMap) Get(key interface{}, newConfig func() (interface{}, error)) } // Delete deletes config for the given key from cm and returns it. 
-func (cm *ConfigMap) Delete(key interface{}) interface{} { +func (cm *ConfigMap) Delete(key any) any { cm.mu.Lock() defer cm.mu.Unlock() diff --git a/lib/promscrape/targetstatus.go b/lib/promscrape/targetstatus.go index 364acff4c..859fb68d8 100644 --- a/lib/promscrape/targetstatus.go +++ b/lib/promscrape/targetstatus.go @@ -414,7 +414,7 @@ func labelsHash(labels *promutils.Labels) uint64 { } var xxhashPool = &sync.Pool{ - New: func() interface{} { + New: func() any { return xxhash.New() }, } diff --git a/lib/promutils/duration.go b/lib/promutils/duration.go index 2fc57c52e..0811e798e 100644 --- a/lib/promutils/duration.go +++ b/lib/promutils/duration.go @@ -19,12 +19,12 @@ func NewDuration(d time.Duration) *Duration { } // MarshalYAML implements yaml.Marshaler interface. -func (pd Duration) MarshalYAML() (interface{}, error) { +func (pd Duration) MarshalYAML() (any, error) { return pd.D.String(), nil } // UnmarshalYAML implements yaml.Unmarshaler interface. -func (pd *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (pd *Duration) UnmarshalYAML(unmarshal func(any) error) error { var s string if err := unmarshal(&s); err != nil { return err diff --git a/lib/promutils/duration_test.go b/lib/promutils/duration_test.go index 2c09b68d8..d47d187de 100644 --- a/lib/promutils/duration_test.go +++ b/lib/promutils/duration_test.go @@ -28,7 +28,7 @@ func TestDuration(t *testing.T) { if s := v.(string); s != sExpected { t.Fatalf("unexpected value from MarshalYAML(); got %q; want %q", s, sExpected) } - if err := d.UnmarshalYAML(func(v interface{}) error { + if err := d.UnmarshalYAML(func(v any) error { sp := v.(*string) s := "1w3d5h" *sp = s diff --git a/lib/promutils/labels.go b/lib/promutils/labels.go index 567157097..53da300f3 100644 --- a/lib/promutils/labels.go +++ b/lib/promutils/labels.go @@ -34,13 +34,13 @@ func NewLabelsFromMap(m map[string]string) *Labels { } // MarshalYAML implements yaml.Marshaler interface. -func (x *Labels) MarshalYAML() (interface{}, error) { +func (x *Labels) MarshalYAML() (any, error) { m := x.ToMap() return m, nil } // UnmarshalYAML implements yaml.Unmarshaler interface. -func (x *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (x *Labels) UnmarshalYAML(unmarshal func(any) error) error { var m map[string]string if err := unmarshal(&m); err != nil { return err diff --git a/lib/protoparser/native/stream/streamparser.go b/lib/protoparser/native/stream/streamparser.go index 6accdf28c..6e36fff70 100644 --- a/lib/protoparser/native/stream/streamparser.go +++ b/lib/protoparser/native/stream/streamparser.go @@ -197,7 +197,7 @@ func (uw *unmarshalWork) unmarshal() error { } var blockPool = &sync.Pool{ - New: func() interface{} { + New: func() any { return &storage.Block{} }, } diff --git a/lib/proxy/proxy.go b/lib/proxy/proxy.go index 9bf9103c7..8e723aff9 100644 --- a/lib/proxy/proxy.go +++ b/lib/proxy/proxy.go @@ -98,7 +98,7 @@ func (u *URL) getAuthHeader(ac *promauth.Config) (string, error) { } // MarshalYAML implements yaml.Marshaler interface. -func (u *URL) MarshalYAML() (interface{}, error) { +func (u *URL) MarshalYAML() (any, error) { if u.URL == nil { return nil, nil } @@ -106,7 +106,7 @@ func (u *URL) MarshalYAML() (interface{}, error) { } // UnmarshalYAML implements yaml.Unmarshaler interface. 
-func (u *URL) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (u *URL) UnmarshalYAML(unmarshal func(any) error) error { var s string if err := unmarshal(&s); err != nil { return err diff --git a/lib/querytracer/tracer.go b/lib/querytracer/tracer.go index 3b4595c2e..95645e9e4 100644 --- a/lib/querytracer/tracer.go +++ b/lib/querytracer/tracer.go @@ -49,7 +49,7 @@ type Tracer struct { // If enabled isn't set, then all function calls to the returned object will be no-op. // // Done or Donef must be called when the tracer should be finished. -func New(enabled bool, format string, args ...interface{}) *Tracer { +func New(enabled bool, format string, args ...any) *Tracer { if *denyQueryTracing || !enabled { return nil } @@ -73,7 +73,7 @@ func (t *Tracer) Enabled() bool { // NewChild cannot be called from concurrent goroutines. // Create children tracers from a single goroutine and then pass them // to concurrent goroutines. -func (t *Tracer) NewChild(format string, args ...interface{}) *Tracer { +func (t *Tracer) NewChild(format string, args ...any) *Tracer { if t == nil { return nil } @@ -107,7 +107,7 @@ func (t *Tracer) Done() { // // Donef cannot be called multiple times. // Other Tracer functions cannot be called after Donef call. -func (t *Tracer) Donef(format string, args ...interface{}) { +func (t *Tracer) Donef(format string, args ...any) { if t == nil { return } @@ -122,7 +122,7 @@ func (t *Tracer) Donef(format string, args ...interface{}) { // Printf adds new fmt.Sprintf(format, args...) message to t. // // Printf cannot be called from concurrent goroutines. -func (t *Tracer) Printf(format string, args ...interface{}) { +func (t *Tracer) Printf(format string, args ...any) { if t == nil { return } diff --git a/lib/storage/block_stream_merger.go b/lib/storage/block_stream_merger.go index 7faba9611..036ed3d13 100644 --- a/lib/storage/block_stream_merger.go +++ b/lib/storage/block_stream_merger.go @@ -143,11 +143,11 @@ func (bsrh *blockStreamReaderHeap) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (bsrh *blockStreamReaderHeap) Push(x interface{}) { +func (bsrh *blockStreamReaderHeap) Push(x any) { *bsrh = append(*bsrh, x.(*blockStreamReader)) } -func (bsrh *blockStreamReaderHeap) Pop() interface{} { +func (bsrh *blockStreamReaderHeap) Pop() any { a := *bsrh v := a[len(a)-1] *bsrh = a[:len(a)-1] diff --git a/lib/storage/index_db.go b/lib/storage/index_db.go index 6e8dd1949..b4078fa7f 100644 --- a/lib/storage/index_db.go +++ b/lib/storage/index_db.go @@ -1611,11 +1611,11 @@ func (th *topHeap) Swap(i, j int) { a[j], a[i] = a[i], a[j] } -func (th *topHeap) Push(_ interface{}) { +func (th *topHeap) Push(_ any) { panic(fmt.Errorf("BUG: Push shouldn't be called")) } -func (th *topHeap) Pop() interface{} { +func (th *topHeap) Pop() any { panic(fmt.Errorf("BUG: Pop shouldn't be called")) } diff --git a/lib/storage/merge.go b/lib/storage/merge.go index 4eb0e10cd..e5c526af5 100644 --- a/lib/storage/merge.go +++ b/lib/storage/merge.go @@ -31,7 +31,7 @@ func mergeBlockStreams(ph *partHeader, bsw *blockStreamWriter, bsrs []*blockStre } var bsmPool = &sync.Pool{ - New: func() interface{} { + New: func() any { return &blockStreamMerger{} }, } diff --git a/lib/storage/partition_search.go b/lib/storage/partition_search.go index cc1bacd82..658fb0cab 100644 --- a/lib/storage/partition_search.go +++ b/lib/storage/partition_search.go @@ -190,11 +190,11 @@ func (psh *partSearchHeap) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (psh *partSearchHeap) Push(x interface{}) { +func (psh 
*partSearchHeap) Push(x any) { *psh = append(*psh, x.(*partSearch)) } -func (psh *partSearchHeap) Pop() interface{} { +func (psh *partSearchHeap) Pop() any { a := *psh v := a[len(a)-1] *psh = a[:len(a)-1] diff --git a/lib/storage/table_search.go b/lib/storage/table_search.go index 1809a8ae1..eb9631111 100644 --- a/lib/storage/table_search.go +++ b/lib/storage/table_search.go @@ -194,11 +194,11 @@ func (ptsh *partitionSearchHeap) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (ptsh *partitionSearchHeap) Push(x interface{}) { +func (ptsh *partitionSearchHeap) Push(x any) { *ptsh = append(*ptsh, x.(*partitionSearch)) } -func (ptsh *partitionSearchHeap) Pop() interface{} { +func (ptsh *partitionSearchHeap) Pop() any { a := *ptsh v := a[len(a)-1] *ptsh = a[:len(a)-1] diff --git a/lib/streamaggr/avg.go b/lib/streamaggr/avg.go index 2d0546058..522c2fcb5 100644 --- a/lib/streamaggr/avg.go +++ b/lib/streamaggr/avg.go @@ -64,7 +64,7 @@ func (as *avgAggrState) pushSamples(samples []pushSample) { func (as *avgAggrState) flushState(ctx *flushCtx, resetState bool) { currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000 m := &as.m - m.Range(func(k, v interface{}) bool { + m.Range(func(k, v any) bool { if resetState { // Atomically delete the entry from the map, so new entry is created for the next flush. m.Delete(k) diff --git a/lib/streamaggr/count_samples.go b/lib/streamaggr/count_samples.go index f48ae44dd..6a05955cc 100644 --- a/lib/streamaggr/count_samples.go +++ b/lib/streamaggr/count_samples.go @@ -61,7 +61,7 @@ func (as *countSamplesAggrState) pushSamples(samples []pushSample) { func (as *countSamplesAggrState) flushState(ctx *flushCtx, resetState bool) { currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000 m := &as.m - m.Range(func(k, v interface{}) bool { + m.Range(func(k, v any) bool { if resetState { // Atomically delete the entry from the map, so new entry is created for the next flush. m.Delete(k) diff --git a/lib/streamaggr/count_series.go b/lib/streamaggr/count_series.go index edf4a95a4..f1037c801 100644 --- a/lib/streamaggr/count_series.go +++ b/lib/streamaggr/count_series.go @@ -70,7 +70,7 @@ func (as *countSeriesAggrState) pushSamples(samples []pushSample) { func (as *countSeriesAggrState) flushState(ctx *flushCtx, resetState bool) { currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000 m := &as.m - m.Range(func(k, v interface{}) bool { + m.Range(func(k, v any) bool { if resetState { // Atomically delete the entry from the map, so new entry is created for the next flush. 
m.Delete(k) diff --git a/lib/streamaggr/histogram_bucket.go b/lib/streamaggr/histogram_bucket.go index afb7ac8b7..c5fe06630 100644 --- a/lib/streamaggr/histogram_bucket.go +++ b/lib/streamaggr/histogram_bucket.go @@ -69,7 +69,7 @@ func (as *histogramBucketAggrState) pushSamples(samples []pushSample) { func (as *histogramBucketAggrState) removeOldEntries(ctx *flushCtx, currentTime uint64) { m := &as.m var staleOutputSamples int - m.Range(func(k, v interface{}) bool { + m.Range(func(k, v any) bool { sv := v.(*histogramBucketStateValue) sv.mu.Lock() @@ -96,7 +96,7 @@ func (as *histogramBucketAggrState) flushState(ctx *flushCtx, _ bool) { as.removeOldEntries(ctx, currentTime) m := &as.m - m.Range(func(k, v interface{}) bool { + m.Range(func(k, v any) bool { sv := v.(*histogramBucketStateValue) sv.mu.Lock() if !sv.deleted { diff --git a/lib/streamaggr/last.go b/lib/streamaggr/last.go index 9bf4e08e0..eaa803e83 100644 --- a/lib/streamaggr/last.go +++ b/lib/streamaggr/last.go @@ -66,7 +66,7 @@ func (as *lastAggrState) pushSamples(samples []pushSample) { func (as *lastAggrState) flushState(ctx *flushCtx, resetState bool) { currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000 m := &as.m - m.Range(func(k, v interface{}) bool { + m.Range(func(k, v any) bool { if resetState { // Atomically delete the entry from the map, so new entry is created for the next flush. m.Delete(k) diff --git a/lib/streamaggr/max.go b/lib/streamaggr/max.go index a106d3300..bbbb3c83d 100644 --- a/lib/streamaggr/max.go +++ b/lib/streamaggr/max.go @@ -63,7 +63,7 @@ func (as *maxAggrState) pushSamples(samples []pushSample) { func (as *maxAggrState) flushState(ctx *flushCtx, resetState bool) { currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000 m := &as.m - m.Range(func(k, v interface{}) bool { + m.Range(func(k, v any) bool { if resetState { // Atomically delete the entry from the map, so new entry is created for the next flush. m.Delete(k) diff --git a/lib/streamaggr/min.go b/lib/streamaggr/min.go index b03105c92..8970d41a7 100644 --- a/lib/streamaggr/min.go +++ b/lib/streamaggr/min.go @@ -63,7 +63,7 @@ func (as *minAggrState) pushSamples(samples []pushSample) { func (as *minAggrState) flushState(ctx *flushCtx, resetState bool) { currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000 m := &as.m - m.Range(func(k, v interface{}) bool { + m.Range(func(k, v any) bool { if resetState { // Atomically delete the entry from the map, so new entry is created for the next flush. m.Delete(k) diff --git a/lib/streamaggr/quantiles.go b/lib/streamaggr/quantiles.go index edaa128f5..6b37b22d4 100644 --- a/lib/streamaggr/quantiles.go +++ b/lib/streamaggr/quantiles.go @@ -70,7 +70,7 @@ func (as *quantilesAggrState) flushState(ctx *flushCtx, resetState bool) { phis := as.phis var quantiles []float64 var b []byte - m.Range(func(k, v interface{}) bool { + m.Range(func(k, v any) bool { if resetState { // Atomically delete the entry from the map, so new entry is created for the next flush. 
m.Delete(k) diff --git a/lib/streamaggr/rate.go b/lib/streamaggr/rate.go index 07fd2783b..6fe401d37 100644 --- a/lib/streamaggr/rate.go +++ b/lib/streamaggr/rate.go @@ -111,7 +111,7 @@ func (as *rateAggrState) flushState(ctx *flushCtx, _ bool) { var staleOutputSamples, staleInputSamples int m := &as.m - m.Range(func(k, v interface{}) bool { + m.Range(func(k, v any) bool { sv := v.(*rateStateValue) sv.mu.Lock() diff --git a/lib/streamaggr/stddev.go b/lib/streamaggr/stddev.go index ef10177a0..26ea2db8c 100644 --- a/lib/streamaggr/stddev.go +++ b/lib/streamaggr/stddev.go @@ -64,7 +64,7 @@ func (as *stddevAggrState) pushSamples(samples []pushSample) { func (as *stddevAggrState) flushState(ctx *flushCtx, resetState bool) { currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000 m := &as.m - m.Range(func(k, v interface{}) bool { + m.Range(func(k, v any) bool { if resetState { // Atomically delete the entry from the map, so new entry is created for the next flush. m.Delete(k) diff --git a/lib/streamaggr/stdvar.go b/lib/streamaggr/stdvar.go index dd4c21efe..35a15097e 100644 --- a/lib/streamaggr/stdvar.go +++ b/lib/streamaggr/stdvar.go @@ -63,7 +63,7 @@ func (as *stdvarAggrState) pushSamples(samples []pushSample) { func (as *stdvarAggrState) flushState(ctx *flushCtx, resetState bool) { currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000 m := &as.m - m.Range(func(k, v interface{}) bool { + m.Range(func(k, v any) bool { if resetState { // Atomically delete the entry from the map, so new entry is created for the next flush. m.Delete(k) diff --git a/lib/streamaggr/sum_samples.go b/lib/streamaggr/sum_samples.go index eb621ce5c..685e782c5 100644 --- a/lib/streamaggr/sum_samples.go +++ b/lib/streamaggr/sum_samples.go @@ -61,7 +61,7 @@ func (as *sumSamplesAggrState) pushSamples(samples []pushSample) { func (as *sumSamplesAggrState) flushState(ctx *flushCtx, resetState bool) { currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000 m := &as.m - m.Range(func(k, v interface{}) bool { + m.Range(func(k, v any) bool { if resetState { // Atomically delete the entry from the map, so new entry is created for the next flush. m.Delete(k) diff --git a/lib/streamaggr/total.go b/lib/streamaggr/total.go index 6598d7c15..e0c26a0fe 100644 --- a/lib/streamaggr/total.go +++ b/lib/streamaggr/total.go @@ -127,7 +127,7 @@ func (as *totalAggrState) pushSamples(samples []pushSample) { func (as *totalAggrState) removeOldEntries(ctx *flushCtx, currentTime uint64) { m := &as.m var staleInputSamples, staleOutputSamples int - m.Range(func(k, v interface{}) bool { + m.Range(func(k, v any) bool { sv := v.(*totalStateValue) sv.mu.Lock() @@ -164,7 +164,7 @@ func (as *totalAggrState) flushState(ctx *flushCtx, resetState bool) { as.removeOldEntries(ctx, currentTime) m := &as.m - m.Range(func(k, v interface{}) bool { + m.Range(func(k, v any) bool { sv := v.(*totalStateValue) sv.mu.Lock() total := sv.total diff --git a/lib/streamaggr/unique_samples.go b/lib/streamaggr/unique_samples.go index 310617cc7..9cb63c636 100644 --- a/lib/streamaggr/unique_samples.go +++ b/lib/streamaggr/unique_samples.go @@ -65,7 +65,7 @@ func (as *uniqueSamplesAggrState) pushSamples(samples []pushSample) { func (as *uniqueSamplesAggrState) flushState(ctx *flushCtx, resetState bool) { currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000 m := &as.m - m.Range(func(k, v interface{}) bool { + m.Range(func(k, v any) bool { if resetState { // Atomically delete the entry from the map, so new entry is created for the next flush. 
m.Delete(k) diff --git a/lib/uint64set/uint64set.go b/lib/uint64set/uint64set.go index 44204b509..edd8bf042 100644 --- a/lib/uint64set/uint64set.go +++ b/lib/uint64set/uint64set.go @@ -535,7 +535,7 @@ func (b *bucket32) forEach(f func(part []uint64) bool) bool { } var partBufPool = &sync.Pool{ - New: func() interface{} { + New: func() any { buf := make([]uint64, 0, bitsPerBucket) return &buf }, @@ -948,7 +948,7 @@ func (b *bucket16) appendTo(dst []uint64, hi uint32, hi16 uint16) []uint64 { } var smallPoolSorterPool = &sync.Pool{ - New: func() interface{} { + New: func() any { return &smallPoolSorter{} }, }
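
For reference, the change is purely cosmetic: since Go 1.18 the universe block declares `type any = interface{}`, so composite and variadic forms such as []any, map[string]any and ...any in the hunks above (logger, querytracer, httpserver.Errorf, the openstack buildScope maps, getScrapeWorkObjectsForLabels) denote exactly the same types as their interface{} spellings. A minimal sketch with hypothetical names (logf is not from the repository) illustrating the equivalence:

package main

import "fmt"

// Since Go 1.18 the universe block declares: type any = interface{}
// so the signature below is identical to its ...interface{} spelling.
func logf(format string, args ...any) { // hypothetical printf-style helper
	fmt.Printf(format+"\n", args...)
}

func main() {
	// Composite types built from the alias are the same types, too:
	// map[string]any == map[string]interface{}, []any == []interface{}.
	scope := map[string]any{"project": map[string]any{"id": "p-123"}}
	list := []any{1, "two", 3.0}
	logf("scope=%v list=%v", scope, list)
}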
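
Two standard-library patterns dominate the hunks above: sync.Pool constructors (bsmPool, graphiteMatchesPool, xxhashPool, blockPool, partBufPool, smallPoolSorterPool) and sync.Map.Range callbacks (the FastStringMatcher/FastStringTransformer cleanups and the streamaggr flushState loops). Both APIs are documented with `any` in current Go, and either spelling satisfies them. A self-contained sketch of both patterns, using a hypothetical byte-slice pool rather than the repository's own types:

package main

import (
	"fmt"
	"sync"
)

// bufPool follows the sync.Pool pattern from the patch: New has type
// func() any, which is identical to func() interface{}.
var bufPool = &sync.Pool{
	New: func() any {
		b := make([]byte, 0, 64)
		return &b
	},
}

func main() {
	bp := bufPool.Get().(*[]byte) // type assertions are unchanged by the alias
	*bp = append((*bp)[:0], "hello"...)
	fmt.Println(string(*bp))
	bufPool.Put(bp)

	// sync.Map.Range takes func(key, value any) bool; a callback written
	// with interface{} parameters satisfies it equally well.
	var m sync.Map
	m.Store("requests", 123)
	m.Range(func(k, v any) bool {
		fmt.Println(k, v)
		return true
	})
}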
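
Another large group of hunks (lastAccessHeap, bsrHeap, partSearchHeap, partitionSearchHeap, blockStreamReadersHeap, blockStreamReaderHeap, topHeap) updates container/heap implementations. heap.Interface requires Push(x any) and Pop() any, and because `any` aliases interface{}, implementations may use either spelling. A minimal, hypothetical intHeap showing the post-patch signatures:

package main

import (
	"container/heap"
	"fmt"
)

// intHeap is a minimal heap.Interface implementation; Push and Pop use the
// any spelling, which satisfies the interface exactly as interface{} did.
type intHeap []int

func (h intHeap) Len() int           { return len(h) }
func (h intHeap) Less(i, j int) bool { return h[i] < h[j] }
func (h intHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

func (h *intHeap) Push(x any) {
	*h = append(*h, x.(int))
}

func (h *intHeap) Pop() any {
	old := *h
	n := len(old)
	v := old[n-1]
	*h = old[:n-1]
	return v
}

func main() {
	h := &intHeap{5, 2, 9}
	heap.Init(h)
	heap.Push(h, 1)
	fmt.Println(heap.Pop(h)) // smallest element first: 1
}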
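
Finally, the config-related hunks (promauth.Secret, promrelabel.MultiLineRegex, IfExpression, promutils.Duration, promutils.Labels, proxy.URL, gce.ZoneYAML) touch YAML marshaling hooks. Their signatures match gopkg.in/yaml.v2, whose Marshaler and Unmarshaler interfaces are declared with interface{}; methods written with `any` still satisfy them because the types are identical. A small sketch with a hypothetical Seconds type, assuming gopkg.in/yaml.v2 is available:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Seconds is a hypothetical type (not from the patch) whose YAML hooks use
// the any spelling while still satisfying yaml.v2's Marshaler and
// Unmarshaler interfaces, which are declared with interface{}.
type Seconds struct{ N int }

func (s Seconds) MarshalYAML() (any, error) {
	return fmt.Sprintf("%ds", s.N), nil
}

func (s *Seconds) UnmarshalYAML(unmarshal func(any) error) error {
	var raw string
	if err := unmarshal(&raw); err != nil {
		return err
	}
	_, err := fmt.Sscanf(raw, "%ds", &s.N)
	return err
}

func main() {
	var s Seconds
	if err := yaml.Unmarshal([]byte(`42s`), &s); err != nil {
		panic(err)
	}
	out, _ := yaml.Marshal(s)
	fmt.Printf("%d %s", s.N, out) // 42 42s
}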