all: consistently use 'any' instead of 'interface{}'

The 'any' type is supported starting from Go 1.18. Let's consistently use it
instead of the 'interface{}' type across the code base, since 'any' is easier to read than 'interface{}'.
This commit is contained in:
Aliaksandr Valialkin 2024-07-10 00:14:15 +02:00
parent 73ca22bb7d
commit d6415b2572
No known key found for this signature in database
GPG key ID: 52C003EE2BCDB9EB
96 changed files with 302 additions and 309 deletions

View file

@ -181,7 +181,7 @@ func (rctx *relabelCtx) reset() {
} }
var relabelCtxPool = &sync.Pool{ var relabelCtxPool = &sync.Pool{
New: func() interface{} { New: func() any {
return &relabelCtx{} return &relabelCtx{}
}, },
} }

View file

@ -1000,7 +1000,7 @@ func (rwctx *remoteWriteCtx) tryPushInternal(tss []prompbmarshal.TimeSeries) boo
} }
var tssPool = &sync.Pool{ var tssPool = &sync.Pool{
New: func() interface{} { New: func() any {
a := []prompbmarshal.TimeSeries{} a := []prompbmarshal.TimeSeries{}
return &a return &a
}, },

View file

@ -45,11 +45,11 @@ type Group struct {
// EvalAlignment will make the timestamp of group query requests be aligned with interval // EvalAlignment will make the timestamp of group query requests be aligned with interval
EvalAlignment *bool `yaml:"eval_alignment,omitempty"` EvalAlignment *bool `yaml:"eval_alignment,omitempty"`
// Catches all undefined fields and must be empty after parsing. // Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"` XXX map[string]any `yaml:",inline"`
} }
// UnmarshalYAML implements the yaml.Unmarshaler interface. // UnmarshalYAML implements the yaml.Unmarshaler interface.
func (g *Group) UnmarshalYAML(unmarshal func(interface{}) error) error { func (g *Group) UnmarshalYAML(unmarshal func(any) error) error {
type group Group type group Group
if err := unmarshal((*group)(g)); err != nil { if err := unmarshal((*group)(g)); err != nil {
return err return err
@ -142,11 +142,11 @@ type Rule struct {
UpdateEntriesLimit *int `yaml:"update_entries_limit,omitempty"` UpdateEntriesLimit *int `yaml:"update_entries_limit,omitempty"`
// Catches all undefined fields and must be empty after parsing. // Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"` XXX map[string]any `yaml:",inline"`
} }
// UnmarshalYAML implements the yaml.Unmarshaler interface. // UnmarshalYAML implements the yaml.Unmarshaler interface.
func (r *Rule) UnmarshalYAML(unmarshal func(interface{}) error) error { func (r *Rule) UnmarshalYAML(unmarshal func(any) error) error {
type rule Rule type rule Rule
if err := unmarshal((*rule)(r)); err != nil { if err := unmarshal((*rule)(r)); err != nil {
return err return err
@ -301,7 +301,7 @@ func parseConfig(data []byte) ([]Group, error) {
g := struct { g := struct {
Groups []Group `yaml:"groups"` Groups []Group `yaml:"groups"`
// Catches all undefined fields and must be empty after parsing. // Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"` XXX map[string]any `yaml:",inline"`
}{} }{}
err = yaml.Unmarshal(data, &g) err = yaml.Unmarshal(data, &g)
if err != nil { if err != nil {
@ -310,7 +310,7 @@ func parseConfig(data []byte) ([]Group, error) {
return g.Groups, checkOverflow(g.XXX, "config") return g.Groups, checkOverflow(g.XXX, "config")
} }
func checkOverflow(m map[string]interface{}, ctx string) error { func checkOverflow(m map[string]any, ctx string) error {
if len(m) > 0 { if len(m) > 0 {
var keys []string var keys []string
for k := range m { for k := range m {

View file

@ -29,7 +29,7 @@ func (l *Logger) isDisabled() bool {
} }
// Errorf logs error message. // Errorf logs error message.
func (l *Logger) Errorf(format string, args ...interface{}) { func (l *Logger) Errorf(format string, args ...any) {
if l.isDisabled() { if l.isDisabled() {
return return
} }
@ -37,7 +37,7 @@ func (l *Logger) Errorf(format string, args ...interface{}) {
} }
// Warnf logs warning message. // Warnf logs warning message.
func (l *Logger) Warnf(format string, args ...interface{}) { func (l *Logger) Warnf(format string, args ...any) {
if l.isDisabled() { if l.isDisabled() {
return return
} }
@ -45,7 +45,7 @@ func (l *Logger) Warnf(format string, args ...interface{}) {
} }
// Infof logs info message. // Infof logs info message.
func (l *Logger) Infof(format string, args ...interface{}) { func (l *Logger) Infof(format string, args ...any) {
if l.isDisabled() { if l.isDisabled() {
return return
} }
@ -54,6 +54,6 @@ func (l *Logger) Infof(format string, args ...interface{}) {
// Panicf logs panic message and panics. // Panicf logs panic message and panics.
// Panicf can't be suppressed // Panicf can't be suppressed
func (l *Logger) Panicf(format string, args ...interface{}) { func (l *Logger) Panicf(format string, args ...any) {
logger.Panicf(format, args...) logger.Panicf(format, args...)
} }

View file

@ -69,7 +69,7 @@ func (t *Type) ValidateExpr(expr string) error {
} }
// UnmarshalYAML implements the yaml.Unmarshaler interface. // UnmarshalYAML implements the yaml.Unmarshaler interface.
func (t *Type) UnmarshalYAML(unmarshal func(interface{}) error) error { func (t *Type) UnmarshalYAML(unmarshal func(any) error) error {
var s string var s string
if err := unmarshal(&s); err != nil { if err := unmarshal(&s); err != nil {
return err return err
@ -87,7 +87,7 @@ func (t *Type) UnmarshalYAML(unmarshal func(interface{}) error) error {
} }
// MarshalYAML implements the yaml.Unmarshaler interface. // MarshalYAML implements the yaml.Unmarshaler interface.
func (t Type) MarshalYAML() (interface{}, error) { func (t Type) MarshalYAML() (any, error) {
return t.Name, nil return t.Name, nil
} }
@ -98,7 +98,7 @@ type Header struct {
} }
// UnmarshalYAML implements the yaml.Unmarshaler interface. // UnmarshalYAML implements the yaml.Unmarshaler interface.
func (h *Header) UnmarshalYAML(unmarshal func(interface{}) error) error { func (h *Header) UnmarshalYAML(unmarshal func(any) error) error {
var s string var s string
if err := unmarshal(&s); err != nil { if err := unmarshal(&s); err != nil {
return err return err

View file

@ -119,7 +119,7 @@ func (pi *promInstant) Unmarshal(b []byte) error {
type promRange struct { type promRange struct {
Result []struct { Result []struct {
Labels map[string]string `json:"metric"` Labels map[string]string `json:"metric"`
TVs [][2]interface{} `json:"values"` TVs [][2]any `json:"values"`
} `json:"result"` } `json:"result"`
} }
@ -147,7 +147,7 @@ func (r promRange) metrics() ([]Metric, error) {
return result, nil return result, nil
} }
type promScalar [2]interface{} type promScalar [2]any
func (r promScalar) metrics() ([]Metric, error) { func (r promScalar) metrics() ([]Metric, error) {
var m Metric var m Metric

View file

@ -51,7 +51,7 @@ type Config struct {
Checksum string Checksum string
// Catches all undefined fields and must be empty after parsing. // Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"` XXX map[string]any `yaml:",inline"`
// This is set to the directory from where the config has been loaded. // This is set to the directory from where the config has been loaded.
baseDir string baseDir string
@ -73,7 +73,7 @@ type StaticConfig struct {
} }
// UnmarshalYAML implements the yaml.Unmarshaler interface. // UnmarshalYAML implements the yaml.Unmarshaler interface.
func (cfg *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { func (cfg *Config) UnmarshalYAML(unmarshal func(any) error) error {
type config Config type config Config
if err := unmarshal((*config)(cfg)); err != nil { if err := unmarshal((*config)(cfg)); err != nil {
return err return err

View file

@ -183,7 +183,7 @@ func (ar *AlertingRule) GetAlert(id uint64) *notifier.Alert {
return ar.alerts[id] return ar.alerts[id]
} }
func (ar *AlertingRule) logDebugf(at time.Time, a *notifier.Alert, format string, args ...interface{}) { func (ar *AlertingRule) logDebugf(at time.Time, a *notifier.Alert, format string, args ...any) {
if !ar.Debug { if !ar.Debug {
return return
} }

View file

@ -475,7 +475,7 @@ func delayBeforeStart(ts time.Time, key uint64, interval time.Duration, offset *
return randSleep return randSleep
} }
func (g *Group) infof(format string, args ...interface{}) { func (g *Group) infof(format string, args ...any) {
msg := fmt.Sprintf(format, args...) msg := fmt.Sprintf(format, args...)
logger.Infof("group %q %s; interval=%v; eval_offset=%v; concurrency=%d", logger.Infof("group %q %s; interval=%v; eval_offset=%v; concurrency=%d",
g.Name, msg, g.Interval, g.EvalOffset, g.Concurrency) g.Name, msg, g.Interval, g.EvalOffset, g.Concurrency)

View file

@ -316,7 +316,7 @@ func templateFuncs() textTpl.FuncMap {
// humanize converts given number to a human readable format // humanize converts given number to a human readable format
// by adding metric prefixes https://en.wikipedia.org/wiki/Metric_prefix // by adding metric prefixes https://en.wikipedia.org/wiki/Metric_prefix
"humanize": func(i interface{}) (string, error) { "humanize": func(i any) (string, error) {
v, err := toFloat64(i) v, err := toFloat64(i)
if err != nil { if err != nil {
return "", err return "", err
@ -347,7 +347,7 @@ func templateFuncs() textTpl.FuncMap {
}, },
// humanize1024 converts given number to a human readable format with 1024 as base // humanize1024 converts given number to a human readable format with 1024 as base
"humanize1024": func(i interface{}) (string, error) { "humanize1024": func(i any) (string, error) {
v, err := toFloat64(i) v, err := toFloat64(i)
if err != nil { if err != nil {
return "", err return "", err
@ -359,7 +359,7 @@ func templateFuncs() textTpl.FuncMap {
}, },
// humanizeDuration converts given seconds to a human-readable duration // humanizeDuration converts given seconds to a human-readable duration
"humanizeDuration": func(i interface{}) (string, error) { "humanizeDuration": func(i any) (string, error) {
v, err := toFloat64(i) v, err := toFloat64(i)
if err != nil { if err != nil {
return "", err return "", err
@ -405,7 +405,7 @@ func templateFuncs() textTpl.FuncMap {
}, },
// humanizePercentage converts given ratio value to a fraction of 100 // humanizePercentage converts given ratio value to a fraction of 100
"humanizePercentage": func(i interface{}) (string, error) { "humanizePercentage": func(i any) (string, error) {
v, err := toFloat64(i) v, err := toFloat64(i)
if err != nil { if err != nil {
return "", err return "", err
@ -414,7 +414,7 @@ func templateFuncs() textTpl.FuncMap {
}, },
// humanizeTimestamp converts given timestamp to a human readable time equivalent // humanizeTimestamp converts given timestamp to a human readable time equivalent
"humanizeTimestamp": func(i interface{}) (string, error) { "humanizeTimestamp": func(i any) (string, error) {
v, err := toFloat64(i) v, err := toFloat64(i)
if err != nil { if err != nil {
return "", err return "", err
@ -427,7 +427,7 @@ func templateFuncs() textTpl.FuncMap {
}, },
// toTime converts given timestamp to a time.Time. // toTime converts given timestamp to a time.Time.
"toTime": func(i interface{}) (time.Time, error) { "toTime": func(i any) (time.Time, error) {
v, err := toFloat64(i) v, err := toFloat64(i)
if err != nil { if err != nil {
return time.Time{}, err return time.Time{}, err
@ -524,8 +524,8 @@ func templateFuncs() textTpl.FuncMap {
// Converts a list of objects to a map with keys arg0, arg1 etc. // Converts a list of objects to a map with keys arg0, arg1 etc.
// This is intended to allow multiple arguments to be passed to templates. // This is intended to allow multiple arguments to be passed to templates.
"args": func(args ...interface{}) map[string]interface{} { "args": func(args ...any) map[string]any {
result := make(map[string]interface{}) result := make(map[string]any)
for i, a := range args { for i, a := range args {
result[fmt.Sprintf("arg%d", i)] = a result[fmt.Sprintf("arg%d", i)] = a
} }
@ -565,7 +565,7 @@ func (t Time) Time() time.Time {
return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
} }
func toFloat64(v interface{}) (float64, error) { func toFloat64(v any) (float64, error) {
switch i := v.(type) { switch i := v.(type) {
case float64: case float64:
return i, nil return i, nil

View file

@ -52,10 +52,10 @@ func TestTemplateFuncs(t *testing.T) {
t.Fatalf("unexpected mismatch") t.Fatalf("unexpected mismatch")
} }
formatting := func(funcName string, p interface{}, resultExpected string) { formatting := func(funcName string, p any, resultExpected string) {
t.Helper() t.Helper()
v := funcs[funcName] v := funcs[funcName]
fLocal := v.(func(s interface{}) (string, error)) fLocal := v.(func(s any) (string, error))
result, err := fLocal(p) result, err := fLocal(p)
if err != nil { if err != nil {
t.Fatalf("unexpected error for %s(%f): %s", funcName, p, err) t.Fatalf("unexpected error for %s(%f): %s", funcName, p, err)
@ -92,7 +92,7 @@ func TestTemplateFuncs(t *testing.T) {
formatting("humanizeTimestamp", 1679055557, "2023-03-17 12:19:17 +0000 UTC") formatting("humanizeTimestamp", 1679055557, "2023-03-17 12:19:17 +0000 UTC")
} }
func mkTemplate(current, replacement interface{}) textTemplate { func mkTemplate(current, replacement any) textTemplate {
tmpl := textTemplate{} tmpl := textTemplate{}
if current != nil { if current != nil {
switch val := current.(type) { switch val := current.(type) {

View file

@ -36,7 +36,7 @@ func TestHandler(t *testing.T) {
}} }}
rh := &requestHandler{m: m} rh := &requestHandler{m: m}
getResp := func(t *testing.T, url string, to interface{}, code int) { getResp := func(t *testing.T, url string, to any, code int) {
t.Helper() t.Helper()
resp, err := http.Get(url) resp, err := http.Get(url)
if err != nil { if err != nil {
@ -241,7 +241,7 @@ func TestEmptyResponse(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { rhWithNoGroups.handler(w, r) })) ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { rhWithNoGroups.handler(w, r) }))
defer ts.Close() defer ts.Close()
getResp := func(t *testing.T, url string, to interface{}, code int) { getResp := func(t *testing.T, url string, to any, code int) {
t.Helper() t.Helper()
resp, err := http.Get(url) resp, err := http.Get(url)
if err != nil { if err != nil {

View file

@ -183,7 +183,7 @@ func (ar apiRule) WebLink() string {
paramGroupID, ar.GroupID, paramRuleID, ar.ID) paramGroupID, ar.GroupID, paramRuleID, ar.ID)
} }
func ruleToAPI(r interface{}) apiRule { func ruleToAPI(r any) apiRule {
if ar, ok := r.(*rule.AlertingRule); ok { if ar, ok := r.(*rule.AlertingRule); ok {
return alertingToAPI(ar) return alertingToAPI(ar)
} }

View file

@ -129,7 +129,7 @@ type Header struct {
} }
// UnmarshalYAML unmarshals h from f. // UnmarshalYAML unmarshals h from f.
func (h *Header) UnmarshalYAML(f func(interface{}) error) error { func (h *Header) UnmarshalYAML(f func(any) error) error {
var s string var s string
if err := f(&s); err != nil { if err := f(&s); err != nil {
return err return err
@ -146,7 +146,7 @@ func (h *Header) UnmarshalYAML(f func(interface{}) error) error {
} }
// MarshalYAML marshals h to yaml. // MarshalYAML marshals h to yaml.
func (h *Header) MarshalYAML() (interface{}, error) { func (h *Header) MarshalYAML() (any, error) {
return h.sOriginal, nil return h.sOriginal, nil
} }
@ -201,7 +201,7 @@ type QueryArg struct {
} }
// UnmarshalYAML unmarshals qa from yaml. // UnmarshalYAML unmarshals qa from yaml.
func (qa *QueryArg) UnmarshalYAML(f func(interface{}) error) error { func (qa *QueryArg) UnmarshalYAML(f func(any) error) error {
var s string var s string
if err := f(&s); err != nil { if err := f(&s); err != nil {
return err return err
@ -230,7 +230,7 @@ func (qa *QueryArg) UnmarshalYAML(f func(interface{}) error) error {
} }
// MarshalYAML marshals qa to yaml. // MarshalYAML marshals qa to yaml.
func (qa *QueryArg) MarshalYAML() (interface{}, error) { func (qa *QueryArg) MarshalYAML() (any, error) {
return qa.sOriginal, nil return qa.sOriginal, nil
} }
@ -263,7 +263,7 @@ type URLPrefix struct {
nextDiscoveryDeadline atomic.Uint64 nextDiscoveryDeadline atomic.Uint64
// vOriginal contains the original yaml value for URLPrefix. // vOriginal contains the original yaml value for URLPrefix.
vOriginal interface{} vOriginal any
} }
func (up *URLPrefix) setLoadBalancingPolicy(loadBalancingPolicy string) error { func (up *URLPrefix) setLoadBalancingPolicy(loadBalancingPolicy string) error {
@ -497,8 +497,8 @@ func getLeastLoadedBackendURL(bus []*backendURL, atomicCounter *atomic.Uint32) *
} }
// UnmarshalYAML unmarshals up from yaml. // UnmarshalYAML unmarshals up from yaml.
func (up *URLPrefix) UnmarshalYAML(f func(interface{}) error) error { func (up *URLPrefix) UnmarshalYAML(f func(any) error) error {
var v interface{} var v any
if err := f(&v); err != nil { if err := f(&v); err != nil {
return err return err
} }
@ -508,7 +508,7 @@ func (up *URLPrefix) UnmarshalYAML(f func(interface{}) error) error {
switch x := v.(type) { switch x := v.(type) {
case string: case string:
urls = []string{x} urls = []string{x}
case []interface{}: case []any:
if len(x) == 0 { if len(x) == 0 {
return fmt.Errorf("`url_prefix` must contain at least a single url") return fmt.Errorf("`url_prefix` must contain at least a single url")
} }
@ -538,7 +538,7 @@ func (up *URLPrefix) UnmarshalYAML(f func(interface{}) error) error {
} }
// MarshalYAML marshals up to yaml. // MarshalYAML marshals up to yaml.
func (up *URLPrefix) MarshalYAML() (interface{}, error) { func (up *URLPrefix) MarshalYAML() (any, error) {
return up.vOriginal, nil return up.vOriginal, nil
} }
@ -562,7 +562,7 @@ func (r *Regex) match(s string) bool {
} }
// UnmarshalYAML implements yaml.Unmarshaler // UnmarshalYAML implements yaml.Unmarshaler
func (r *Regex) UnmarshalYAML(f func(interface{}) error) error { func (r *Regex) UnmarshalYAML(f func(any) error) error {
var s string var s string
if err := f(&s); err != nil { if err := f(&s); err != nil {
return err return err
@ -579,7 +579,7 @@ func (r *Regex) UnmarshalYAML(f func(interface{}) error) error {
} }
// MarshalYAML implements yaml.Marshaler. // MarshalYAML implements yaml.Marshaler.
func (r *Regex) MarshalYAML() (interface{}, error) { func (r *Regex) MarshalYAML() (any, error) {
return r.sOriginal, nil return r.sOriginal, nil
} }

View file

@ -12,7 +12,7 @@ import (
type queryValues struct { type queryValues struct {
name string name string
values map[string][]interface{} values map[string][]any
} }
func parseResult(r influx.Result) ([]queryValues, error) { func parseResult(r influx.Result) ([]queryValues, error) {
@ -21,7 +21,7 @@ func parseResult(r influx.Result) ([]queryValues, error) {
} }
qValues := make([]queryValues, len(r.Series)) qValues := make([]queryValues, len(r.Series))
for i, row := range r.Series { for i, row := range r.Series {
values := make(map[string][]interface{}, len(row.Values)) values := make(map[string][]any, len(row.Values))
for _, value := range row.Values { for _, value := range row.Values {
for idx, v := range value { for idx, v := range value {
key := row.Columns[idx] key := row.Columns[idx]
@ -36,7 +36,7 @@ func parseResult(r influx.Result) ([]queryValues, error) {
return qValues, nil return qValues, nil
} }
func toFloat64(v interface{}) (float64, error) { func toFloat64(v any) (float64, error) {
switch i := v.(type) { switch i := v.(type) {
case json.Number: case json.Number:
return i.Float64() return i.Float64()

View file

@ -61,7 +61,7 @@ func TestSeries_Unmarshal(t *testing.T) {
} }
func TestToFloat64(t *testing.T) { func TestToFloat64(t *testing.T) {
f := func(in interface{}, want float64) { f := func(in any, want float64) {
t.Helper() t.Helper()
got, err := toFloat64(in) got, err := toFloat64(in)
if err != nil { if err != nil {

View file

@ -75,12 +75,6 @@ type TimeRange struct {
type MetaResults struct { type MetaResults struct {
Type string `json:"type"` Type string `json:"type"`
Results []Meta `json:"results"` Results []Meta `json:"results"`
//metric string
//tags interface{}
//limit int
//time int
//startIndex int
//totalResults int
} }
// Meta A meta object about a metric // Meta A meta object about a metric
@ -88,7 +82,6 @@ type MetaResults struct {
type Meta struct { type Meta struct {
Metric string `json:"metric"` Metric string `json:"metric"`
Tags map[string]string `json:"tags"` Tags map[string]string `json:"tags"`
//tsuid string
} }
// OtsdbMetric is a single series in OpenTSDB's returned format // OtsdbMetric is a single series in OpenTSDB's returned format

View file

@ -45,7 +45,7 @@ type cWriter struct {
err error err error
} }
func (cw *cWriter) printf(format string, args ...interface{}) { func (cw *cWriter) printf(format string, args ...any) {
if cw.err != nil { if cw.err != nil {
return return
} }

View file

@ -176,10 +176,10 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
} }
initMessage := "Initing import process from %q to %q with filter %s" initMessage := "Initing import process from %q to %q with filter %s"
initParams := []interface{}{srcURL, dstURL, p.filter.String()} initParams := []any{srcURL, dstURL, p.filter.String()}
if p.interCluster { if p.interCluster {
initMessage = "Initing import process from %q to %q with filter %s for tenant %s" initMessage = "Initing import process from %q to %q with filter %s for tenant %s"
initParams = []interface{}{srcURL, dstURL, p.filter.String(), tenantID} initParams = []any{srcURL, dstURL, p.filter.String(), tenantID}
} }
fmt.Println("") // extra line for better output formatting fmt.Println("") // extra line for better output formatting

View file

@ -253,7 +253,7 @@ func putHistogram(h *histogram.Fast) {
} }
var histogramPool = &sync.Pool{ var histogramPool = &sync.Pool{
New: func() interface{} { New: func() any {
return histogram.NewFast() return histogram.NewFast()
}, },
} }

View file

@ -16,7 +16,7 @@ import (
func FunctionsHandler(w http.ResponseWriter, r *http.Request) error { func FunctionsHandler(w http.ResponseWriter, r *http.Request) error {
grouped := httputils.GetBool(r, "grouped") grouped := httputils.GetBool(r, "grouped")
group := r.FormValue("group") group := r.FormValue("group")
result := make(map[string]interface{}) result := make(map[string]any)
for funcName, fi := range funcs { for funcName, fi := range funcs {
if group != "" && fi.Group != group { if group != "" && fi.Group != group {
continue continue
@ -47,7 +47,7 @@ func FunctionDetailsHandler(funcName string, w http.ResponseWriter, r *http.Requ
return writeJSON(result, w, r) return writeJSON(result, w, r)
} }
func writeJSON(result interface{}, w http.ResponseWriter, r *http.Request) error { func writeJSON(result any, w http.ResponseWriter, r *http.Request) error {
data, err := json.Marshal(result) data, err := json.Marshal(result)
if err != nil { if err != nil {
return fmt.Errorf("cannot marshal response to JSON: %w", err) return fmt.Errorf("cannot marshal response to JSON: %w", err)

View file

@ -1968,10 +1968,10 @@ func (h *minSeriesHeap) Swap(i, j int) {
a := *h a := *h
a[i], a[j] = a[j], a[i] a[i], a[j] = a[j], a[i]
} }
func (h *minSeriesHeap) Push(x interface{}) { func (h *minSeriesHeap) Push(x any) {
*h = append(*h, x.(*seriesWithWeight)) *h = append(*h, x.(*seriesWithWeight))
} }
func (h *minSeriesHeap) Pop() interface{} { func (h *minSeriesHeap) Pop() any {
a := *h a := *h
x := a[len(a)-1] x := a[len(a)-1]
*h = a[:len(a)-1] *h = a[:len(a)-1]
@ -2499,10 +2499,10 @@ func (h *maxSeriesHeap) Swap(i, j int) {
a := *h a := *h
a[i], a[j] = a[j], a[i] a[i], a[j] = a[j], a[i]
} }
func (h *maxSeriesHeap) Push(x interface{}) { func (h *maxSeriesHeap) Push(x any) {
*h = append(*h, x.(*seriesWithWeight)) *h = append(*h, x.(*seriesWithWeight))
} }
func (h *maxSeriesHeap) Pop() interface{} { func (h *maxSeriesHeap) Pop() any {
a := *h a := *h
x := a[len(a)-1] x := a[len(a)-1]
*h = a[:len(a)-1] *h = a[:len(a)-1]

View file

@ -760,11 +760,11 @@ func (sbh *sortBlocksHeap) Swap(i, j int) {
sbs[i], sbs[j] = sbs[j], sbs[i] sbs[i], sbs[j] = sbs[j], sbs[i]
} }
func (sbh *sortBlocksHeap) Push(x interface{}) { func (sbh *sortBlocksHeap) Push(x any) {
sbh.sbs = append(sbh.sbs, x.(*sortBlock)) sbh.sbs = append(sbh.sbs, x.(*sortBlock))
} }
func (sbh *sortBlocksHeap) Pop() interface{} { func (sbh *sortBlocksHeap) Pop() any {
sbs := sbh.sbs sbs := sbh.sbs
v := sbs[len(sbs)-1] v := sbs[len(sbs)-1]
sbs[len(sbs)-1] = nil sbs[len(sbs)-1] = nil
@ -810,7 +810,7 @@ func RegisterMetricNames(qt *querytracer.Tracer, mrs []storage.MetricRow, deadli
} }
// Push mrs to storage nodes in parallel. // Push mrs to storage nodes in parallel.
snr := startStorageNodesRequest(qt, sns, true, func(qt *querytracer.Tracer, workerID uint, sn *storageNode) interface{} { snr := startStorageNodesRequest(qt, sns, true, func(qt *querytracer.Tracer, workerID uint, sn *storageNode) any {
sn.registerMetricNamesRequests.Inc() sn.registerMetricNamesRequests.Inc()
err := sn.registerMetricNames(qt, mrsPerNode[workerID], deadline) err := sn.registerMetricNames(qt, mrsPerNode[workerID], deadline)
if err != nil { if err != nil {
@ -820,7 +820,7 @@ func RegisterMetricNames(qt *querytracer.Tracer, mrs []storage.MetricRow, deadli
}) })
// Collect results // Collect results
err := snr.collectAllResults(func(result interface{}) error { err := snr.collectAllResults(func(result any) error {
errP := result.(*error) errP := result.(*error)
return *errP return *errP
}) })
@ -842,7 +842,7 @@ func DeleteSeries(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline sear
err error err error
} }
sns := getStorageNodes() sns := getStorageNodes()
snr := startStorageNodesRequest(qt, sns, true, func(qt *querytracer.Tracer, _ uint, sn *storageNode) interface{} { snr := startStorageNodesRequest(qt, sns, true, func(qt *querytracer.Tracer, _ uint, sn *storageNode) any {
sn.deleteSeriesRequests.Inc() sn.deleteSeriesRequests.Inc()
deletedCount, err := sn.deleteSeries(qt, requestData, deadline) deletedCount, err := sn.deleteSeries(qt, requestData, deadline)
if err != nil { if err != nil {
@ -856,7 +856,7 @@ func DeleteSeries(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline sear
// Collect results // Collect results
deletedTotal := 0 deletedTotal := 0
err := snr.collectAllResults(func(result interface{}) error { err := snr.collectAllResults(func(result any) error {
nr := result.(*nodeResult) nr := result.(*nodeResult)
if nr.err != nil { if nr.err != nil {
return nr.err return nr.err
@ -884,7 +884,7 @@ func LabelNames(qt *querytracer.Tracer, denyPartialResponse bool, sq *storage.Se
err error err error
} }
sns := getStorageNodes() sns := getStorageNodes()
snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, _ uint, sn *storageNode) interface{} { snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, _ uint, sn *storageNode) any {
sn.labelNamesRequests.Inc() sn.labelNamesRequests.Inc()
labelNames, err := sn.getLabelNames(qt, requestData, maxLabelNames, deadline) labelNames, err := sn.getLabelNames(qt, requestData, maxLabelNames, deadline)
if err != nil { if err != nil {
@ -899,7 +899,7 @@ func LabelNames(qt *querytracer.Tracer, denyPartialResponse bool, sq *storage.Se
// Collect results // Collect results
var labelNames []string var labelNames []string
isPartial, err := snr.collectResults(partialLabelNamesResults, func(result interface{}) error { isPartial, err := snr.collectResults(partialLabelNamesResults, func(result any) error {
nr := result.(*nodeResult) nr := result.(*nodeResult)
if nr.err != nil { if nr.err != nil {
return nr.err return nr.err
@ -987,7 +987,7 @@ func LabelValues(qt *querytracer.Tracer, denyPartialResponse bool, labelName str
err error err error
} }
sns := getStorageNodes() sns := getStorageNodes()
snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, _ uint, sn *storageNode) interface{} { snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, _ uint, sn *storageNode) any {
sn.labelValuesRequests.Inc() sn.labelValuesRequests.Inc()
labelValues, err := sn.getLabelValues(qt, labelName, requestData, maxLabelValues, deadline) labelValues, err := sn.getLabelValues(qt, labelName, requestData, maxLabelValues, deadline)
if err != nil { if err != nil {
@ -1002,7 +1002,7 @@ func LabelValues(qt *querytracer.Tracer, denyPartialResponse bool, labelName str
// Collect results // Collect results
var labelValues []string var labelValues []string
isPartial, err := snr.collectResults(partialLabelValuesResults, func(result interface{}) error { isPartial, err := snr.collectResults(partialLabelValuesResults, func(result any) error {
nr := result.(*nodeResult) nr := result.(*nodeResult)
if nr.err != nil { if nr.err != nil {
return nr.err return nr.err
@ -1042,7 +1042,7 @@ func Tenants(qt *querytracer.Tracer, tr storage.TimeRange, deadline searchutils.
} }
sns := getStorageNodes() sns := getStorageNodes()
// Deny partial responses when obtaining the list of tenants, since partial tenants have little sense. // Deny partial responses when obtaining the list of tenants, since partial tenants have little sense.
snr := startStorageNodesRequest(qt, sns, true, func(qt *querytracer.Tracer, _ uint, sn *storageNode) interface{} { snr := startStorageNodesRequest(qt, sns, true, func(qt *querytracer.Tracer, _ uint, sn *storageNode) any {
sn.tenantsRequests.Inc() sn.tenantsRequests.Inc()
tenants, err := sn.getTenants(qt, tr, deadline) tenants, err := sn.getTenants(qt, tr, deadline)
if err != nil { if err != nil {
@ -1057,7 +1057,7 @@ func Tenants(qt *querytracer.Tracer, tr storage.TimeRange, deadline searchutils.
// Collect results // Collect results
var tenants []string var tenants []string
_, err := snr.collectResults(partialLabelValuesResults, func(result interface{}) error { _, err := snr.collectResults(partialLabelValuesResults, func(result any) error {
nr := result.(*nodeResult) nr := result.(*nodeResult)
if nr.err != nil { if nr.err != nil {
return nr.err return nr.err
@ -1122,7 +1122,7 @@ func TagValueSuffixes(qt *querytracer.Tracer, accountID, projectID uint32, denyP
err error err error
} }
sns := getStorageNodes() sns := getStorageNodes()
snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, _ uint, sn *storageNode) interface{} { snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, _ uint, sn *storageNode) any {
sn.tagValueSuffixesRequests.Inc() sn.tagValueSuffixesRequests.Inc()
suffixes, err := sn.getTagValueSuffixes(qt, accountID, projectID, tr, tagKey, tagValuePrefix, delimiter, maxSuffixes, deadline) suffixes, err := sn.getTagValueSuffixes(qt, accountID, projectID, tr, tagKey, tagValuePrefix, delimiter, maxSuffixes, deadline)
if err != nil { if err != nil {
@ -1138,7 +1138,7 @@ func TagValueSuffixes(qt *querytracer.Tracer, accountID, projectID uint32, denyP
// Collect results // Collect results
m := make(map[string]struct{}) m := make(map[string]struct{})
isPartial, err := snr.collectResults(partialTagValueSuffixesResults, func(result interface{}) error { isPartial, err := snr.collectResults(partialTagValueSuffixesResults, func(result any) error {
nr := result.(*nodeResult) nr := result.(*nodeResult)
if nr.err != nil { if nr.err != nil {
return nr.err return nr.err
@ -1187,7 +1187,7 @@ func TSDBStatus(qt *querytracer.Tracer, denyPartialResponse bool, sq *storage.Se
err error err error
} }
sns := getStorageNodes() sns := getStorageNodes()
snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, _ uint, sn *storageNode) interface{} { snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, _ uint, sn *storageNode) any {
sn.tsdbStatusRequests.Inc() sn.tsdbStatusRequests.Inc()
status, err := sn.getTSDBStatus(qt, requestData, focusLabel, topN, deadline) status, err := sn.getTSDBStatus(qt, requestData, focusLabel, topN, deadline)
if err != nil { if err != nil {
@ -1202,7 +1202,7 @@ func TSDBStatus(qt *querytracer.Tracer, denyPartialResponse bool, sq *storage.Se
// Collect results. // Collect results.
var statuses []*storage.TSDBStatus var statuses []*storage.TSDBStatus
isPartial, err := snr.collectResults(partialTSDBStatusResults, func(result interface{}) error { isPartial, err := snr.collectResults(partialTSDBStatusResults, func(result any) error {
nr := result.(*nodeResult) nr := result.(*nodeResult)
if nr.err != nil { if nr.err != nil {
return nr.err return nr.err
@ -1293,7 +1293,7 @@ func SeriesCount(qt *querytracer.Tracer, accountID, projectID uint32, denyPartia
err error err error
} }
sns := getStorageNodes() sns := getStorageNodes()
snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, _ uint, sn *storageNode) interface{} { snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, _ uint, sn *storageNode) any {
sn.seriesCountRequests.Inc() sn.seriesCountRequests.Inc()
n, err := sn.getSeriesCount(qt, accountID, projectID, deadline) n, err := sn.getSeriesCount(qt, accountID, projectID, deadline)
if err != nil { if err != nil {
@ -1308,7 +1308,7 @@ func SeriesCount(qt *querytracer.Tracer, accountID, projectID uint32, denyPartia
// Collect results // Collect results
var n uint64 var n uint64
isPartial, err := snr.collectResults(partialSeriesCountResults, func(result interface{}) error { isPartial, err := snr.collectResults(partialSeriesCountResults, func(result any) error {
nr := result.(*nodeResult) nr := result.(*nodeResult)
if nr.err != nil { if nr.err != nil {
return nr.err return nr.err
@ -1548,7 +1548,7 @@ func (tbfw *tmpBlocksFileWrapper) getTmpBlockFiles() []*tmpBlocksFile {
} }
var metricNamePool = &sync.Pool{ var metricNamePool = &sync.Pool{
New: func() interface{} { New: func() any {
return &storage.MetricName{} return &storage.MetricName{}
}, },
} }
@ -1611,7 +1611,7 @@ func SearchMetricNames(qt *querytracer.Tracer, denyPartialResponse bool, sq *sto
err error err error
} }
sns := getStorageNodes() sns := getStorageNodes()
snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, _ uint, sn *storageNode) interface{} { snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, _ uint, sn *storageNode) any {
sn.searchMetricNamesRequests.Inc() sn.searchMetricNamesRequests.Inc()
metricNames, err := sn.processSearchMetricNames(qt, requestData, deadline) metricNames, err := sn.processSearchMetricNames(qt, requestData, deadline)
if err != nil { if err != nil {
@ -1626,7 +1626,7 @@ func SearchMetricNames(qt *querytracer.Tracer, denyPartialResponse bool, sq *sto
// Collect results. // Collect results.
metricNamesMap := make(map[string]struct{}) metricNamesMap := make(map[string]struct{})
isPartial, err := snr.collectResults(partialSearchMetricNamesResults, func(result interface{}) error { isPartial, err := snr.collectResults(partialSearchMetricNamesResults, func(result any) error {
nr := result.(*nodeResult) nr := result.(*nodeResult)
if nr.err != nil { if nr.err != nil {
return nr.err return nr.err
@ -1772,7 +1772,7 @@ func processBlocks(qt *querytracer.Tracer, sns []*storageNode, denyPartialRespon
} }
// Send the query to all the storage nodes in parallel. // Send the query to all the storage nodes in parallel.
snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, workerID uint, sn *storageNode) interface{} { snr := startStorageNodesRequest(qt, sns, denyPartialResponse, func(qt *querytracer.Tracer, workerID uint, sn *storageNode) any {
sn.searchRequests.Inc() sn.searchRequests.Inc()
err := sn.processSearchQuery(qt, requestData, f, workerID, deadline) err := sn.processSearchQuery(qt, requestData, f, workerID, deadline)
if err != nil { if err != nil {
@ -1783,7 +1783,7 @@ func processBlocks(qt *querytracer.Tracer, sns []*storageNode, denyPartialRespon
}) })
// Collect results. // Collect results.
isPartial, err := snr.collectResults(partialSearchResults, func(result interface{}) error { isPartial, err := snr.collectResults(partialSearchResults, func(result any) error {
errP := result.(*error) errP := result.(*error)
return *errP return *errP
}) })
@ -1811,13 +1811,13 @@ type storageNodesRequest struct {
} }
type rpcResult struct { type rpcResult struct {
data interface{} data any
qt *querytracer.Tracer qt *querytracer.Tracer
group *storageNodesGroup group *storageNodesGroup
} }
func startStorageNodesRequest(qt *querytracer.Tracer, sns []*storageNode, denyPartialResponse bool, func startStorageNodesRequest(qt *querytracer.Tracer, sns []*storageNode, denyPartialResponse bool,
f func(qt *querytracer.Tracer, workerID uint, sn *storageNode) interface{}) *storageNodesRequest { f func(qt *querytracer.Tracer, workerID uint, sn *storageNode) any) *storageNodesRequest {
resultsCh := make(chan rpcResult, len(sns)) resultsCh := make(chan rpcResult, len(sns))
qts := make(map[*querytracer.Tracer]struct{}, len(sns)) qts := make(map[*querytracer.Tracer]struct{}, len(sns))
for idx, sn := range sns { for idx, sn := range sns {
@ -1855,7 +1855,7 @@ func (snr *storageNodesRequest) finishQueryTracer(qt *querytracer.Tracer, msg st
delete(snr.qts, qt) delete(snr.qts, qt)
} }
func (snr *storageNodesRequest) collectAllResults(f func(result interface{}) error) error { func (snr *storageNodesRequest) collectAllResults(f func(result any) error) error {
sns := snr.sns sns := snr.sns
for i := 0; i < len(sns); i++ { for i := 0; i < len(sns); i++ {
result := <-snr.resultsCh result := <-snr.resultsCh
@ -1871,7 +1871,7 @@ func (snr *storageNodesRequest) collectAllResults(f func(result interface{}) err
return nil return nil
} }
func (snr *storageNodesRequest) collectResults(partialResultsCounter *metrics.Counter, f func(result interface{}) error) (bool, error) { func (snr *storageNodesRequest) collectResults(partialResultsCounter *metrics.Counter, f func(result any) error) (bool, error) {
sns := snr.sns sns := snr.sns
if len(sns) == 0 { if len(sns) == 0 {
return false, nil return false, nil

View file

@ -475,7 +475,7 @@ func (xb *exportBlock) reset() {
} }
var exportBlockPool = &sync.Pool{ var exportBlockPool = &sync.Pool{
New: func() interface{} { New: func() any {
return &exportBlock{} return &exportBlock{}
}, },
} }
@ -1341,7 +1341,7 @@ func (sw *scalableWriter) maybeFlushBuffer(bb *bytesutil.ByteBuffer) error {
} }
func (sw *scalableWriter) flush() error { func (sw *scalableWriter) flush() error {
sw.m.Range(func(_, v interface{}) bool { sw.m.Range(func(_, v any) bool {
bb := v.(*bytesutil.ByteBuffer) bb := v.(*bytesutil.ByteBuffer)
_, err := sw.bw.Write(bb.B) _, err := sw.bw.Write(bb.B)
return err == nil return err == nil

View file

@ -757,13 +757,13 @@ func evalExprsInParallel(qt *querytracer.Tracer, ec *EvalConfig, es []metricsql.
return rvs, nil return rvs, nil
} }
func evalRollupFuncArgs(qt *querytracer.Tracer, ec *EvalConfig, fe *metricsql.FuncExpr) ([]interface{}, *metricsql.RollupExpr, error) { func evalRollupFuncArgs(qt *querytracer.Tracer, ec *EvalConfig, fe *metricsql.FuncExpr) ([]any, *metricsql.RollupExpr, error) {
var re *metricsql.RollupExpr var re *metricsql.RollupExpr
rollupArgIdx := metricsql.GetRollupArgIdx(fe) rollupArgIdx := metricsql.GetRollupArgIdx(fe)
if len(fe.Args) <= rollupArgIdx { if len(fe.Args) <= rollupArgIdx {
return nil, nil, fmt.Errorf("expecting at least %d args to %q; got %d args; expr: %q", rollupArgIdx+1, fe.Name, len(fe.Args), fe.AppendString(nil)) return nil, nil, fmt.Errorf("expecting at least %d args to %q; got %d args; expr: %q", rollupArgIdx+1, fe.Name, len(fe.Args), fe.AppendString(nil))
} }
args := make([]interface{}, len(fe.Args)) args := make([]any, len(fe.Args))
for i, arg := range fe.Args { for i, arg := range fe.Args {
if i == rollupArgIdx { if i == rollupArgIdx {
re = getRollupExprArg(arg) re = getRollupExprArg(arg)

View file

@ -956,10 +956,10 @@ func derivValues(values []float64, timestamps []int64) {
values[len(values)-1] = prevDeriv values[len(values)-1] = prevDeriv
} }
type newRollupFunc func(args []interface{}) (rollupFunc, error) type newRollupFunc func(args []any) (rollupFunc, error)
func newRollupFuncOneArg(rf rollupFunc) newRollupFunc { func newRollupFuncOneArg(rf rollupFunc) newRollupFunc {
return func(args []interface{}) (rollupFunc, error) { return func(args []any) (rollupFunc, error) {
if err := expectRollupArgsNum(args, 1); err != nil { if err := expectRollupArgsNum(args, 1); err != nil {
return nil, err return nil, err
} }
@ -968,7 +968,7 @@ func newRollupFuncOneArg(rf rollupFunc) newRollupFunc {
} }
func newRollupFuncTwoArgs(rf rollupFunc) newRollupFunc { func newRollupFuncTwoArgs(rf rollupFunc) newRollupFunc {
return func(args []interface{}) (rollupFunc, error) { return func(args []any) (rollupFunc, error) {
if err := expectRollupArgsNum(args, 2); err != nil { if err := expectRollupArgsNum(args, 2); err != nil {
return nil, err return nil, err
} }
@ -977,7 +977,7 @@ func newRollupFuncTwoArgs(rf rollupFunc) newRollupFunc {
} }
func newRollupFuncOneOrTwoArgs(rf rollupFunc) newRollupFunc { func newRollupFuncOneOrTwoArgs(rf rollupFunc) newRollupFunc {
return func(args []interface{}) (rollupFunc, error) { return func(args []any) (rollupFunc, error) {
if len(args) < 1 || len(args) > 2 { if len(args) < 1 || len(args) > 2 {
return nil, fmt.Errorf("unexpected number of args; got %d; want 1...2", len(args)) return nil, fmt.Errorf("unexpected number of args; got %d; want 1...2", len(args))
} }
@ -985,7 +985,7 @@ func newRollupFuncOneOrTwoArgs(rf rollupFunc) newRollupFunc {
} }
} }
func newRollupHoltWinters(args []interface{}) (rollupFunc, error) { func newRollupHoltWinters(args []any) (rollupFunc, error) {
if err := expectRollupArgsNum(args, 3); err != nil { if err := expectRollupArgsNum(args, 3); err != nil {
return nil, err return nil, err
} }
@ -1035,7 +1035,7 @@ func newRollupHoltWinters(args []interface{}) (rollupFunc, error) {
return rf, nil return rf, nil
} }
func newRollupPredictLinear(args []interface{}) (rollupFunc, error) { func newRollupPredictLinear(args []any) (rollupFunc, error) {
if err := expectRollupArgsNum(args, 2); err != nil { if err := expectRollupArgsNum(args, 2); err != nil {
return nil, err return nil, err
} }
@ -1106,7 +1106,7 @@ func areConstValues(values []float64) bool {
return true return true
} }
func newRollupDurationOverTime(args []interface{}) (rollupFunc, error) { func newRollupDurationOverTime(args []any) (rollupFunc, error) {
if err := expectRollupArgsNum(args, 2); err != nil { if err := expectRollupArgsNum(args, 2); err != nil {
return nil, err return nil, err
} }
@ -1136,7 +1136,7 @@ func newRollupDurationOverTime(args []interface{}) (rollupFunc, error) {
return rf, nil return rf, nil
} }
func newRollupShareLE(args []interface{}) (rollupFunc, error) { func newRollupShareLE(args []any) (rollupFunc, error) {
return newRollupAvgFilter(args, countFilterLE) return newRollupAvgFilter(args, countFilterLE)
} }
@ -1150,7 +1150,7 @@ func countFilterLE(values []float64, le float64) float64 {
return float64(n) return float64(n)
} }
func newRollupShareGT(args []interface{}) (rollupFunc, error) { func newRollupShareGT(args []any) (rollupFunc, error) {
return newRollupAvgFilter(args, countFilterGT) return newRollupAvgFilter(args, countFilterGT)
} }
@ -1164,7 +1164,7 @@ func countFilterGT(values []float64, gt float64) float64 {
return float64(n) return float64(n)
} }
func newRollupShareEQ(args []interface{}) (rollupFunc, error) { func newRollupShareEQ(args []any) (rollupFunc, error) {
return newRollupAvgFilter(args, countFilterEQ) return newRollupAvgFilter(args, countFilterEQ)
} }
@ -1218,7 +1218,7 @@ func countFilterNE(values []float64, ne float64) float64 {
return float64(n) return float64(n)
} }
func newRollupAvgFilter(args []interface{}, f func(values []float64, limit float64) float64) (rollupFunc, error) { func newRollupAvgFilter(args []any, f func(values []float64, limit float64) float64) (rollupFunc, error) {
rf, err := newRollupFilter(args, f) rf, err := newRollupFilter(args, f)
if err != nil { if err != nil {
return nil, err return nil, err
@ -1229,35 +1229,35 @@ func newRollupAvgFilter(args []interface{}, f func(values []float64, limit float
}, nil }, nil
} }
func newRollupCountEQ(args []interface{}) (rollupFunc, error) { func newRollupCountEQ(args []any) (rollupFunc, error) {
return newRollupFilter(args, countFilterEQ) return newRollupFilter(args, countFilterEQ)
} }
func newRollupCountLE(args []interface{}) (rollupFunc, error) { func newRollupCountLE(args []any) (rollupFunc, error) {
return newRollupFilter(args, countFilterLE) return newRollupFilter(args, countFilterLE)
} }
func newRollupCountGT(args []interface{}) (rollupFunc, error) { func newRollupCountGT(args []any) (rollupFunc, error) {
return newRollupFilter(args, countFilterGT) return newRollupFilter(args, countFilterGT)
} }
func newRollupCountNE(args []interface{}) (rollupFunc, error) { func newRollupCountNE(args []any) (rollupFunc, error) {
return newRollupFilter(args, countFilterNE) return newRollupFilter(args, countFilterNE)
} }
func newRollupSumEQ(args []interface{}) (rollupFunc, error) { func newRollupSumEQ(args []any) (rollupFunc, error) {
return newRollupFilter(args, sumFilterEQ) return newRollupFilter(args, sumFilterEQ)
} }
func newRollupSumLE(args []interface{}) (rollupFunc, error) { func newRollupSumLE(args []any) (rollupFunc, error) {
return newRollupFilter(args, sumFilterLE) return newRollupFilter(args, sumFilterLE)
} }
func newRollupSumGT(args []interface{}) (rollupFunc, error) { func newRollupSumGT(args []any) (rollupFunc, error) {
return newRollupFilter(args, sumFilterGT) return newRollupFilter(args, sumFilterGT)
} }
func newRollupFilter(args []interface{}, f func(values []float64, limit float64) float64) (rollupFunc, error) { func newRollupFilter(args []any, f func(values []float64, limit float64) float64) (rollupFunc, error) {
if err := expectRollupArgsNum(args, 2); err != nil { if err := expectRollupArgsNum(args, 2); err != nil {
return nil, err return nil, err
} }
@ -1278,7 +1278,7 @@ func newRollupFilter(args []interface{}, f func(values []float64, limit float64)
return rf, nil return rf, nil
} }
func newRollupHoeffdingBoundLower(args []interface{}) (rollupFunc, error) { func newRollupHoeffdingBoundLower(args []any) (rollupFunc, error) {
if err := expectRollupArgsNum(args, 2); err != nil { if err := expectRollupArgsNum(args, 2); err != nil {
return nil, err return nil, err
} }
@ -1293,7 +1293,7 @@ func newRollupHoeffdingBoundLower(args []interface{}) (rollupFunc, error) {
return rf, nil return rf, nil
} }
func newRollupHoeffdingBoundUpper(args []interface{}) (rollupFunc, error) { func newRollupHoeffdingBoundUpper(args []any) (rollupFunc, error) {
if err := expectRollupArgsNum(args, 2); err != nil { if err := expectRollupArgsNum(args, 2); err != nil {
return nil, err return nil, err
} }
@ -1338,7 +1338,7 @@ func rollupHoeffdingBoundInternal(rfa *rollupFuncArg, phis []float64) (float64,
return bound, vAvg return bound, vAvg
} }
func newRollupQuantiles(args []interface{}) (rollupFunc, error) { func newRollupQuantiles(args []any) (rollupFunc, error) {
if len(args) < 3 { if len(args) < 3 {
return nil, fmt.Errorf("unexpected number of args: %d; want at least 3 args", len(args)) return nil, fmt.Errorf("unexpected number of args: %d; want at least 3 args", len(args))
} }
@ -1405,7 +1405,7 @@ func rollupOutlierIQR(rfa *rollupFuncArg) float64 {
return nan return nan
} }
func newRollupQuantile(args []interface{}) (rollupFunc, error) { func newRollupQuantile(args []any) (rollupFunc, error) {
if err := expectRollupArgsNum(args, 2); err != nil { if err := expectRollupArgsNum(args, 2); err != nil {
return nil, err return nil, err
} }
@ -1445,7 +1445,7 @@ func mad(values []float64) float64 {
return v return v
} }
func newRollupCountValues(args []interface{}) (rollupFunc, error) { func newRollupCountValues(args []any) (rollupFunc, error) {
if err := expectRollupArgsNum(args, 2); err != nil { if err := expectRollupArgsNum(args, 2); err != nil {
return nil, err return nil, err
} }
@ -2389,7 +2389,7 @@ func rollupFake(_ *rollupFuncArg) float64 {
return 0 return 0
} }
func getScalar(arg interface{}, argNum int) ([]float64, error) { func getScalar(arg any, argNum int) ([]float64, error) {
ts, ok := arg.([]*timeseries) ts, ok := arg.([]*timeseries)
if !ok { if !ok {
return nil, fmt.Errorf(`unexpected type for arg #%d; got %T; want %T`, argNum+1, arg, ts) return nil, fmt.Errorf(`unexpected type for arg #%d; got %T; want %T`, argNum+1, arg, ts)
@ -2400,7 +2400,7 @@ func getScalar(arg interface{}, argNum int) ([]float64, error) {
return ts[0].Values, nil return ts[0].Values, nil
} }
func getIntNumber(arg interface{}, argNum int) (int, error) { func getIntNumber(arg any, argNum int) (int, error) {
v, err := getScalar(arg, argNum) v, err := getScalar(arg, argNum)
if err != nil { if err != nil {
return 0, err return 0, err
@ -2425,7 +2425,7 @@ func getString(tss []*timeseries, argNum int) (string, error) {
return string(ts.MetricName.MetricGroup), nil return string(ts.MetricName.MetricGroup), nil
} }
func expectRollupArgsNum(args []interface{}, expectedNum int) error { func expectRollupArgsNum(args []any, expectedNum int) error {
if len(args) == expectedNum { if len(args) == expectedNum {
return nil return nil
} }

View file

@ -200,7 +200,7 @@ func TestDerivValues(t *testing.T) {
testRowsEqual(t, values, timestamps, valuesExpected, timestamps) testRowsEqual(t, values, timestamps, valuesExpected, timestamps)
} }
func testRollupFunc(t *testing.T, funcName string, args []interface{}, vExpected float64) { func testRollupFunc(t *testing.T, funcName string, args []any, vExpected float64) {
t.Helper() t.Helper()
nrf := getRollupFunc(funcName) nrf := getRollupFunc(funcName)
if nrf == nil { if nrf == nil {
@ -245,7 +245,7 @@ func TestRollupDurationOverTime(t *testing.T) {
Timestamps: []int64{123}, Timestamps: []int64{123},
}} }}
var me metricsql.MetricExpr var me metricsql.MetricExpr
args := []interface{}{&metricsql.RollupExpr{Expr: &me}, maxIntervals} args := []any{&metricsql.RollupExpr{Expr: &me}, maxIntervals}
testRollupFunc(t, "duration_over_time", args, dExpected) testRollupFunc(t, "duration_over_time", args, dExpected)
} }
f(-123, 0) f(-123, 0)
@ -266,7 +266,7 @@ func TestRollupShareLEOverTime(t *testing.T) {
Timestamps: []int64{123}, Timestamps: []int64{123},
}} }}
var me metricsql.MetricExpr var me metricsql.MetricExpr
args := []interface{}{&metricsql.RollupExpr{Expr: &me}, les} args := []any{&metricsql.RollupExpr{Expr: &me}, les}
testRollupFunc(t, "share_le_over_time", args, vExpected) testRollupFunc(t, "share_le_over_time", args, vExpected)
} }
@ -289,7 +289,7 @@ func TestRollupShareGTOverTime(t *testing.T) {
Timestamps: []int64{123}, Timestamps: []int64{123},
}} }}
var me metricsql.MetricExpr var me metricsql.MetricExpr
args := []interface{}{&metricsql.RollupExpr{Expr: &me}, gts} args := []any{&metricsql.RollupExpr{Expr: &me}, gts}
testRollupFunc(t, "share_gt_over_time", args, vExpected) testRollupFunc(t, "share_gt_over_time", args, vExpected)
} }
@ -312,7 +312,7 @@ func TestRollupShareEQOverTime(t *testing.T) {
Timestamps: []int64{123}, Timestamps: []int64{123},
}} }}
var me metricsql.MetricExpr var me metricsql.MetricExpr
args := []interface{}{&metricsql.RollupExpr{Expr: &me}, eqs} args := []any{&metricsql.RollupExpr{Expr: &me}, eqs}
testRollupFunc(t, "share_eq_over_time", args, vExpected) testRollupFunc(t, "share_eq_over_time", args, vExpected)
} }
@ -331,7 +331,7 @@ func TestRollupCountLEOverTime(t *testing.T) {
Timestamps: []int64{123}, Timestamps: []int64{123},
}} }}
var me metricsql.MetricExpr var me metricsql.MetricExpr
args := []interface{}{&metricsql.RollupExpr{Expr: &me}, les} args := []any{&metricsql.RollupExpr{Expr: &me}, les}
testRollupFunc(t, "count_le_over_time", args, vExpected) testRollupFunc(t, "count_le_over_time", args, vExpected)
} }
@ -354,7 +354,7 @@ func TestRollupCountGTOverTime(t *testing.T) {
Timestamps: []int64{123}, Timestamps: []int64{123},
}} }}
var me metricsql.MetricExpr var me metricsql.MetricExpr
args := []interface{}{&metricsql.RollupExpr{Expr: &me}, gts} args := []any{&metricsql.RollupExpr{Expr: &me}, gts}
testRollupFunc(t, "count_gt_over_time", args, vExpected) testRollupFunc(t, "count_gt_over_time", args, vExpected)
} }
@ -377,7 +377,7 @@ func TestRollupCountEQOverTime(t *testing.T) {
Timestamps: []int64{123}, Timestamps: []int64{123},
}} }}
var me metricsql.MetricExpr var me metricsql.MetricExpr
args := []interface{}{&metricsql.RollupExpr{Expr: &me}, eqs} args := []any{&metricsql.RollupExpr{Expr: &me}, eqs}
testRollupFunc(t, "count_eq_over_time", args, vExpected) testRollupFunc(t, "count_eq_over_time", args, vExpected)
} }
@ -396,7 +396,7 @@ func TestRollupCountNEOverTime(t *testing.T) {
Timestamps: []int64{123}, Timestamps: []int64{123},
}} }}
var me metricsql.MetricExpr var me metricsql.MetricExpr
args := []interface{}{&metricsql.RollupExpr{Expr: &me}, nes} args := []any{&metricsql.RollupExpr{Expr: &me}, nes}
testRollupFunc(t, "count_ne_over_time", args, vExpected) testRollupFunc(t, "count_ne_over_time", args, vExpected)
} }
@ -415,7 +415,7 @@ func TestRollupSumLEOverTime(t *testing.T) {
Timestamps: []int64{123}, Timestamps: []int64{123},
}} }}
var me metricsql.MetricExpr var me metricsql.MetricExpr
args := []interface{}{&metricsql.RollupExpr{Expr: &me}, les} args := []any{&metricsql.RollupExpr{Expr: &me}, les}
testRollupFunc(t, "sum_le_over_time", args, vExpected) testRollupFunc(t, "sum_le_over_time", args, vExpected)
} }
@ -438,7 +438,7 @@ func TestRollupSumGTOverTime(t *testing.T) {
Timestamps: []int64{123}, Timestamps: []int64{123},
}} }}
var me metricsql.MetricExpr var me metricsql.MetricExpr
args := []interface{}{&metricsql.RollupExpr{Expr: &me}, les} args := []any{&metricsql.RollupExpr{Expr: &me}, les}
testRollupFunc(t, "sum_gt_over_time", args, vExpected) testRollupFunc(t, "sum_gt_over_time", args, vExpected)
} }
@ -461,7 +461,7 @@ func TestRollupSumEQOverTime(t *testing.T) {
Timestamps: []int64{123}, Timestamps: []int64{123},
}} }}
var me metricsql.MetricExpr var me metricsql.MetricExpr
args := []interface{}{&metricsql.RollupExpr{Expr: &me}, les} args := []any{&metricsql.RollupExpr{Expr: &me}, les}
testRollupFunc(t, "sum_eq_over_time", args, vExpected) testRollupFunc(t, "sum_eq_over_time", args, vExpected)
} }
@ -484,7 +484,7 @@ func TestRollupQuantileOverTime(t *testing.T) {
Timestamps: []int64{123}, Timestamps: []int64{123},
}} }}
var me metricsql.MetricExpr var me metricsql.MetricExpr
args := []interface{}{phis, &metricsql.RollupExpr{Expr: &me}} args := []any{phis, &metricsql.RollupExpr{Expr: &me}}
testRollupFunc(t, "quantile_over_time", args, vExpected) testRollupFunc(t, "quantile_over_time", args, vExpected)
} }
@ -506,7 +506,7 @@ func TestRollupPredictLinear(t *testing.T) {
Timestamps: []int64{123}, Timestamps: []int64{123},
}} }}
var me metricsql.MetricExpr var me metricsql.MetricExpr
args := []interface{}{&metricsql.RollupExpr{Expr: &me}, secs} args := []any{&metricsql.RollupExpr{Expr: &me}, secs}
testRollupFunc(t, "predict_linear", args, vExpected) testRollupFunc(t, "predict_linear", args, vExpected)
} }
@ -545,7 +545,7 @@ func TestRollupHoltWinters(t *testing.T) {
Timestamps: []int64{123}, Timestamps: []int64{123},
}} }}
var me metricsql.MetricExpr var me metricsql.MetricExpr
args := []interface{}{&metricsql.RollupExpr{Expr: &me}, sfs, tfs} args := []any{&metricsql.RollupExpr{Expr: &me}, sfs, tfs}
testRollupFunc(t, "holt_winters", args, vExpected) testRollupFunc(t, "holt_winters", args, vExpected)
} }
@ -573,7 +573,7 @@ func TestRollupHoeffdingBoundLower(t *testing.T) {
Timestamps: []int64{123}, Timestamps: []int64{123},
}} }}
var me metricsql.MetricExpr var me metricsql.MetricExpr
args := []interface{}{phis, &metricsql.RollupExpr{Expr: &me}} args := []any{phis, &metricsql.RollupExpr{Expr: &me}}
testRollupFunc(t, "hoeffding_bound_lower", args, vExpected) testRollupFunc(t, "hoeffding_bound_lower", args, vExpected)
} }
@ -594,7 +594,7 @@ func TestRollupHoeffdingBoundUpper(t *testing.T) {
Timestamps: []int64{123}, Timestamps: []int64{123},
}} }}
var me metricsql.MetricExpr var me metricsql.MetricExpr
args := []interface{}{phis, &metricsql.RollupExpr{Expr: &me}} args := []any{phis, &metricsql.RollupExpr{Expr: &me}}
testRollupFunc(t, "hoeffding_bound_upper", args, vExpected) testRollupFunc(t, "hoeffding_bound_upper", args, vExpected)
} }
@ -611,7 +611,7 @@ func TestRollupNewRollupFuncSuccess(t *testing.T) {
f := func(funcName string, vExpected float64) { f := func(funcName string, vExpected float64) {
t.Helper() t.Helper()
var me metricsql.MetricExpr var me metricsql.MetricExpr
args := []interface{}{&metricsql.RollupExpr{Expr: &me}} args := []any{&metricsql.RollupExpr{Expr: &me}}
testRollupFunc(t, funcName, args, vExpected) testRollupFunc(t, funcName, args, vExpected)
} }
@ -668,7 +668,7 @@ func TestRollupNewRollupFuncError(t *testing.T) {
t.Fatalf("expecting nil func; got %p", nrf) t.Fatalf("expecting nil func; got %p", nrf)
} }
f := func(funcName string, args []interface{}) { f := func(funcName string, args []any) {
t.Helper() t.Helper()
nrf := getRollupFunc(funcName) nrf := getRollupFunc(funcName)
@ -694,13 +694,13 @@ func TestRollupNewRollupFuncError(t *testing.T) {
Timestamps: []int64{123}, Timestamps: []int64{123},
}} }}
me := &metricsql.MetricExpr{} me := &metricsql.MetricExpr{}
f("holt_winters", []interface{}{123, 123, 321}) f("holt_winters", []any{123, 123, 321})
f("holt_winters", []interface{}{me, 123, 321}) f("holt_winters", []any{me, 123, 321})
f("holt_winters", []interface{}{me, scalarTs, 321}) f("holt_winters", []any{me, scalarTs, 321})
f("predict_linear", []interface{}{123, 123}) f("predict_linear", []any{123, 123})
f("predict_linear", []interface{}{me, 123}) f("predict_linear", []any{me, 123})
f("quantile_over_time", []interface{}{123, 123}) f("quantile_over_time", []any{123, 123})
f("quantiles_over_time", []interface{}{123, 123}) f("quantiles_over_time", []any{123, 123})
} }
func TestRollupNoWindowNoPoints(t *testing.T) { func TestRollupNoWindowNoPoints(t *testing.T) {

View file

@ -65,7 +65,7 @@ func (c *Cache) MustStop() {
} }
// RemoveBlocksForPart removes all the blocks for the given part from the cache. // RemoveBlocksForPart removes all the blocks for the given part from the cache.
func (c *Cache) RemoveBlocksForPart(p interface{}) { func (c *Cache) RemoveBlocksForPart(p any) {
for _, shard := range c.shards { for _, shard := range c.shards {
shard.RemoveBlocksForPart(p) shard.RemoveBlocksForPart(p)
} }
@ -185,7 +185,7 @@ type cache struct {
mu sync.Mutex mu sync.Mutex
// m contains cached blocks keyed by Key.Part and then by Key.Offset // m contains cached blocks keyed by Key.Part and then by Key.Offset
m map[interface{}]map[uint64]*cacheEntry m map[any]map[uint64]*cacheEntry
// perKeyMisses contains per-block cache misses. // perKeyMisses contains per-block cache misses.
// //
@ -199,7 +199,7 @@ type cache struct {
// Key represents a key, which uniquely identifies the Block. // Key represents a key, which uniquely identifies the Block.
type Key struct { type Key struct {
// Part must contain a pointer to part structure where the block belongs to. // Part must contain a pointer to part structure where the block belongs to.
Part interface{} Part any
// Offset is the offset of the block in the part. // Offset is the offset of the block in the part.
Offset uint64 Offset uint64
@ -233,12 +233,12 @@ type cacheEntry struct {
func newCache(getMaxSizeBytes func() int) *cache { func newCache(getMaxSizeBytes func() int) *cache {
var c cache var c cache
c.getMaxSizeBytes = getMaxSizeBytes c.getMaxSizeBytes = getMaxSizeBytes
c.m = make(map[interface{}]map[uint64]*cacheEntry) c.m = make(map[any]map[uint64]*cacheEntry)
c.perKeyMisses = make(map[Key]int) c.perKeyMisses = make(map[Key]int)
return &c return &c
} }
func (c *cache) RemoveBlocksForPart(p interface{}) { func (c *cache) RemoveBlocksForPart(p any) {
c.mu.Lock() c.mu.Lock()
defer c.mu.Unlock() defer c.mu.Unlock()
@ -398,13 +398,13 @@ func (lah *lastAccessHeap) Less(i, j int) bool {
h := *lah h := *lah
return h[i].lastAccessTime < h[j].lastAccessTime return h[i].lastAccessTime < h[j].lastAccessTime
} }
func (lah *lastAccessHeap) Push(x interface{}) { func (lah *lastAccessHeap) Push(x any) {
e := x.(*cacheEntry) e := x.(*cacheEntry)
h := *lah h := *lah
e.heapIdx = len(h) e.heapIdx = len(h)
*lah = append(h, e) *lah = append(h, e)
} }
func (lah *lastAccessHeap) Pop() interface{} { func (lah *lastAccessHeap) Pop() any {
h := *lah h := *lah
e := h[len(h)-1] e := h[len(h)-1]

View file

@ -27,7 +27,7 @@ func TestCache(t *testing.T) {
t.Fatalf("unexpected SizeMaxBytes(); got %d; want %d", n, sizeMaxBytes) t.Fatalf("unexpected SizeMaxBytes(); got %d; want %d", n, sizeMaxBytes)
} }
offset := uint64(1234) offset := uint64(1234)
part := (interface{})("foobar") part := (any)("foobar")
k := Key{ k := Key{
Offset: offset, Offset: offset,
Part: part, Part: part,
@ -145,7 +145,7 @@ func TestCacheConcurrentAccess(_ *testing.T) {
func testCacheSetGet(c *Cache, worker int) { func testCacheSetGet(c *Cache, worker int) {
for i := 0; i < 1000; i++ { for i := 0; i < 1000; i++ {
part := (interface{})(i) part := (any)(i)
b := testBlock{} b := testBlock{}
k := Key{ k := Key{
Offset: uint64(worker*1000 + i), Offset: uint64(worker*1000 + i),

View file

@ -71,7 +71,7 @@ func (fsm *FastStringMatcher) Match(s string) bool {
// Perform a global cleanup for fsm.m by removing items, which weren't accessed during the last 5 minutes. // Perform a global cleanup for fsm.m by removing items, which weren't accessed during the last 5 minutes.
m := &fsm.m m := &fsm.m
deadline := ct - uint64(cacheExpireDuration.Seconds()) deadline := ct - uint64(cacheExpireDuration.Seconds())
m.Range(func(k, v interface{}) bool { m.Range(func(k, v any) bool {
e := v.(*fsmEntry) e := v.(*fsmEntry)
if e.lastAccessTime.Load() < deadline { if e.lastAccessTime.Load() < deadline {
m.Delete(k) m.Delete(k)

View file

@ -82,7 +82,7 @@ func (fst *FastStringTransformer) Transform(s string) string {
// Perform a global cleanup for fst.m by removing items, which weren't accessed during the last 5 minutes. // Perform a global cleanup for fst.m by removing items, which weren't accessed during the last 5 minutes.
m := &fst.m m := &fst.m
deadline := ct - uint64(cacheExpireDuration.Seconds()) deadline := ct - uint64(cacheExpireDuration.Seconds())
m.Range(func(k, v interface{}) bool { m.Range(func(k, v any) bool {
e := v.(*fstEntry) e := v.(*fstEntry)
if e.lastAccessTime.Load() < deadline { if e.lastAccessTime.Load() < deadline {
m.Delete(k) m.Delete(k)

View file

@ -186,7 +186,7 @@ func whetherToCloseConn(r *http.Request) bool {
return ok && fasttime.UnixTimestamp() > *deadline return ok && fasttime.UnixTimestamp() > *deadline
} }
var connDeadlineTimeKey = interface{}("connDeadlineSecs") var connDeadlineTimeKey = any("connDeadlineSecs")
// Stop stops the http server on the given addrs, which has been started via Serve func. // Stop stops the http server on the given addrs, which has been started via Serve func.
func Stop(addrs []string) error { func Stop(addrs []string) error {
@ -617,7 +617,7 @@ func (rwa *responseWriterWithAbort) abort() {
} }
// Errorf writes formatted error message to w and to logger. // Errorf writes formatted error message to w and to logger.
func Errorf(w http.ResponseWriter, r *http.Request, format string, args ...interface{}) { func Errorf(w http.ResponseWriter, r *http.Request, format string, args ...any) {
errStr := fmt.Sprintf(format, args...) errStr := fmt.Sprintf(format, args...)
remoteAddr := GetQuotedRemoteAddr(r) remoteAddr := GetQuotedRemoteAddr(r)
requestURI := GetRequestURI(r) requestURI := GetRequestURI(r)

View file

@ -95,45 +95,45 @@ func StdErrorLogger() *log.Logger {
} }
// Infof logs info message. // Infof logs info message.
func Infof(format string, args ...interface{}) { func Infof(format string, args ...any) {
logLevel("INFO", format, args) logLevel("INFO", format, args)
} }
// Warnf logs warn message. // Warnf logs warn message.
func Warnf(format string, args ...interface{}) { func Warnf(format string, args ...any) {
logLevel("WARN", format, args) logLevel("WARN", format, args)
} }
// Errorf logs error message. // Errorf logs error message.
func Errorf(format string, args ...interface{}) { func Errorf(format string, args ...any) {
logLevel("ERROR", format, args) logLevel("ERROR", format, args)
} }
// WarnfSkipframes logs warn message and skips the given number of frames for the caller. // WarnfSkipframes logs warn message and skips the given number of frames for the caller.
func WarnfSkipframes(skipframes int, format string, args ...interface{}) { func WarnfSkipframes(skipframes int, format string, args ...any) {
logLevelSkipframes(skipframes, "WARN", format, args) logLevelSkipframes(skipframes, "WARN", format, args)
} }
// ErrorfSkipframes logs error message and skips the given number of frames for the caller. // ErrorfSkipframes logs error message and skips the given number of frames for the caller.
func ErrorfSkipframes(skipframes int, format string, args ...interface{}) { func ErrorfSkipframes(skipframes int, format string, args ...any) {
logLevelSkipframes(skipframes, "ERROR", format, args) logLevelSkipframes(skipframes, "ERROR", format, args)
} }
// Fatalf logs fatal message and terminates the app. // Fatalf logs fatal message and terminates the app.
func Fatalf(format string, args ...interface{}) { func Fatalf(format string, args ...any) {
logLevel("FATAL", format, args) logLevel("FATAL", format, args)
} }
// Panicf logs panic message and panics. // Panicf logs panic message and panics.
func Panicf(format string, args ...interface{}) { func Panicf(format string, args ...any) {
logLevel("PANIC", format, args) logLevel("PANIC", format, args)
} }
func logLevel(level, format string, args []interface{}) { func logLevel(level, format string, args []any) {
logLevelSkipframes(1, level, format, args) logLevelSkipframes(1, level, format, args)
} }
func logLevelSkipframes(skipframes int, level, format string, args []interface{}) { func logLevelSkipframes(skipframes int, level, format string, args []any) {
if shouldSkipLog(level) { if shouldSkipLog(level) {
return return
} }
@ -141,7 +141,7 @@ func logLevelSkipframes(skipframes int, level, format string, args []interface{}
logMessage(level, msg, 3+skipframes) logMessage(level, msg, 3+skipframes)
} }
func formatLogMessage(maxArgLen int, format string, args []interface{}) string { func formatLogMessage(maxArgLen int, format string, args []any) string {
x := format x := format
// Limit the length of every string-like arg in order to prevent from too long log messages // Limit the length of every string-like arg in order to prevent from too long log messages
for i := range args { for i := range args {
@ -217,7 +217,7 @@ type logWriter struct {
} }
func (lw *logWriter) Write(p []byte) (int, error) { func (lw *logWriter) Write(p []byte) (int, error) {
logLevelSkipframes(2, "ERROR", "%s", []interface{}{p}) logLevelSkipframes(2, "ERROR", "%s", []any{p})
return len(p), nil return len(p), nil
} }

View file

@ -6,7 +6,7 @@ import (
) )
func TestFormatLogMessage(t *testing.T) { func TestFormatLogMessage(t *testing.T) {
f := func(format string, args []interface{}, maxArgLen int, expectedResult string) { f := func(format string, args []any, maxArgLen int, expectedResult string) {
t.Helper() t.Helper()
result := formatLogMessage(maxArgLen, format, args) result := formatLogMessage(maxArgLen, format, args)
if result != expectedResult { if result != expectedResult {
@ -18,8 +18,8 @@ func TestFormatLogMessage(t *testing.T) {
f("foobar", nil, 1, "foobar") f("foobar", nil, 1, "foobar")
// Format args not exceeding the maxArgLen // Format args not exceeding the maxArgLen
f("foo: %d, %s, %s, %s", []interface{}{123, "bar", []byte("baz"), fmt.Errorf("abc")}, 3, "foo: 123, bar, baz, abc") f("foo: %d, %s, %s, %s", []any{123, "bar", []byte("baz"), fmt.Errorf("abc")}, 3, "foo: 123, bar, baz, abc")
// Format args exceeding the maxArgLen // Format args exceeding the maxArgLen
f("foo: %s, %q, %s", []interface{}{"abcde", fmt.Errorf("foo bar baz"), "xx"}, 4, `foo: a..e, "f..z", xx`) f("foo: %s, %q, %s", []any{"abcde", fmt.Errorf("foo bar baz"), "xx"}, 4, `foo: a..e, "f..z", xx`)
} }

View file

@ -49,7 +49,7 @@ func newLogThrottler(throttle time.Duration) *LogThrottler {
} }
// Errorf logs error message. // Errorf logs error message.
func (lt *LogThrottler) Errorf(format string, args ...interface{}) { func (lt *LogThrottler) Errorf(format string, args ...any) {
select { select {
case lt.ch <- struct{}{}: case lt.ch <- struct{}{}:
ErrorfSkipframes(1, format, args...) ErrorfSkipframes(1, format, args...)
@ -58,7 +58,7 @@ func (lt *LogThrottler) Errorf(format string, args ...interface{}) {
} }
// Warnf logs warn message. // Warnf logs warn message.
func (lt *LogThrottler) Warnf(format string, args ...interface{}) { func (lt *LogThrottler) Warnf(format string, args ...any) {
select { select {
case lt.ch <- struct{}{}: case lt.ch <- struct{}{}:
WarnfSkipframes(1, format, args...) WarnfSkipframes(1, format, args...)

View file

@ -302,12 +302,12 @@ func (h *blockStreamReadersHeap) Swap(i, j int) {
x[i], x[j] = x[j], x[i] x[i], x[j] = x[j], x[i]
} }
func (h *blockStreamReadersHeap) Push(v interface{}) { func (h *blockStreamReadersHeap) Push(v any) {
bsr := v.(*blockStreamReader) bsr := v.(*blockStreamReader)
*h = append(*h, bsr) *h = append(*h, bsr)
} }
func (h *blockStreamReadersHeap) Pop() interface{} { func (h *blockStreamReadersHeap) Pop() any {
x := *h x := *h
bsr := x[len(x)-1] bsr := x[len(x)-1]
x[len(x)-1] = nil x[len(x)-1] = nil

View file

@ -308,13 +308,13 @@ func (lah *lastAccessHeap) Less(i, j int) bool {
h := *lah h := *lah
return h[i].lastAccessTime < h[j].lastAccessTime return h[i].lastAccessTime < h[j].lastAccessTime
} }
func (lah *lastAccessHeap) Push(x interface{}) { func (lah *lastAccessHeap) Push(x any) {
e := x.(*cacheEntry) e := x.(*cacheEntry)
h := *lah h := *lah
e.heapIdx = len(h) e.heapIdx = len(h)
*lah = append(h, e) *lah = append(h, e)
} }
func (lah *lastAccessHeap) Pop() interface{} { func (lah *lastAccessHeap) Pop() any {
h := *lah h := *lah
e := h[len(h)-1] e := h[len(h)-1]

View file

@ -44,7 +44,7 @@ func mergeBlockStreams(ph *partHeader, bsw *blockStreamWriter, bsrs []*blockStre
} }
var bsmPool = &sync.Pool{ var bsmPool = &sync.Pool{
New: func() interface{} { New: func() any {
return &blockStreamMerger{} return &blockStreamMerger{}
}, },
} }
@ -238,14 +238,14 @@ func (bh *bsrHeap) Less(i, j int) bool {
return x[i].CurrItem() < x[j].CurrItem() return x[i].CurrItem() < x[j].CurrItem()
} }
func (bh *bsrHeap) Pop() interface{} { func (bh *bsrHeap) Pop() any {
a := *bh a := *bh
v := a[len(a)-1] v := a[len(a)-1]
*bh = a[:len(a)-1] *bh = a[:len(a)-1]
return v return v
} }
func (bh *bsrHeap) Push(x interface{}) { func (bh *bsrHeap) Push(x any) {
v := x.(*blockStreamReader) v := x.(*blockStreamReader)
*bh = append(*bh, v) *bh = append(*bh, v)
} }

View file

@ -205,11 +205,11 @@ func (psh *partSearchHeap) Swap(i, j int) {
x[i], x[j] = x[j], x[i] x[i], x[j] = x[j], x[i]
} }
func (psh *partSearchHeap) Push(x interface{}) { func (psh *partSearchHeap) Push(x any) {
*psh = append(*psh, x.(*partSearch)) *psh = append(*psh, x.(*partSearch))
} }
func (psh *partSearchHeap) Pop() interface{} { func (psh *partSearchHeap) Pop() any {
a := *psh a := *psh
v := a[len(a)-1] v := a[len(a)-1]
*psh = a[:len(a)-1] *psh = a[:len(a)-1]

View file

@ -43,12 +43,12 @@ func NewSecret(s string) *Secret {
// MarshalYAML implements yaml.Marshaler interface. // MarshalYAML implements yaml.Marshaler interface.
// //
// It substitutes the secret with "<secret>" string. // It substitutes the secret with "<secret>" string.
func (s *Secret) MarshalYAML() (interface{}, error) { func (s *Secret) MarshalYAML() (any, error) {
return "<secret>", nil return "<secret>", nil
} }
// UnmarshalYAML implements yaml.Unmarshaler interface. // UnmarshalYAML implements yaml.Unmarshaler interface.
func (s *Secret) UnmarshalYAML(f func(interface{}) error) error { func (s *Secret) UnmarshalYAML(f func(any) error) error {
var secret string var secret string
if err := f(&secret); err != nil { if err := f(&secret); err != nil {
return fmt.Errorf("cannot parse secret: %w", err) return fmt.Errorf("cannot parse secret: %w", err)

View file

@ -61,8 +61,8 @@ type MultiLineRegex struct {
} }
// UnmarshalYAML unmarshals mlr from YAML passed to f. // UnmarshalYAML unmarshals mlr from YAML passed to f.
func (mlr *MultiLineRegex) UnmarshalYAML(f func(interface{}) error) error { func (mlr *MultiLineRegex) UnmarshalYAML(f func(any) error) error {
var v interface{} var v any
if err := f(&v); err != nil { if err := f(&v); err != nil {
return fmt.Errorf("cannot parse multiline regex: %w", err) return fmt.Errorf("cannot parse multiline regex: %w", err)
} }
@ -74,12 +74,12 @@ func (mlr *MultiLineRegex) UnmarshalYAML(f func(interface{}) error) error {
return nil return nil
} }
func stringValue(v interface{}) (string, error) { func stringValue(v any) (string, error) {
if v == nil { if v == nil {
return "null", nil return "null", nil
} }
switch x := v.(type) { switch x := v.(type) {
case []interface{}: case []any:
a := make([]string, len(x)) a := make([]string, len(x))
for i, xx := range x { for i, xx := range x {
s, err := stringValue(xx) s, err := stringValue(xx)
@ -106,7 +106,7 @@ func stringValue(v interface{}) (string, error) {
} }
// MarshalYAML marshals mlr to YAML. // MarshalYAML marshals mlr to YAML.
func (mlr *MultiLineRegex) MarshalYAML() (interface{}, error) { func (mlr *MultiLineRegex) MarshalYAML() (any, error) {
if strings.ContainsAny(mlr.S, "([") { if strings.ContainsAny(mlr.S, "([") {
// The mlr.S contains groups. Fall back to returning the regexp as is without splitting it into parts. // The mlr.S contains groups. Fall back to returning the regexp as is without splitting it into parts.
// This fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2928 . // This fixes https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2928 .

View file

@ -8,7 +8,7 @@ import (
) )
var graphiteMatchesPool = &sync.Pool{ var graphiteMatchesPool = &sync.Pool{
New: func() interface{} { New: func() any {
return &graphiteMatches{} return &graphiteMatches{}
}, },
} }

View file

@ -53,7 +53,7 @@ func (ie *IfExpression) Parse(s string) error {
// UnmarshalJSON unmarshals ie from JSON data. // UnmarshalJSON unmarshals ie from JSON data.
func (ie *IfExpression) UnmarshalJSON(data []byte) error { func (ie *IfExpression) UnmarshalJSON(data []byte) error {
var v interface{} var v any
if err := json.Unmarshal(data, &v); err != nil { if err := json.Unmarshal(data, &v); err != nil {
return err return err
} }
@ -72,15 +72,15 @@ func (ie *IfExpression) MarshalJSON() ([]byte, error) {
} }
// UnmarshalYAML unmarshals ie from YAML passed to f. // UnmarshalYAML unmarshals ie from YAML passed to f.
func (ie *IfExpression) UnmarshalYAML(f func(interface{}) error) error { func (ie *IfExpression) UnmarshalYAML(f func(any) error) error {
var v interface{} var v any
if err := f(&v); err != nil { if err := f(&v); err != nil {
return fmt.Errorf("cannot unmarshal `match` option: %w", err) return fmt.Errorf("cannot unmarshal `match` option: %w", err)
} }
return ie.unmarshalFromInterface(v) return ie.unmarshalFromInterface(v)
} }
func (ie *IfExpression) unmarshalFromInterface(v interface{}) error { func (ie *IfExpression) unmarshalFromInterface(v any) error {
ies := ie.ies[:0] ies := ie.ies[:0]
switch t := v.(type) { switch t := v.(type) {
case string: case string:
@ -89,7 +89,7 @@ func (ie *IfExpression) unmarshalFromInterface(v interface{}) error {
return fmt.Errorf("unexpected `match` option: %w", err) return fmt.Errorf("unexpected `match` option: %w", err)
} }
ies = append(ies, ieLocal) ies = append(ies, ieLocal)
case []interface{}: case []any:
for _, x := range t { for _, x := range t {
s, ok := x.(string) s, ok := x.(string)
if !ok { if !ok {
@ -109,7 +109,7 @@ func (ie *IfExpression) unmarshalFromInterface(v interface{}) error {
} }
// MarshalYAML marshals ie to YAML // MarshalYAML marshals ie to YAML
func (ie *IfExpression) MarshalYAML() (interface{}, error) { func (ie *IfExpression) MarshalYAML() (any, error) {
if ie == nil || len(ie.ies) == 0 { if ie == nil || len(ie.ies) == 0 {
return nil, nil return nil, nil
} }
@ -198,7 +198,7 @@ func (ie *ifExpression) MarshalJSON() ([]byte, error) {
} }
// UnmarshalYAML unmarshals ie from YAML passed to f. // UnmarshalYAML unmarshals ie from YAML passed to f.
func (ie *ifExpression) UnmarshalYAML(f func(interface{}) error) error { func (ie *ifExpression) UnmarshalYAML(f func(any) error) error {
var s string var s string
if err := f(&s); err != nil { if err := f(&s); err != nil {
return fmt.Errorf("cannot unmarshal `if` option: %w", err) return fmt.Errorf("cannot unmarshal `if` option: %w", err)
@ -210,7 +210,7 @@ func (ie *ifExpression) UnmarshalYAML(f func(interface{}) error) error {
} }
// MarshalYAML marshals ie to YAML. // MarshalYAML marshals ie to YAML.
func (ie *ifExpression) MarshalYAML() (interface{}, error) { func (ie *ifExpression) MarshalYAML() (any, error) {
return ie.s, nil return ie.s, nil
} }

View file

@ -331,7 +331,7 @@ type ScrapeConfig struct {
} }
func (sc *ScrapeConfig) mustStart(baseDir string) { func (sc *ScrapeConfig) mustStart(baseDir string) {
swosFunc := func(metaLabels *promutils.Labels) interface{} { swosFunc := func(metaLabels *promutils.Labels) any {
target := metaLabels.Get("__address__") target := metaLabels.Get("__address__")
sw, err := sc.swc.getScrapeWork(target, nil, metaLabels) sw, err := sc.swc.getScrapeWork(target, nil, metaLabels)
if err != nil { if err != nil {

View file

@ -72,7 +72,7 @@ type apiConfig struct {
type refreshTokenFunc func() (string, time.Duration, error) type refreshTokenFunc func() (string, time.Duration, error)
func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) })
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -30,7 +30,7 @@ func (ac *apiConfig) mustStop() {
var configMap = discoveryutils.NewConfigMap() var configMap = discoveryutils.NewConfigMap()
func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) })
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -24,7 +24,7 @@ func (ac *apiConfig) mustStop() {
var configMap = discoveryutils.NewConfigMap() var configMap = discoveryutils.NewConfigMap()
func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) })
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -51,7 +51,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
} }
func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) })
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -22,7 +22,7 @@ type apiConfig struct {
} }
func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) })
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -26,7 +26,7 @@ type apiConfig struct {
} }
func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) })
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -35,8 +35,8 @@ type containerSpec struct {
} }
type serviceSpecMode struct { type serviceSpecMode struct {
Global interface{} Global any
Replicated interface{} Replicated any
} }
type serviceUpdateStatus struct { type serviceUpdateStatus struct {

View file

@ -102,7 +102,7 @@ func TestParseServicesResponse(t *testing.T) {
}, },
}, },
Mode: serviceSpecMode{ Mode: serviceSpecMode{
Replicated: map[string]interface{}{}, Replicated: map[string]any{},
}, },
}, },
Endpoint: serviceEndpoint{ Endpoint: serviceEndpoint{
@ -147,7 +147,7 @@ func TestAddServicesLabels(t *testing.T) {
}, },
}, },
Mode: serviceSpecMode{ Mode: serviceSpecMode{
Replicated: map[string]interface{}{}, Replicated: map[string]any{},
}, },
}, },
Endpoint: serviceEndpoint{ Endpoint: serviceEndpoint{

View file

@ -228,7 +228,7 @@ func TestAddTasksLabels(t *testing.T) {
}, },
}, },
Mode: serviceSpecMode{ Mode: serviceSpecMode{
Replicated: map[string]interface{}{}, Replicated: map[string]any{},
}, },
}, },
Endpoint: serviceEndpoint{ Endpoint: serviceEndpoint{

View file

@ -21,7 +21,7 @@ type apiConfig struct {
var configMap = discoveryutils.NewConfigMap() var configMap = discoveryutils.NewConfigMap()
func getAPIConfig(sdc *SDConfig) (*apiConfig, error) { func getAPIConfig(sdc *SDConfig) (*apiConfig, error) {
v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc) }) v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc) })
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -45,7 +45,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
} }
func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) })
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -25,7 +25,7 @@ type apiConfig struct {
var configMap = discoveryutils.NewConfigMap() var configMap = discoveryutils.NewConfigMap()
func getAPIConfig(sdc *SDConfig) (*apiConfig, error) { func getAPIConfig(sdc *SDConfig) (*apiConfig, error) {
v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc) }) v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc) })
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -32,8 +32,8 @@ type ZoneYAML struct {
} }
// UnmarshalYAML implements yaml.Unmarshaler // UnmarshalYAML implements yaml.Unmarshaler
func (z *ZoneYAML) UnmarshalYAML(unmarshal func(interface{}) error) error { func (z *ZoneYAML) UnmarshalYAML(unmarshal func(any) error) error {
var v interface{} var v any
if err := unmarshal(&v); err != nil { if err := unmarshal(&v); err != nil {
return err return err
} }
@ -41,7 +41,7 @@ func (z *ZoneYAML) UnmarshalYAML(unmarshal func(interface{}) error) error {
switch t := v.(type) { switch t := v.(type) {
case string: case string:
zones = []string{t} zones = []string{t}
case []interface{}: case []any:
for _, vv := range t { for _, vv := range t {
zone, ok := vv.(string) zone, ok := vv.(string)
if !ok { if !ok {
@ -57,7 +57,7 @@ func (z *ZoneYAML) UnmarshalYAML(unmarshal func(interface{}) error) error {
} }
// MarshalYAML implements yaml.Marshaler // MarshalYAML implements yaml.Marshaler
func (z ZoneYAML) MarshalYAML() (interface{}, error) { func (z ZoneYAML) MarshalYAML() (any, error) {
return z.Zones, nil return z.Zones, nil
} }

View file

@ -15,7 +15,7 @@ type apiConfig struct {
} }
func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) })
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -58,7 +58,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
} }
func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) })
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -65,7 +65,7 @@ type apiWatcher struct {
gw *groupWatcher gw *groupWatcher
// swosByURLWatcher contains per-urlWatcher maps of ScrapeWork objects for the given apiWatcher // swosByURLWatcher contains per-urlWatcher maps of ScrapeWork objects for the given apiWatcher
swosByURLWatcher map[*urlWatcher]map[string][]interface{} swosByURLWatcher map[*urlWatcher]map[string][]any
swosByURLWatcherLock sync.Mutex swosByURLWatcherLock sync.Mutex
swosCount *metrics.Counter swosCount *metrics.Counter
@ -94,7 +94,7 @@ func newAPIWatcher(apiServer string, ac *promauth.Config, sdc *SDConfig, swcFunc
role: role, role: role,
swcFunc: swcFunc, swcFunc: swcFunc,
gw: gw, gw: gw,
swosByURLWatcher: make(map[*urlWatcher]map[string][]interface{}), swosByURLWatcher: make(map[*urlWatcher]map[string][]any),
swosCount: metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_scrape_works{role=%q}`, role)), swosCount: metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_scrape_works{role=%q}`, role)),
} }
return aw, nil return aw, nil
@ -106,7 +106,7 @@ func (aw *apiWatcher) mustStart() {
aw.gw.apiWatcherInflightStartCalls.Add(-1) aw.gw.apiWatcherInflightStartCalls.Add(-1)
} }
func (aw *apiWatcher) updateSwosCount(multiplier int, swosByKey map[string][]interface{}) { func (aw *apiWatcher) updateSwosCount(multiplier int, swosByKey map[string][]any) {
n := 0 n := 0
for _, swos := range swosByKey { for _, swos := range swosByKey {
n += len(swos) n += len(swos)
@ -121,11 +121,11 @@ func (aw *apiWatcher) mustStop() {
for _, swosByKey := range aw.swosByURLWatcher { for _, swosByKey := range aw.swosByURLWatcher {
aw.updateSwosCount(-1, swosByKey) aw.updateSwosCount(-1, swosByKey)
} }
aw.swosByURLWatcher = make(map[*urlWatcher]map[string][]interface{}) aw.swosByURLWatcher = make(map[*urlWatcher]map[string][]any)
aw.swosByURLWatcherLock.Unlock() aw.swosByURLWatcherLock.Unlock()
} }
func (aw *apiWatcher) replaceScrapeWorks(uw *urlWatcher, swosByKey map[string][]interface{}) { func (aw *apiWatcher) replaceScrapeWorks(uw *urlWatcher, swosByKey map[string][]any) {
aw.swosByURLWatcherLock.Lock() aw.swosByURLWatcherLock.Lock()
aw.updateSwosCount(-1, aw.swosByURLWatcher[uw]) aw.updateSwosCount(-1, aw.swosByURLWatcher[uw])
aw.updateSwosCount(1, swosByKey) aw.updateSwosCount(1, swosByKey)
@ -133,11 +133,11 @@ func (aw *apiWatcher) replaceScrapeWorks(uw *urlWatcher, swosByKey map[string][]
aw.swosByURLWatcherLock.Unlock() aw.swosByURLWatcherLock.Unlock()
} }
func (aw *apiWatcher) updateScrapeWorks(uw *urlWatcher, swosByKey map[string][]interface{}) { func (aw *apiWatcher) updateScrapeWorks(uw *urlWatcher, swosByKey map[string][]any) {
aw.swosByURLWatcherLock.Lock() aw.swosByURLWatcherLock.Lock()
dst := aw.swosByURLWatcher[uw] dst := aw.swosByURLWatcher[uw]
if dst == nil { if dst == nil {
dst = make(map[string][]interface{}) dst = make(map[string][]any)
aw.swosByURLWatcher[uw] = dst aw.swosByURLWatcher[uw] = dst
} }
for key, swos := range swosByKey { for key, swos := range swosByKey {
@ -156,7 +156,7 @@ func (aw *apiWatcher) setScrapeWorks(uw *urlWatcher, key string, labelss []*prom
aw.swosByURLWatcherLock.Lock() aw.swosByURLWatcherLock.Lock()
swosByKey := aw.swosByURLWatcher[uw] swosByKey := aw.swosByURLWatcher[uw]
if swosByKey == nil { if swosByKey == nil {
swosByKey = make(map[string][]interface{}) swosByKey = make(map[string][]any)
aw.swosByURLWatcher[uw] = swosByKey aw.swosByURLWatcher[uw] = swosByKey
} }
aw.swosCount.Add(len(swos) - len(swosByKey[key])) aw.swosCount.Add(len(swos) - len(swosByKey[key]))
@ -178,9 +178,9 @@ func (aw *apiWatcher) removeScrapeWorks(uw *urlWatcher, key string) {
aw.swosByURLWatcherLock.Unlock() aw.swosByURLWatcherLock.Unlock()
} }
func getScrapeWorkObjectsForLabels(swcFunc ScrapeWorkConstructorFunc, labelss []*promutils.Labels) []interface{} { func getScrapeWorkObjectsForLabels(swcFunc ScrapeWorkConstructorFunc, labelss []*promutils.Labels) []any {
// Do not pre-allocate swos, since it is likely the swos will be empty because of relabeling // Do not pre-allocate swos, since it is likely the swos will be empty because of relabeling
var swos []interface{} var swos []any
for _, labels := range labelss { for _, labels := range labelss {
swo := swcFunc(labels) swo := swcFunc(labels)
// The reflect check is needed because of https://mangatmodi.medium.com/go-check-nil-interface-the-right-way-d142776edef1 // The reflect check is needed because of https://mangatmodi.medium.com/go-check-nil-interface-the-right-way-d142776edef1
@ -192,10 +192,10 @@ func getScrapeWorkObjectsForLabels(swcFunc ScrapeWorkConstructorFunc, labelss []
} }
// getScrapeWorkObjects returns all the ScrapeWork objects for the given aw. // getScrapeWorkObjects returns all the ScrapeWork objects for the given aw.
func (aw *apiWatcher) getScrapeWorkObjects() []interface{} { func (aw *apiWatcher) getScrapeWorkObjects() []any {
aw.gw.registerPendingAPIWatchers() aw.gw.registerPendingAPIWatchers()
swos := make([]interface{}, 0, aw.swosCount.Get()) swos := make([]any, 0, aw.swosCount.Get())
aw.swosByURLWatcherLock.Lock() aw.swosByURLWatcherLock.Lock()
for _, swosByKey := range aw.swosByURLWatcher { for _, swosByKey := range aw.swosByURLWatcher {
for _, swosLocal := range swosByKey { for _, swosLocal := range swosByKey {
@ -352,7 +352,7 @@ func groupWatchersCleaner() {
type swosByKeyWithLock struct { type swosByKeyWithLock struct {
mu sync.Mutex mu sync.Mutex
swosByKey map[string][]interface{} swosByKey map[string][]any
} }
func (gw *groupWatcher) getScrapeWorkObjectsByAPIWatcherLocked(objectsByKey map[string]object, awsMap map[*apiWatcher]struct{}) map[*apiWatcher]*swosByKeyWithLock { func (gw *groupWatcher) getScrapeWorkObjectsByAPIWatcherLocked(objectsByKey map[string]object, awsMap map[*apiWatcher]struct{}) map[*apiWatcher]*swosByKeyWithLock {
@ -362,7 +362,7 @@ func (gw *groupWatcher) getScrapeWorkObjectsByAPIWatcherLocked(objectsByKey map[
swosByAPIWatcher := make(map[*apiWatcher]*swosByKeyWithLock, len(awsMap)) swosByAPIWatcher := make(map[*apiWatcher]*swosByKeyWithLock, len(awsMap))
for aw := range awsMap { for aw := range awsMap {
swosByAPIWatcher[aw] = &swosByKeyWithLock{ swosByAPIWatcher[aw] = &swosByKeyWithLock{
swosByKey: make(map[string][]interface{}), swosByKey: make(map[string][]any),
} }
} }

View file

@ -921,8 +921,8 @@ func TestGetScrapeWorkObjects(t *testing.T) {
} }
testAPIServer := httptest.NewServer(mux) testAPIServer := httptest.NewServer(mux)
tc.sdc.APIServer = testAPIServer.URL tc.sdc.APIServer = testAPIServer.URL
ac, err := newAPIConfig(tc.sdc, "", func(metaLabels *promutils.Labels) interface{} { ac, err := newAPIConfig(tc.sdc, "", func(metaLabels *promutils.Labels) any {
var res []interface{} var res []any
for _, label := range metaLabels.Labels { for _, label := range metaLabels.Labels {
res = append(res, label.Name) res = append(res, label.Name)
} }

View file

@ -70,12 +70,12 @@ type Selector struct {
} }
// ScrapeWorkConstructorFunc must construct ScrapeWork object for the given metaLabels. // ScrapeWorkConstructorFunc must construct ScrapeWork object for the given metaLabels.
type ScrapeWorkConstructorFunc func(metaLabels *promutils.Labels) interface{} type ScrapeWorkConstructorFunc func(metaLabels *promutils.Labels) any
// GetScrapeWorkObjects returns ScrapeWork objects for the given sdc. // GetScrapeWorkObjects returns ScrapeWork objects for the given sdc.
// //
// This function must be called after MustStart call. // This function must be called after MustStart call.
func (sdc *SDConfig) GetScrapeWorkObjects() ([]interface{}, error) { func (sdc *SDConfig) GetScrapeWorkObjects() ([]any, error) {
if sdc.cfg == nil { if sdc.cfg == nil {
return nil, sdc.startErr return nil, sdc.startErr
} }

View file

@ -42,7 +42,7 @@ type apiConfig struct {
} }
func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) })
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -30,7 +30,7 @@ func (ac *apiConfig) mustStop() {
var configMap = discoveryutils.NewConfigMap() var configMap = discoveryutils.NewConfigMap()
func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) })
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -61,7 +61,7 @@ func (cfg *apiConfig) getFreshAPICredentials() (*apiCredentials, error) {
} }
func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) })
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -90,8 +90,8 @@ func buildAuthRequestBody(sdc *SDConfig) ([]byte, error) {
ApplicationCredential *applicationCredentialReq `json:"application_credential,omitempty"` ApplicationCredential *applicationCredentialReq `json:"application_credential,omitempty"`
} }
type authReq struct { type authReq struct {
Identity identityReq `json:"identity"` Identity identityReq `json:"identity"`
Scope map[string]interface{} `json:"scope,omitempty"` Scope map[string]any `json:"scope,omitempty"`
} }
type request struct { type request struct {
Auth authReq `json:"auth"` Auth authReq `json:"auth"`
@ -233,7 +233,7 @@ func buildAuthRequestBody(sdc *SDConfig) ([]byte, error) {
// buildScope adds scope information into auth request // buildScope adds scope information into auth request
// //
// See https://docs.openstack.org/api-ref/identity/v3/#password-authentication-with-unscoped-authorization // See https://docs.openstack.org/api-ref/identity/v3/#password-authentication-with-unscoped-authorization
func buildScope(sdc *SDConfig) (map[string]interface{}, error) { func buildScope(sdc *SDConfig) (map[string]any, error) {
if len(sdc.ProjectName) == 0 && len(sdc.ProjectID) == 0 && len(sdc.DomainID) == 0 && len(sdc.DomainName) == 0 { if len(sdc.ProjectName) == 0 && len(sdc.ProjectID) == 0 && len(sdc.DomainID) == 0 && len(sdc.DomainName) == 0 {
return nil, nil return nil, nil
} }
@ -244,24 +244,24 @@ func buildScope(sdc *SDConfig) (map[string]interface{}, error) {
return nil, fmt.Errorf("domain_id or domain_name must present") return nil, fmt.Errorf("domain_id or domain_name must present")
} }
if len(sdc.DomainID) > 0 { if len(sdc.DomainID) > 0 {
return map[string]interface{}{ return map[string]any{
"project": map[string]interface{}{ "project": map[string]any{
"name": &sdc.ProjectName, "name": &sdc.ProjectName,
"domain": map[string]interface{}{"id": &sdc.DomainID}, "domain": map[string]any{"id": &sdc.DomainID},
}, },
}, nil }, nil
} }
if len(sdc.DomainName) > 0 { if len(sdc.DomainName) > 0 {
return map[string]interface{}{ return map[string]any{
"project": map[string]interface{}{ "project": map[string]any{
"name": &sdc.ProjectName, "name": &sdc.ProjectName,
"domain": map[string]interface{}{"name": &sdc.DomainName}, "domain": map[string]any{"name": &sdc.DomainName},
}, },
}, nil }, nil
} }
} else if len(sdc.ProjectID) > 0 { } else if len(sdc.ProjectID) > 0 {
return map[string]interface{}{ return map[string]any{
"project": map[string]interface{}{ "project": map[string]any{
"id": &sdc.ProjectID, "id": &sdc.ProjectID,
}, },
}, nil }, nil
@ -269,14 +269,14 @@ func buildScope(sdc *SDConfig) (map[string]interface{}, error) {
if len(sdc.DomainName) > 0 { if len(sdc.DomainName) > 0 {
return nil, fmt.Errorf("both domain_id and domain_name present") return nil, fmt.Errorf("both domain_id and domain_name present")
} }
return map[string]interface{}{ return map[string]any{
"domain": map[string]interface{}{ "domain": map[string]any{
"id": &sdc.DomainID, "id": &sdc.DomainID,
}, },
}, nil }, nil
} else if len(sdc.DomainName) > 0 { } else if len(sdc.DomainName) > 0 {
return map[string]interface{}{ return map[string]any{
"domain": map[string]interface{}{ "domain": map[string]any{
"name": &sdc.DomainName, "name": &sdc.DomainName,
}, },
}, nil }, nil

View file

@ -17,7 +17,7 @@ type apiConfig struct {
// getAPIConfig get or create API config from configMap. // getAPIConfig get or create API config from configMap.
func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) })
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -44,7 +44,7 @@ type apiConfig struct {
} }
func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) }) v, err := configMap.Get(sdc, func() (any, error) { return newAPIConfig(sdc, baseDir) })
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -11,7 +11,7 @@ import (
// It automatically removes old configs which weren't accessed recently. // It automatically removes old configs which weren't accessed recently.
type ConfigMap struct { type ConfigMap struct {
mu sync.Mutex mu sync.Mutex
m map[interface{}]interface{} m map[any]any
entriesCount *metrics.Counter entriesCount *metrics.Counter
} }
@ -19,7 +19,7 @@ type ConfigMap struct {
// NewConfigMap creates ConfigMap // NewConfigMap creates ConfigMap
func NewConfigMap() *ConfigMap { func NewConfigMap() *ConfigMap {
return &ConfigMap{ return &ConfigMap{
m: make(map[interface{}]interface{}), m: make(map[any]any),
entriesCount: metrics.GetOrCreateCounter(`vm_promscrape_discoveryutils_configmap_entries_count`), entriesCount: metrics.GetOrCreateCounter(`vm_promscrape_discoveryutils_configmap_entries_count`),
} }
} }
@ -29,7 +29,7 @@ func NewConfigMap() *ConfigMap {
// Key must be a pointer. // Key must be a pointer.
// //
// It creates new config map with newConfig() call if cm doesn't contain config under the given key. // It creates new config map with newConfig() call if cm doesn't contain config under the given key.
func (cm *ConfigMap) Get(key interface{}, newConfig func() (interface{}, error)) (interface{}, error) { func (cm *ConfigMap) Get(key any, newConfig func() (any, error)) (any, error) {
cm.mu.Lock() cm.mu.Lock()
defer cm.mu.Unlock() defer cm.mu.Unlock()
@ -47,7 +47,7 @@ func (cm *ConfigMap) Get(key interface{}, newConfig func() (interface{}, error))
} }
// Delete deletes config for the given key from cm and returns it. // Delete deletes config for the given key from cm and returns it.
func (cm *ConfigMap) Delete(key interface{}) interface{} { func (cm *ConfigMap) Delete(key any) any {
cm.mu.Lock() cm.mu.Lock()
defer cm.mu.Unlock() defer cm.mu.Unlock()

View file

@ -414,7 +414,7 @@ func labelsHash(labels *promutils.Labels) uint64 {
} }
var xxhashPool = &sync.Pool{ var xxhashPool = &sync.Pool{
New: func() interface{} { New: func() any {
return xxhash.New() return xxhash.New()
}, },
} }

View file

@ -19,12 +19,12 @@ func NewDuration(d time.Duration) *Duration {
} }
// MarshalYAML implements yaml.Marshaler interface. // MarshalYAML implements yaml.Marshaler interface.
func (pd Duration) MarshalYAML() (interface{}, error) { func (pd Duration) MarshalYAML() (any, error) {
return pd.D.String(), nil return pd.D.String(), nil
} }
// UnmarshalYAML implements yaml.Unmarshaler interface. // UnmarshalYAML implements yaml.Unmarshaler interface.
func (pd *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { func (pd *Duration) UnmarshalYAML(unmarshal func(any) error) error {
var s string var s string
if err := unmarshal(&s); err != nil { if err := unmarshal(&s); err != nil {
return err return err

View file

@ -28,7 +28,7 @@ func TestDuration(t *testing.T) {
if s := v.(string); s != sExpected { if s := v.(string); s != sExpected {
t.Fatalf("unexpected value from MarshalYAML(); got %q; want %q", s, sExpected) t.Fatalf("unexpected value from MarshalYAML(); got %q; want %q", s, sExpected)
} }
if err := d.UnmarshalYAML(func(v interface{}) error { if err := d.UnmarshalYAML(func(v any) error {
sp := v.(*string) sp := v.(*string)
s := "1w3d5h" s := "1w3d5h"
*sp = s *sp = s

View file

@ -34,13 +34,13 @@ func NewLabelsFromMap(m map[string]string) *Labels {
} }
// MarshalYAML implements yaml.Marshaler interface. // MarshalYAML implements yaml.Marshaler interface.
func (x *Labels) MarshalYAML() (interface{}, error) { func (x *Labels) MarshalYAML() (any, error) {
m := x.ToMap() m := x.ToMap()
return m, nil return m, nil
} }
// UnmarshalYAML implements yaml.Unmarshaler interface. // UnmarshalYAML implements yaml.Unmarshaler interface.
func (x *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error { func (x *Labels) UnmarshalYAML(unmarshal func(any) error) error {
var m map[string]string var m map[string]string
if err := unmarshal(&m); err != nil { if err := unmarshal(&m); err != nil {
return err return err

View file

@ -197,7 +197,7 @@ func (uw *unmarshalWork) unmarshal() error {
} }
var blockPool = &sync.Pool{ var blockPool = &sync.Pool{
New: func() interface{} { New: func() any {
return &storage.Block{} return &storage.Block{}
}, },
} }

View file

@ -98,7 +98,7 @@ func (u *URL) getAuthHeader(ac *promauth.Config) (string, error) {
} }
// MarshalYAML implements yaml.Marshaler interface. // MarshalYAML implements yaml.Marshaler interface.
func (u *URL) MarshalYAML() (interface{}, error) { func (u *URL) MarshalYAML() (any, error) {
if u.URL == nil { if u.URL == nil {
return nil, nil return nil, nil
} }
@ -106,7 +106,7 @@ func (u *URL) MarshalYAML() (interface{}, error) {
} }
// UnmarshalYAML implements yaml.Unmarshaler interface. // UnmarshalYAML implements yaml.Unmarshaler interface.
func (u *URL) UnmarshalYAML(unmarshal func(interface{}) error) error { func (u *URL) UnmarshalYAML(unmarshal func(any) error) error {
var s string var s string
if err := unmarshal(&s); err != nil { if err := unmarshal(&s); err != nil {
return err return err

View file

@ -49,7 +49,7 @@ type Tracer struct {
// If enabled isn't set, then all function calls to the returned object will be no-op. // If enabled isn't set, then all function calls to the returned object will be no-op.
// //
// Done or Donef must be called when the tracer should be finished. // Done or Donef must be called when the tracer should be finished.
func New(enabled bool, format string, args ...interface{}) *Tracer { func New(enabled bool, format string, args ...any) *Tracer {
if *denyQueryTracing || !enabled { if *denyQueryTracing || !enabled {
return nil return nil
} }
@ -73,7 +73,7 @@ func (t *Tracer) Enabled() bool {
// NewChild cannot be called from concurrent goroutines. // NewChild cannot be called from concurrent goroutines.
// Create children tracers from a single goroutine and then pass them // Create children tracers from a single goroutine and then pass them
// to concurrent goroutines. // to concurrent goroutines.
func (t *Tracer) NewChild(format string, args ...interface{}) *Tracer { func (t *Tracer) NewChild(format string, args ...any) *Tracer {
if t == nil { if t == nil {
return nil return nil
} }
@ -107,7 +107,7 @@ func (t *Tracer) Done() {
// //
// Donef cannot be called multiple times. // Donef cannot be called multiple times.
// Other Tracer functions cannot be called after Donef call. // Other Tracer functions cannot be called after Donef call.
func (t *Tracer) Donef(format string, args ...interface{}) { func (t *Tracer) Donef(format string, args ...any) {
if t == nil { if t == nil {
return return
} }
@ -122,7 +122,7 @@ func (t *Tracer) Donef(format string, args ...interface{}) {
// Printf adds new fmt.Sprintf(format, args...) message to t. // Printf adds new fmt.Sprintf(format, args...) message to t.
// //
// Printf cannot be called from concurrent goroutines. // Printf cannot be called from concurrent goroutines.
func (t *Tracer) Printf(format string, args ...interface{}) { func (t *Tracer) Printf(format string, args ...any) {
if t == nil { if t == nil {
return return
} }

View file

@ -143,11 +143,11 @@ func (bsrh *blockStreamReaderHeap) Swap(i, j int) {
x[i], x[j] = x[j], x[i] x[i], x[j] = x[j], x[i]
} }
func (bsrh *blockStreamReaderHeap) Push(x interface{}) { func (bsrh *blockStreamReaderHeap) Push(x any) {
*bsrh = append(*bsrh, x.(*blockStreamReader)) *bsrh = append(*bsrh, x.(*blockStreamReader))
} }
func (bsrh *blockStreamReaderHeap) Pop() interface{} { func (bsrh *blockStreamReaderHeap) Pop() any {
a := *bsrh a := *bsrh
v := a[len(a)-1] v := a[len(a)-1]
*bsrh = a[:len(a)-1] *bsrh = a[:len(a)-1]

View file

@ -1611,11 +1611,11 @@ func (th *topHeap) Swap(i, j int) {
a[j], a[i] = a[i], a[j] a[j], a[i] = a[i], a[j]
} }
func (th *topHeap) Push(_ interface{}) { func (th *topHeap) Push(_ any) {
panic(fmt.Errorf("BUG: Push shouldn't be called")) panic(fmt.Errorf("BUG: Push shouldn't be called"))
} }
func (th *topHeap) Pop() interface{} { func (th *topHeap) Pop() any {
panic(fmt.Errorf("BUG: Pop shouldn't be called")) panic(fmt.Errorf("BUG: Pop shouldn't be called"))
} }

View file

@ -31,7 +31,7 @@ func mergeBlockStreams(ph *partHeader, bsw *blockStreamWriter, bsrs []*blockStre
} }
var bsmPool = &sync.Pool{ var bsmPool = &sync.Pool{
New: func() interface{} { New: func() any {
return &blockStreamMerger{} return &blockStreamMerger{}
}, },
} }

View file

@ -190,11 +190,11 @@ func (psh *partSearchHeap) Swap(i, j int) {
x[i], x[j] = x[j], x[i] x[i], x[j] = x[j], x[i]
} }
func (psh *partSearchHeap) Push(x interface{}) { func (psh *partSearchHeap) Push(x any) {
*psh = append(*psh, x.(*partSearch)) *psh = append(*psh, x.(*partSearch))
} }
func (psh *partSearchHeap) Pop() interface{} { func (psh *partSearchHeap) Pop() any {
a := *psh a := *psh
v := a[len(a)-1] v := a[len(a)-1]
*psh = a[:len(a)-1] *psh = a[:len(a)-1]

View file

@ -194,11 +194,11 @@ func (ptsh *partitionSearchHeap) Swap(i, j int) {
x[i], x[j] = x[j], x[i] x[i], x[j] = x[j], x[i]
} }
func (ptsh *partitionSearchHeap) Push(x interface{}) { func (ptsh *partitionSearchHeap) Push(x any) {
*ptsh = append(*ptsh, x.(*partitionSearch)) *ptsh = append(*ptsh, x.(*partitionSearch))
} }
func (ptsh *partitionSearchHeap) Pop() interface{} { func (ptsh *partitionSearchHeap) Pop() any {
a := *ptsh a := *ptsh
v := a[len(a)-1] v := a[len(a)-1]
*ptsh = a[:len(a)-1] *ptsh = a[:len(a)-1]

View file

@ -64,7 +64,7 @@ func (as *avgAggrState) pushSamples(samples []pushSample) {
func (as *avgAggrState) flushState(ctx *flushCtx, resetState bool) { func (as *avgAggrState) flushState(ctx *flushCtx, resetState bool) {
currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000 currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000
m := &as.m m := &as.m
m.Range(func(k, v interface{}) bool { m.Range(func(k, v any) bool {
if resetState { if resetState {
// Atomically delete the entry from the map, so new entry is created for the next flush. // Atomically delete the entry from the map, so new entry is created for the next flush.
m.Delete(k) m.Delete(k)

View file

@ -61,7 +61,7 @@ func (as *countSamplesAggrState) pushSamples(samples []pushSample) {
func (as *countSamplesAggrState) flushState(ctx *flushCtx, resetState bool) { func (as *countSamplesAggrState) flushState(ctx *flushCtx, resetState bool) {
currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000 currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000
m := &as.m m := &as.m
m.Range(func(k, v interface{}) bool { m.Range(func(k, v any) bool {
if resetState { if resetState {
// Atomically delete the entry from the map, so new entry is created for the next flush. // Atomically delete the entry from the map, so new entry is created for the next flush.
m.Delete(k) m.Delete(k)

View file

@ -70,7 +70,7 @@ func (as *countSeriesAggrState) pushSamples(samples []pushSample) {
func (as *countSeriesAggrState) flushState(ctx *flushCtx, resetState bool) { func (as *countSeriesAggrState) flushState(ctx *flushCtx, resetState bool) {
currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000 currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000
m := &as.m m := &as.m
m.Range(func(k, v interface{}) bool { m.Range(func(k, v any) bool {
if resetState { if resetState {
// Atomically delete the entry from the map, so new entry is created for the next flush. // Atomically delete the entry from the map, so new entry is created for the next flush.
m.Delete(k) m.Delete(k)

View file

@ -69,7 +69,7 @@ func (as *histogramBucketAggrState) pushSamples(samples []pushSample) {
func (as *histogramBucketAggrState) removeOldEntries(ctx *flushCtx, currentTime uint64) { func (as *histogramBucketAggrState) removeOldEntries(ctx *flushCtx, currentTime uint64) {
m := &as.m m := &as.m
var staleOutputSamples int var staleOutputSamples int
m.Range(func(k, v interface{}) bool { m.Range(func(k, v any) bool {
sv := v.(*histogramBucketStateValue) sv := v.(*histogramBucketStateValue)
sv.mu.Lock() sv.mu.Lock()
@ -96,7 +96,7 @@ func (as *histogramBucketAggrState) flushState(ctx *flushCtx, _ bool) {
as.removeOldEntries(ctx, currentTime) as.removeOldEntries(ctx, currentTime)
m := &as.m m := &as.m
m.Range(func(k, v interface{}) bool { m.Range(func(k, v any) bool {
sv := v.(*histogramBucketStateValue) sv := v.(*histogramBucketStateValue)
sv.mu.Lock() sv.mu.Lock()
if !sv.deleted { if !sv.deleted {

View file

@ -66,7 +66,7 @@ func (as *lastAggrState) pushSamples(samples []pushSample) {
func (as *lastAggrState) flushState(ctx *flushCtx, resetState bool) { func (as *lastAggrState) flushState(ctx *flushCtx, resetState bool) {
currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000 currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000
m := &as.m m := &as.m
m.Range(func(k, v interface{}) bool { m.Range(func(k, v any) bool {
if resetState { if resetState {
// Atomically delete the entry from the map, so new entry is created for the next flush. // Atomically delete the entry from the map, so new entry is created for the next flush.
m.Delete(k) m.Delete(k)

View file

@ -63,7 +63,7 @@ func (as *maxAggrState) pushSamples(samples []pushSample) {
func (as *maxAggrState) flushState(ctx *flushCtx, resetState bool) { func (as *maxAggrState) flushState(ctx *flushCtx, resetState bool) {
currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000 currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000
m := &as.m m := &as.m
m.Range(func(k, v interface{}) bool { m.Range(func(k, v any) bool {
if resetState { if resetState {
// Atomically delete the entry from the map, so new entry is created for the next flush. // Atomically delete the entry from the map, so new entry is created for the next flush.
m.Delete(k) m.Delete(k)

View file

@ -63,7 +63,7 @@ func (as *minAggrState) pushSamples(samples []pushSample) {
func (as *minAggrState) flushState(ctx *flushCtx, resetState bool) { func (as *minAggrState) flushState(ctx *flushCtx, resetState bool) {
currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000 currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000
m := &as.m m := &as.m
m.Range(func(k, v interface{}) bool { m.Range(func(k, v any) bool {
if resetState { if resetState {
// Atomically delete the entry from the map, so new entry is created for the next flush. // Atomically delete the entry from the map, so new entry is created for the next flush.
m.Delete(k) m.Delete(k)

View file

@ -70,7 +70,7 @@ func (as *quantilesAggrState) flushState(ctx *flushCtx, resetState bool) {
phis := as.phis phis := as.phis
var quantiles []float64 var quantiles []float64
var b []byte var b []byte
m.Range(func(k, v interface{}) bool { m.Range(func(k, v any) bool {
if resetState { if resetState {
// Atomically delete the entry from the map, so new entry is created for the next flush. // Atomically delete the entry from the map, so new entry is created for the next flush.
m.Delete(k) m.Delete(k)

View file

@ -111,7 +111,7 @@ func (as *rateAggrState) flushState(ctx *flushCtx, _ bool) {
var staleOutputSamples, staleInputSamples int var staleOutputSamples, staleInputSamples int
m := &as.m m := &as.m
m.Range(func(k, v interface{}) bool { m.Range(func(k, v any) bool {
sv := v.(*rateStateValue) sv := v.(*rateStateValue)
sv.mu.Lock() sv.mu.Lock()

View file

@ -64,7 +64,7 @@ func (as *stddevAggrState) pushSamples(samples []pushSample) {
func (as *stddevAggrState) flushState(ctx *flushCtx, resetState bool) { func (as *stddevAggrState) flushState(ctx *flushCtx, resetState bool) {
currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000 currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000
m := &as.m m := &as.m
m.Range(func(k, v interface{}) bool { m.Range(func(k, v any) bool {
if resetState { if resetState {
// Atomically delete the entry from the map, so new entry is created for the next flush. // Atomically delete the entry from the map, so new entry is created for the next flush.
m.Delete(k) m.Delete(k)

View file

@ -63,7 +63,7 @@ func (as *stdvarAggrState) pushSamples(samples []pushSample) {
func (as *stdvarAggrState) flushState(ctx *flushCtx, resetState bool) { func (as *stdvarAggrState) flushState(ctx *flushCtx, resetState bool) {
currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000 currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000
m := &as.m m := &as.m
m.Range(func(k, v interface{}) bool { m.Range(func(k, v any) bool {
if resetState { if resetState {
// Atomically delete the entry from the map, so new entry is created for the next flush. // Atomically delete the entry from the map, so new entry is created for the next flush.
m.Delete(k) m.Delete(k)

View file

@ -61,7 +61,7 @@ func (as *sumSamplesAggrState) pushSamples(samples []pushSample) {
func (as *sumSamplesAggrState) flushState(ctx *flushCtx, resetState bool) { func (as *sumSamplesAggrState) flushState(ctx *flushCtx, resetState bool) {
currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000 currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000
m := &as.m m := &as.m
m.Range(func(k, v interface{}) bool { m.Range(func(k, v any) bool {
if resetState { if resetState {
// Atomically delete the entry from the map, so new entry is created for the next flush. // Atomically delete the entry from the map, so new entry is created for the next flush.
m.Delete(k) m.Delete(k)

View file

@ -127,7 +127,7 @@ func (as *totalAggrState) pushSamples(samples []pushSample) {
func (as *totalAggrState) removeOldEntries(ctx *flushCtx, currentTime uint64) { func (as *totalAggrState) removeOldEntries(ctx *flushCtx, currentTime uint64) {
m := &as.m m := &as.m
var staleInputSamples, staleOutputSamples int var staleInputSamples, staleOutputSamples int
m.Range(func(k, v interface{}) bool { m.Range(func(k, v any) bool {
sv := v.(*totalStateValue) sv := v.(*totalStateValue)
sv.mu.Lock() sv.mu.Lock()
@ -164,7 +164,7 @@ func (as *totalAggrState) flushState(ctx *flushCtx, resetState bool) {
as.removeOldEntries(ctx, currentTime) as.removeOldEntries(ctx, currentTime)
m := &as.m m := &as.m
m.Range(func(k, v interface{}) bool { m.Range(func(k, v any) bool {
sv := v.(*totalStateValue) sv := v.(*totalStateValue)
sv.mu.Lock() sv.mu.Lock()
total := sv.total total := sv.total

View file

@ -65,7 +65,7 @@ func (as *uniqueSamplesAggrState) pushSamples(samples []pushSample) {
func (as *uniqueSamplesAggrState) flushState(ctx *flushCtx, resetState bool) { func (as *uniqueSamplesAggrState) flushState(ctx *flushCtx, resetState bool) {
currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000 currentTimeMsec := int64(fasttime.UnixTimestamp()) * 1000
m := &as.m m := &as.m
m.Range(func(k, v interface{}) bool { m.Range(func(k, v any) bool {
if resetState { if resetState {
// Atomically delete the entry from the map, so new entry is created for the next flush. // Atomically delete the entry from the map, so new entry is created for the next flush.
m.Delete(k) m.Delete(k)

View file

@ -535,7 +535,7 @@ func (b *bucket32) forEach(f func(part []uint64) bool) bool {
} }
var partBufPool = &sync.Pool{ var partBufPool = &sync.Pool{
New: func() interface{} { New: func() any {
buf := make([]uint64, 0, bitsPerBucket) buf := make([]uint64, 0, bitsPerBucket)
return &buf return &buf
}, },
@ -948,7 +948,7 @@ func (b *bucket16) appendTo(dst []uint64, hi uint32, hi16 uint16) []uint64 {
} }
var smallPoolSorterPool = &sync.Pool{ var smallPoolSorterPool = &sync.Pool{
New: func() interface{} { New: func() any {
return &smallPoolSorter{} return &smallPoolSorter{}
}, },
} }