diff --git a/app/vmagent/remotewrite/client.go b/app/vmagent/remotewrite/client.go
index ed126238c..76a58656e 100644
--- a/app/vmagent/remotewrite/client.go
+++ b/app/vmagent/remotewrite/client.go
@@ -162,7 +162,7 @@ func getTLSConfig(argIdx int) (*tls.Config, error) {
 	}
 	cfg, err := promauth.NewConfig(".", nil, "", "", tlsConfig)
 	if err != nil {
-		return nil, fmt.Errorf("cannot populate TLS config: %s", err)
+		return nil, fmt.Errorf("cannot populate TLS config: %w", err)
 	}
 	tlsCfg := cfg.NewTLSConfig()
 	return tlsCfg, nil
diff --git a/app/vmagent/remotewrite/relabel.go b/app/vmagent/remotewrite/relabel.go
index 1e18f8e1a..7b739a71c 100644
--- a/app/vmagent/remotewrite/relabel.go
+++ b/app/vmagent/remotewrite/relabel.go
@@ -33,7 +33,7 @@ func loadRelabelConfigs() (*relabelConfigs, error) {
 	if *relabelConfigPathGlobal != "" {
 		global, err := promrelabel.LoadRelabelConfigs(*relabelConfigPathGlobal)
 		if err != nil {
-			return nil, fmt.Errorf("cannot load -remoteWrite.relabelConfig=%q: %s", *relabelConfigPathGlobal, err)
+			return nil, fmt.Errorf("cannot load -remoteWrite.relabelConfig=%q: %w", *relabelConfigPathGlobal, err)
 		}
 		rcs.global = global
 	}
@@ -45,7 +45,7 @@ func loadRelabelConfigs() (*relabelConfigs, error) {
 	for i, path := range *relabelConfigPaths {
 		prc, err := promrelabel.LoadRelabelConfigs(path)
 		if err != nil {
-			return nil, fmt.Errorf("cannot load relabel configs from -remoteWrite.urlRelabelConfig=%q: %s", path, err)
+			return nil, fmt.Errorf("cannot load relabel configs from -remoteWrite.urlRelabelConfig=%q: %w", path, err)
 		}
 		rcs.perURL[i] = prc
 	}
diff --git a/app/vmalert/alerting.go b/app/vmalert/alerting.go
index f8643c11d..42fa5e39c 100644
--- a/app/vmalert/alerting.go
+++ b/app/vmalert/alerting.go
@@ -72,7 +72,7 @@ func (ar *AlertingRule) Exec(ctx context.Context, q datasource.Querier, series b
 	ar.lastExecError = err
 	ar.lastExecTime = time.Now()
 	if err != nil {
-		return nil, fmt.Errorf("failed to execute query %q: %s", ar.Expr, err)
+		return nil, fmt.Errorf("failed to execute query %q: %w", ar.Expr, err)
 	}
 
 	for h, a := range ar.alerts {
@@ -103,7 +103,7 @@ func (ar *AlertingRule) Exec(ctx context.Context, q datasource.Querier, series b
 		a, err := ar.newAlert(m, ar.lastExecTime)
 		if err != nil {
 			ar.lastExecError = err
-			return nil, fmt.Errorf("failed to create alert: %s", err)
+			return nil, fmt.Errorf("failed to create alert: %w", err)
 		}
 		a.ID = h
 		a.State = notifier.StatePending
@@ -363,7 +363,7 @@ func (ar *AlertingRule) Restore(ctx context.Context, q datasource.Querier, lookb
 
 		a, err := ar.newAlert(m, time.Unix(int64(m.Value), 0))
 		if err != nil {
-			return fmt.Errorf("failed to create alert: %s", err)
+			return fmt.Errorf("failed to create alert: %w", err)
 		}
 		a.ID = hash(m)
 		a.State = notifier.StatePending
diff --git a/app/vmalert/config/config.go b/app/vmalert/config/config.go
index 2d62e39a0..7adcd6289 100644
--- a/app/vmalert/config/config.go
+++ b/app/vmalert/config/config.go
@@ -46,19 +46,19 @@ func (g *Group) Validate(validateAnnotations, validateExpressions bool) error {
 		}
 		uniqueRules[r.ID] = struct{}{}
 		if err := r.Validate(); err != nil {
-			return fmt.Errorf("invalid rule %q.%q: %s", g.Name, ruleName, err)
+			return fmt.Errorf("invalid rule %q.%q: %w", g.Name, ruleName, err)
 		}
 		if validateExpressions {
 			if _, err := metricsql.Parse(r.Expr); err != nil {
-				return fmt.Errorf("invalid expression for rule %q.%q: %s", g.Name, ruleName, err)
+				return fmt.Errorf("invalid expression for rule %q.%q: %w", g.Name, ruleName, err)
 			}
 		}
 		if validateAnnotations {
 			if err := notifier.ValidateTemplates(r.Annotations); err != nil {
-				return fmt.Errorf("invalid annotations for rule %q.%q: %s", g.Name, ruleName, err)
+				return fmt.Errorf("invalid annotations for rule %q.%q: %w", g.Name, ruleName, err)
 			}
 			if err := notifier.ValidateTemplates(r.Labels); err != nil {
-				return fmt.Errorf("invalid labels for rule %q.%q: %s", g.Name, ruleName, err)
+				return fmt.Errorf("invalid labels for rule %q.%q: %w", g.Name, ruleName, err)
 			}
 		}
 	}
@@ -137,7 +137,7 @@ func Parse(pathPatterns []string, validateAnnotations, validateExpressions bool)
 	for _, pattern := range pathPatterns {
 		matches, err := filepath.Glob(pattern)
 		if err != nil {
-			return nil, fmt.Errorf("error reading file pattern %s: %v", pattern, err)
+			return nil, fmt.Errorf("error reading file pattern %s: %w", pattern, err)
 		}
 		fp = append(fp, matches...)
 	}
@@ -150,7 +150,7 @@ func Parse(pathPatterns []string, validateAnnotations, validateExpressions bool)
 		}
 		for _, g := range gr {
 			if err := g.Validate(validateAnnotations, validateExpressions); err != nil {
-				return nil, fmt.Errorf("invalid group %q in file %q: %s", g.Name, file, err)
+				return nil, fmt.Errorf("invalid group %q in file %q: %w", g.Name, file, err)
 			}
 			if _, ok := uniqueGroups[g.Name]; ok {
 				return nil, fmt.Errorf("group name %q duplicate in file %q", g.Name, file)
diff --git a/app/vmalert/datasource/init.go b/app/vmalert/datasource/init.go
index 3e371f5c6..244d3c9c3 100644
--- a/app/vmalert/datasource/init.go
+++ b/app/vmalert/datasource/init.go
@@ -31,7 +31,7 @@ func Init() (Querier, error) {
 	}
 	tr, err := utils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
 	if err != nil {
-		return nil, fmt.Errorf("failed to create transport: %s", err)
+		return nil, fmt.Errorf("failed to create transport: %w", err)
 	}
 	c := &http.Client{Transport: tr}
 	return NewVMStorage(*addr, *basicAuthUsername, *basicAuthPassword, c), nil
diff --git a/app/vmalert/datasource/vm.go b/app/vmalert/datasource/vm.go
index 295cf6ac9..e622d159d 100644
--- a/app/vmalert/datasource/vm.go
+++ b/app/vmalert/datasource/vm.go
@@ -32,7 +32,7 @@ func (r response) metrics() ([]Metric, error) {
 	for i, res := range r.Data.Result {
 		f, err = strconv.ParseFloat(res.TV[1].(string), 64)
 		if err != nil {
-			return nil, fmt.Errorf("metric %v, unable to parse float64 from %s: %s", res, res.TV[1], err)
+			return nil, fmt.Errorf("metric %v, unable to parse float64 from %s: %w", res, res.TV[1], err)
 		}
 		m.Labels = nil
 		for k, v := range r.Data.Result[i].Labels {
@@ -80,25 +80,25 @@ func (s *VMStorage) Query(ctx context.Context, query string) ([]Metric, error) {
 	}
 	resp, err := s.c.Do(req.WithContext(ctx))
 	if err != nil {
-		return nil, fmt.Errorf("error getting response from %s:%s", req.URL, err)
+		return nil, fmt.Errorf("error getting response from %s: %w", req.URL, err)
 	}
 	defer func() { _ = resp.Body.Close() }()
 	if resp.StatusCode != http.StatusOK {
 		body, _ := ioutil.ReadAll(resp.Body)
-		return nil, fmt.Errorf("datasource returns unxeprected response code %d for %s with err %s. Reponse body %s", resp.StatusCode, req.URL, err, body)
+		return nil, fmt.Errorf("datasource returns unexpected response code %d for %s; response body: %s", resp.StatusCode, req.URL, body)
 	}
 	r := &response{}
 	if err := json.NewDecoder(resp.Body).Decode(r); err != nil {
-		return nil, fmt.Errorf("error parsing metrics for %s:%s", req.URL, err)
+		return nil, fmt.Errorf("error parsing metrics for %s: %w", req.URL, err)
 	}
 	if r.Status == statusError {
 		return nil, fmt.Errorf("response error, query: %s, errorType: %s, error: %s", req.URL, r.ErrorType, r.Error)
 	}
 	if r.Status != statusSuccess {
-		return nil, fmt.Errorf("unkown status:%s, Expected success or error ", r.Status)
+		return nil, fmt.Errorf("unknown status: %s; expected success or error", r.Status)
 	}
 	if r.Data.ResultType != rtVector {
-		return nil, fmt.Errorf("unkown restul type:%s. Expected vector", r.Data.ResultType)
+		return nil, fmt.Errorf("unknown result type: %s; expected vector", r.Data.ResultType)
 	}
 	return r.metrics()
 }
diff --git a/app/vmalert/group.go b/app/vmalert/group.go
index 2ae35c087..ec65ea508 100644
--- a/app/vmalert/group.go
+++ b/app/vmalert/group.go
@@ -84,7 +84,7 @@ func (g *Group) Restore(ctx context.Context, q datasource.Querier, lookback time
 			continue
 		}
 		if err := rr.Restore(ctx, q, lookback); err != nil {
-			return fmt.Errorf("error while restoring rule %q: %s", rule, err)
+			return fmt.Errorf("error while restoring rule %q: %w", rule, err)
 		}
 	}
 	return nil
@@ -251,7 +251,7 @@ func (e *executor) exec(ctx context.Context, rule Rule, returnSeries bool, inter
 	tss, err := rule.Exec(ctx, e.querier, returnSeries)
 	if err != nil {
 		execErrors.Inc()
-		return fmt.Errorf("rule %q: failed to execute: %s", rule, err)
+		return fmt.Errorf("rule %q: failed to execute: %w", rule, err)
 	}
 
 	if len(tss) > 0 && e.rw != nil {
@@ -259,7 +259,7 @@ func (e *executor) exec(ctx context.Context, rule Rule, returnSeries bool, inter
 		for _, ts := range tss {
 			if err := e.rw.Push(ts); err != nil {
 				remoteWriteErrors.Inc()
-				return fmt.Errorf("rule %q: remote write failure: %s", rule, err)
+				return fmt.Errorf("rule %q: remote write failure: %w", rule, err)
 			}
 		}
 	}
@@ -293,7 +293,7 @@ func (e *executor) exec(ctx context.Context, rule Rule, returnSeries bool, inter
 	for _, nt := range e.notifiers {
 		if err := nt.Send(ctx, alerts); err != nil {
 			alertsSendErrors.Inc()
-			errGr.Add(fmt.Errorf("rule %q: failed to send alerts: %s", rule, err))
+			errGr.Add(fmt.Errorf("rule %q: failed to send alerts: %w", rule, err))
 		}
 	}
 	return errGr.Err()
diff --git a/app/vmalert/main.go b/app/vmalert/main.go
index a917933cf..768cadbf1 100644
--- a/app/vmalert/main.go
+++ b/app/vmalert/main.go
@@ -105,20 +105,20 @@ var (
 func newManager(ctx context.Context) (*manager, error) {
 	q, err := datasource.Init()
 	if err != nil {
-		return nil, fmt.Errorf("failed to init datasource: %s", err)
+		return nil, fmt.Errorf("failed to init datasource: %w", err)
 	}
 	eu, err := getExternalURL(*externalURL, *httpListenAddr, httpserver.IsTLS())
 	if err != nil {
-		return nil, fmt.Errorf("failed to init `external.url`: %s", err)
+		return nil, fmt.Errorf("failed to init `external.url`: %w", err)
 	}
 	notifier.InitTemplateFunc(eu)
 	aug, err := getAlertURLGenerator(eu, *externalAlertSource, *validateTemplates)
 	if err != nil {
-		return nil, fmt.Errorf("failed to init `external.alert.source`: %s", err)
+		return nil, fmt.Errorf("failed to init `external.alert.source`: %w", err)
 	}
 	nts, err := notifier.Init(aug)
 	if err != nil {
-		return nil, fmt.Errorf("failed to init notifier: %s", err)
+		return nil, fmt.Errorf("failed to init notifier: %w", err)
 	}
 
 	manager := &manager{
@@ -128,13 +128,13 @@ func newManager(ctx context.Context) (*manager, error) {
 	}
 	rw, err := remotewrite.Init(ctx)
 	if err != nil {
-		return nil, fmt.Errorf("failed to init remoteWrite: %s", err)
+		return nil, fmt.Errorf("failed to init remoteWrite: %w", err)
 	}
 	manager.rw = rw
 
 	rr, err := remoteread.Init()
 	if err != nil {
-		return nil, fmt.Errorf("failed to init remoteRead: %s", err)
+		return nil, fmt.Errorf("failed to init remoteRead: %w", err)
 	}
 	manager.rr = rr
 	return manager, nil
@@ -169,7 +169,7 @@ func getAlertURLGenerator(externalURL *url.URL, externalAlertSource string, vali
 		if err := notifier.ValidateTemplates(map[string]string{
 			"tpl": externalAlertSource,
 		}); err != nil {
-			return nil, fmt.Errorf("error validating source template %s:%w", externalAlertSource, err)
+			return nil, fmt.Errorf("error validating source template %s: %w", externalAlertSource, err)
 		}
 	}
 	m := map[string]string{
diff --git a/app/vmalert/manager.go b/app/vmalert/manager.go
index bce4026bf..98b505caa 100644
--- a/app/vmalert/manager.go
+++ b/app/vmalert/manager.go
@@ -83,7 +83,7 @@ func (m *manager) update(ctx context.Context, path []string, validateTpl, valida
 	logger.Infof("reading rules configuration file from %q", strings.Join(path, ";"))
 	groupsCfg, err := config.Parse(path, validateTpl, validateExpr)
 	if err != nil {
-		return fmt.Errorf("cannot parse configuration file: %s", err)
+		return fmt.Errorf("cannot parse configuration file: %w", err)
 	}
 
 	groupsRegistry := make(map[uint64]*Group)
diff --git a/app/vmalert/notifier/alert.go b/app/vmalert/notifier/alert.go
index 67302ca17..04d4d0a71 100644
--- a/app/vmalert/notifier/alert.go
+++ b/app/vmalert/notifier/alert.go
@@ -89,7 +89,7 @@ func templateAnnotations(annotations map[string]string, header string, data aler
 		builder.WriteString(header)
 		builder.WriteString(text)
 		if err := templateAnnotation(&buf, builder.String(), data); err != nil {
-			eg.Add(fmt.Errorf("key %q, template %q: %s", key, text, err))
+			eg.Add(fmt.Errorf("key %q, template %q: %w", key, text, err))
 			continue
 		}
 		r[key] = buf.String()
diff --git a/app/vmalert/notifier/alertmanager.go b/app/vmalert/notifier/alertmanager.go
index 78fb453bc..f7dd03615 100644
--- a/app/vmalert/notifier/alertmanager.go
+++ b/app/vmalert/notifier/alertmanager.go
@@ -43,7 +43,7 @@ func (am *AlertManager) Send(ctx context.Context, alerts []Alert) error {
 	if resp.StatusCode != http.StatusOK {
 		body, err := ioutil.ReadAll(resp.Body)
 		if err != nil {
-			return fmt.Errorf("failed to read response from %q: %s", am.alertURL, err)
+			return fmt.Errorf("failed to read response from %q: %w", am.alertURL, err)
 		}
 		return fmt.Errorf("invalid SC %d from %q; response body: %s", resp.StatusCode, am.alertURL, string(body))
 	}
diff --git a/app/vmalert/notifier/init.go b/app/vmalert/notifier/init.go
index b84c42c26..e1f100a51 100644
--- a/app/vmalert/notifier/init.go
+++ b/app/vmalert/notifier/init.go
@@ -36,7 +36,7 @@ func Init(gen AlertURLGenerator) ([]Notifier, error) {
 		ca, serverName := tlsCAFile.GetOptionalArg(i), tlsServerName.GetOptionalArg(i)
 		tr, err := utils.Transport(addr, cert, key, ca, serverName, *tlsInsecureSkipVerify)
 		if err != nil {
-			return nil, fmt.Errorf("failed to create transport: %s", err)
+			return nil, fmt.Errorf("failed to create transport: %w", err)
 		}
 		user, pass := basicAuthUsername.GetOptionalArg(i), basicAuthPassword.GetOptionalArg(i)
 		am := NewAlertManager(addr, user, pass, gen, &http.Client{Transport: tr})
diff --git a/app/vmalert/recording.go b/app/vmalert/recording.go
index 6913e8a65..3aadd2a4e 100644
--- a/app/vmalert/recording.go
+++ b/app/vmalert/recording.go
@@ -71,7 +71,7 @@ func (rr *RecordingRule) Exec(ctx context.Context, q datasource.Querier, series
 	rr.lastExecTime = time.Now()
 	rr.lastExecError = err
 	if err != nil {
-		return nil, fmt.Errorf("failed to execute query %q: %s", rr.Expr, err)
+		return nil, fmt.Errorf("failed to execute query %q: %w", rr.Expr, err)
 	}
 
 	duplicates := make(map[uint64]prompbmarshal.TimeSeries, len(qMetrics))
diff --git a/app/vmalert/remoteread/init.go b/app/vmalert/remoteread/init.go
index 643332373..69067493d 100644
--- a/app/vmalert/remoteread/init.go
+++ b/app/vmalert/remoteread/init.go
@@ -32,7 +32,7 @@ func Init() (datasource.Querier, error) {
 	}
 	tr, err := utils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
 	if err != nil {
-		return nil, fmt.Errorf("failed to create transport: %s", err)
+		return nil, fmt.Errorf("failed to create transport: %w", err)
 	}
 	c := &http.Client{Transport: tr}
 	return datasource.NewVMStorage(*addr, *basicAuthUsername, *basicAuthPassword, c), nil
diff --git a/app/vmalert/remotewrite/init.go b/app/vmalert/remotewrite/init.go
index 4141c163e..573c87020 100644
--- a/app/vmalert/remotewrite/init.go
+++ b/app/vmalert/remotewrite/init.go
@@ -38,7 +38,7 @@ func Init(ctx context.Context) (*Client, error) {
 
 	t, err := utils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
 	if err != nil {
-		return nil, fmt.Errorf("failed to create transport: %s", err)
+		return nil, fmt.Errorf("failed to create transport: %w", err)
 	}
 
 	return NewClient(ctx, Config{
diff --git a/app/vmalert/utils/tls.go b/app/vmalert/utils/tls.go
index bc7a0c807..4155f16c2 100644
--- a/app/vmalert/utils/tls.go
+++ b/app/vmalert/utils/tls.go
@@ -30,7 +30,7 @@ func TLSConfig(certFile, keyFile, CAFile, serverName string, insecureSkipVerify
 	if certFile != "" {
 		cert, err := tls.LoadX509KeyPair(certFile, keyFile)
 		if err != nil {
-			return nil, fmt.Errorf("cannot load TLS certificate from `cert_file`=%q, `key_file`=%q: %s", certFile, keyFile, err)
+			return nil, fmt.Errorf("cannot load TLS certificate from `cert_file`=%q, `key_file`=%q: %w", certFile, keyFile, err)
 		}
 
 		certs = []tls.Certificate{cert}
@@ -40,7 +40,7 @@ func TLSConfig(certFile, keyFile, CAFile, serverName string, insecureSkipVerify
 	if CAFile != "" {
 		pem, err := ioutil.ReadFile(CAFile)
 		if err != nil {
-			return nil, fmt.Errorf("cannot read `ca_file` %q: %s", CAFile, err)
+			return nil, fmt.Errorf("cannot read `ca_file` %q: %w", CAFile, err)
 		}
 
 		rootCAs = x509.NewCertPool()
diff --git a/app/vmalert/web.go b/app/vmalert/web.go
index d271fd555..07d46beba 100644
--- a/app/vmalert/web.go
+++ b/app/vmalert/web.go
@@ -80,7 +80,7 @@ func (rh *requestHandler) listGroups() ([]byte, error) {
 	b, err := json.Marshal(lr)
 	if err != nil {
 		return nil, &httpserver.ErrorWithStatusCode{
-			Err:        fmt.Errorf(`error encoding list of active alerts: %s`, err),
+			Err:        fmt.Errorf(`error encoding list of groups: %w`, err),
 			StatusCode: http.StatusInternalServerError,
 		}
 	}
@@ -117,7 +117,7 @@ func (rh *requestHandler) listAlerts() ([]byte, error) {
 	b, err := json.Marshal(lr)
 	if err != nil {
 		return nil, &httpserver.ErrorWithStatusCode{
-			Err:        fmt.Errorf(`error encoding list of active alerts: %s`, err),
+			Err:        fmt.Errorf(`error encoding list of active alerts: %w`, err),
 			StatusCode: http.StatusInternalServerError,
 		}
 	}
@@ -138,11 +138,11 @@ func (rh *requestHandler) alert(path string) ([]byte, error) {
 	groupID, err := uint64FromPath(parts[0])
 	if err != nil {
-		return nil, badRequest(fmt.Errorf(`cannot parse groupID: %s`, err))
+		return nil, badRequest(fmt.Errorf(`cannot parse groupID: %w`, err))
 	}
 	alertID, err := uint64FromPath(parts[1])
 	if err != nil {
-		return nil, badRequest(fmt.Errorf(`cannot parse alertID: %s`, err))
+		return nil, badRequest(fmt.Errorf(`cannot parse alertID: %w`, err))
 	}
 	resp, err := rh.m.AlertAPI(groupID, alertID)
 	if err != nil {
diff --git a/app/vmauth/auth_config.go b/app/vmauth/auth_config.go
index 9fb1369de..02dc96b4b 100644
--- a/app/vmauth/auth_config.go
+++ b/app/vmauth/auth_config.go
@@ -82,11 +82,11 @@ var stopCh chan struct{}
 func readAuthConfig(path string) (map[string]*UserInfo, error) {
 	data, err := ioutil.ReadFile(path)
 	if err != nil {
-		return nil, fmt.Errorf("cannot read %q: %s", path, err)
+		return nil, fmt.Errorf("cannot read %q: %w", path, err)
 	}
 	m, err := parseAuthConfig(data)
 	if err != nil {
-		return nil, fmt.Errorf("cannot parse %q: %s", path, err)
+		return nil, fmt.Errorf("cannot parse %q: %w", path, err)
 	}
 	logger.Infof("Loaded information about %d users from %q", len(m), path)
 	return m, nil
@@ -95,7 +95,7 @@ func readAuthConfig(path string) (map[string]*UserInfo, error) {
 func parseAuthConfig(data []byte) (map[string]*UserInfo, error) {
 	var ac AuthConfig
 	if err := yaml.UnmarshalStrict(data, &ac); err != nil {
-		return nil, fmt.Errorf("cannot unmarshal AuthConfig data: %s", err)
+		return nil, fmt.Errorf("cannot unmarshal AuthConfig data: %w", err)
 	}
 	uis := ac.Users
 	if len(uis) == 0 {
@@ -115,7 +115,7 @@ func parseAuthConfig(data []byte) (map[string]*UserInfo, error) {
 		// Validate urlPrefix
 		target, err := url.Parse(urlPrefix)
 		if err != nil {
-			return nil, fmt.Errorf("invalid `url_prefix: %q`: %s", urlPrefix, err)
+			return nil, fmt.Errorf("invalid `url_prefix: %q`: %w", urlPrefix, err)
 		}
 		if target.Scheme != "http" && target.Scheme != "https" {
 			return nil, fmt.Errorf("unsupported scheme for `url_prefix: %q`: %q; must be `http` or `https`", urlPrefix, target.Scheme)
diff --git a/app/vmbackup/main.go b/app/vmbackup/main.go
index 1e2dcbf18..0742b4fa5 100644
--- a/app/vmbackup/main.go
+++ b/app/vmbackup/main.go
@@ -110,12 +110,12 @@ func newSrcFS() (*fslocal.FS, error) {
 	// Verify the snapshot exists.
 	f, err := os.Open(snapshotPath)
 	if err != nil {
-		return nil, fmt.Errorf("cannot open snapshot at %q: %s", snapshotPath, err)
+		return nil, fmt.Errorf("cannot open snapshot at %q: %w", snapshotPath, err)
 	}
 	fi, err := f.Stat()
 	_ = f.Close()
 	if err != nil {
-		return nil, fmt.Errorf("cannot stat %q: %s", snapshotPath, err)
+		return nil, fmt.Errorf("cannot stat %q: %w", snapshotPath, err)
 	}
 	if !fi.IsDir() {
 		return nil, fmt.Errorf("snapshot %q must be a directory", snapshotPath)
@@ -126,7 +126,7 @@ func newSrcFS() (*fslocal.FS, error) {
 		MaxBytesPerSecond: *maxBytesPerSecond,
 	}
 	if err := fs.Init(); err != nil {
-		return nil, fmt.Errorf("cannot initialize fs: %s", err)
+		return nil, fmt.Errorf("cannot initialize fs: %w", err)
 	}
 	return fs, nil
 }
@@ -134,7 +134,7 @@ func newSrcFS() (*fslocal.FS, error) {
 func newDstFS() (common.RemoteFS, error) {
 	fs, err := actions.NewRemoteFS(*dst)
 	if err != nil {
-		return nil, fmt.Errorf("cannot parse `-dst`=%q: %s", *dst, err)
+		return nil, fmt.Errorf("cannot parse `-dst`=%q: %w", *dst, err)
 	}
 	return fs, nil
 }
@@ -145,7 +145,7 @@ func newOriginFS() (common.RemoteFS, error) {
 	}
 	fs, err := actions.NewRemoteFS(*origin)
 	if err != nil {
-		return nil, fmt.Errorf("cannot parse `-origin`=%q: %s", *origin, err)
+		return nil, fmt.Errorf("cannot parse `-origin`=%q: %w", *origin, err)
 	}
 	return fs, nil
 }
diff --git a/app/vminsert/common/insert_ctx.go b/app/vminsert/common/insert_ctx.go
index 1de903808..a81b060fe 100644
--- a/app/vminsert/common/insert_ctx.go
+++ b/app/vminsert/common/insert_ctx.go
@@ -122,7 +122,7 @@ func (ctx *InsertCtx) AddLabel(name, value string) {
 func (ctx *InsertCtx) FlushBufs() error {
 	if err := vmstorage.AddRows(ctx.mrs); err != nil {
 		return &httpserver.ErrorWithStatusCode{
-			Err:        fmt.Errorf("cannot store metrics: %s", err),
+			Err:        fmt.Errorf("cannot store metrics: %w", err),
 			StatusCode: http.StatusServiceUnavailable,
 		}
 	}
diff --git a/app/vmrestore/main.go b/app/vmrestore/main.go
index 6319008f3..b2f492475 100644
--- a/app/vmrestore/main.go
+++ b/app/vmrestore/main.go
@@ -71,7 +71,7 @@ func newDstFS() (*fslocal.FS, error) {
 		MaxBytesPerSecond: *maxBytesPerSecond,
 	}
 	if err := fs.Init(); err != nil {
-		return nil, fmt.Errorf("cannot initialize local fs: %s", err)
+		return nil, fmt.Errorf("cannot initialize local fs: %w", err)
 	}
 	return fs, nil
 }
@@ -79,7 +79,7 @@ func newDstFS() (*fslocal.FS, error) {
 func newSrcFS() (common.RemoteFS, error) {
 	fs, err := actions.NewRemoteFS(*src)
 	if err != nil {
-		return nil, fmt.Errorf("cannot parse `-src`=%q: %s", *src, err)
+		return nil, fmt.Errorf("cannot parse `-src`=%q: %w", *src, err)
 	}
 	return fs, nil
 }
diff --git a/app/vmselect/netstorage/netstorage.go b/app/vmselect/netstorage/netstorage.go
index 39d18277a..e24e7c490 100644
--- a/app/vmselect/netstorage/netstorage.go
+++ b/app/vmselect/netstorage/netstorage.go
@@ -98,7 +98,7 @@ func timeseriesWorker(workerID uint) {
 			continue
 		}
 		if err := tsw.pts.Unpack(&rs, rss.tr, rss.fetchData); err != nil {
-			tsw.doneCh <- fmt.Errorf("error during time series unpacking: %s", err)
+			tsw.doneCh <- fmt.Errorf("error during time series unpacking: %w", err)
 			continue
 		}
 		if len(rs.Timestamps) > 0 || !rss.fetchData {
@@ -187,7 +187,7 @@ func unpackWorker() {
 		sb := getSortBlock()
 		if err := sb.unpackFrom(upw.br, upw.tr, upw.fetchData); err != nil {
 			putSortBlock(sb)
-			upw.doneCh <- fmt.Errorf("cannot unpack block: %s", err)
+			upw.doneCh <- fmt.Errorf("cannot unpack block: %w", err)
 			continue
 		}
 		upw.sb = sb
@@ -200,7 +200,7 @@ func (pts *packedTimeseries) Unpack(dst *Result, tr storage.TimeRange, fetchData
 	dst.reset()
 	if err := dst.MetricName.Unmarshal(bytesutil.ToUnsafeBytes(pts.metricName)); err != nil {
-		return fmt.Errorf("cannot unmarshal metricName %q: %s", pts.metricName, err)
+		return fmt.Errorf("cannot unmarshal metricName %q: %w", pts.metricName, err)
 	}
 
 	// Feed workers with work
@@ -329,7 +329,7 @@ func (sb *sortBlock) unpackFrom(br storage.BlockRef, tr storage.TimeRange, fetch
 	br.MustReadBlock(&sb.b, fetchData)
 	if fetchData {
 		if err := sb.b.UnmarshalData(); err != nil {
-			return fmt.Errorf("cannot unmarshal block: %s", err)
+			return fmt.Errorf("cannot unmarshal block: %w", err)
 		}
 	}
 	timestamps := sb.b.Timestamps()
@@ -398,7 +398,7 @@ func DeleteSeries(sq *storage.SearchQuery) (int, error) {
 func GetLabels(deadline Deadline) ([]string, error) {
 	labels, err := vmstorage.SearchTagKeys(*maxTagKeysPerSearch)
 	if err != nil {
-		return nil, fmt.Errorf("error during labels search: %s", err)
+		return nil, fmt.Errorf("error during labels search: %w", err)
 	}
 
 	// Substitute "" with "__name__"
@@ -424,7 +424,7 @@ func GetLabelValues(labelName string, deadline Deadline) ([]string, error) {
 	// Search for tag values
 	labelValues, err := vmstorage.SearchTagValues([]byte(labelName), *maxTagValuesPerSearch)
 	if err != nil {
-		return nil, fmt.Errorf("error during label values search for labelName=%q: %s", labelName, err)
+		return nil, fmt.Errorf("error during label values search for labelName=%q: %w", labelName, err)
 	}
 
 	// Sort labelValues like Prometheus does
@@ -437,7 +437,7 @@ func GetLabelValues(labelName string, deadline Deadline) ([]string, error) {
 func GetLabelEntries(deadline Deadline) ([]storage.TagEntry, error) {
 	labelEntries, err := vmstorage.SearchTagEntries(*maxTagKeysPerSearch, *maxTagValuesPerSearch)
 	if err != nil {
-		return nil, fmt.Errorf("error during label entries request: %s", err)
+		return nil, fmt.Errorf("error during label entries request: %w", err)
 	}
 
 	// Substitute "" with "__name__"
@@ -464,7 +464,7 @@ func GetLabelEntries(deadline Deadline) ([]storage.TagEntry, error) {
 func GetTSDBStatusForDate(deadline Deadline, date uint64, topN int) (*storage.TSDBStatus, error) {
 	status, err := vmstorage.GetTSDBStatusForDate(date, topN)
 	if err != nil {
-		return nil, fmt.Errorf("error during tsdb status request: %s", err)
+		return nil, fmt.Errorf("error during tsdb status request: %w", err)
 	}
 	return status, nil
 }
@@ -473,7 +473,7 @@ func GetTSDBStatusForDate(deadline Deadline, date uint64, topN int) (*storage.TS
 func GetSeriesCount(deadline Deadline) (uint64, error) {
 	n, err := vmstorage.GetSeriesCount()
 	if err != nil {
-		return 0, fmt.Errorf("error during series count request: %s", err)
+		return 0, fmt.Errorf("error during series count request: %w", err)
 	}
 	return n, nil
 }
@@ -529,7 +529,7 @@ func ProcessSearchQuery(sq *storage.SearchQuery, fetchData bool, deadline Deadli
 		m[string(metricName)] = append(brs, *sr.MetricBlockRef.BlockRef)
 	}
 	if err := sr.Error(); err != nil {
-		return nil, fmt.Errorf("search error after reading %d data blocks: %s", blocksRead, err)
+		return nil, fmt.Errorf("search error after reading %d data blocks: %w", blocksRead, err)
 	}
 
 	var rss Results
@@ -555,7 +555,7 @@ func setupTfss(tagFilterss [][]storage.TagFilter) ([]*storage.TagFilters, error)
 		for i := range tagFilters {
 			tf := &tagFilters[i]
 			if err := tfs.Add(tf.Key, tf.Value, tf.IsNegative, tf.IsRegexp); err != nil {
-				return nil, fmt.Errorf("cannot parse tag filter %s: %s", tf, err)
+				return nil, fmt.Errorf("cannot parse tag filter %s: %w", tf, err)
 			}
 		}
 		tfss = append(tfss, tfs)
diff --git a/app/vmselect/prometheus/prometheus.go b/app/vmselect/prometheus/prometheus.go
index 5b704fa13..eb106f9b0 100644
--- a/app/vmselect/prometheus/prometheus.go
+++ b/app/vmselect/prometheus/prometheus.go
@@ -46,7 +46,7 @@ const defaultStep = 5 * 60 * 1000
 func FederateHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error {
 	ct := currentTime()
 	if err := r.ParseForm(); err != nil {
-		return fmt.Errorf("cannot parse request form values: %s", err)
+		return fmt.Errorf("cannot parse request form values: %w", err)
 	}
 	matches := r.Form["match[]"]
 	if len(matches) == 0 {
@@ -82,7 +82,7 @@ func FederateHandler(startTime time.Time, w http.ResponseWriter, r *http.Request
 	}
 	rss, err := netstorage.ProcessSearchQuery(sq, true, deadline)
 	if err != nil {
-		return fmt.Errorf("cannot fetch data for %q: %s", sq, err)
+		return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
 	}
 
 	resultsCh := make(chan *quicktemplate.ByteBuffer)
@@ -105,7 +105,7 @@ func FederateHandler(startTime time.Time, w http.ResponseWriter, r *http.Request
 
 	err = <-doneCh
 	if err != nil {
-		return fmt.Errorf("error during data fetching: %s", err)
+		return fmt.Errorf("error during data fetching: %w", err)
 	}
 	federateDuration.UpdateDuration(startTime)
 	return nil
@@ -117,7 +117,7 @@ var federateDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/fe
 func ExportHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error {
 	ct := currentTime()
 	if err := r.ParseForm(); err != nil {
-		return fmt.Errorf("cannot parse request form values: %s", err)
+		return fmt.Errorf("cannot parse request form values: %w", err)
 	}
 	matches := r.Form["match[]"]
 	if len(matches) == 0 {
@@ -143,7 +143,7 @@ func ExportHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
 		end = start + defaultStep
 	}
 	if err := exportHandler(w, matches, start, end, format, maxRowsPerLine, deadline); err != nil {
-		return fmt.Errorf("error when exporting data for queries=%q on the time range (start=%d, end=%d): %s", matches, start, end, err)
+		return fmt.Errorf("error when exporting data for queries=%q on the time range (start=%d, end=%d): %w", matches, start, end, err)
 	}
 	exportDuration.UpdateDuration(startTime)
 	return nil
@@ -202,7 +202,7 @@ func exportHandler(w http.ResponseWriter, matches []string, start, end int64, fo
 	}
 	rss, err := netstorage.ProcessSearchQuery(sq, true, deadline)
 	if err != nil {
-		return fmt.Errorf("cannot fetch data for %q: %s", sq, err)
+		return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
 	}
 
 	resultsCh := make(chan *quicktemplate.ByteBuffer, runtime.GOMAXPROCS(-1))
@@ -227,7 +227,7 @@ func exportHandler(w http.ResponseWriter, matches []string, start, end int64, fo
 	}
 	err = <-doneCh
 	if err != nil {
-		return fmt.Errorf("error during data fetching: %s", err)
+		return fmt.Errorf("error during data fetching: %w", err)
 	}
 	return nil
 }
@@ -237,7 +237,7 @@ func exportHandler(w http.ResponseWriter, matches []string, start, end int64, fo
 // See https://prometheus.io/docs/prometheus/latest/querying/api/#delete-series
 func DeleteHandler(startTime time.Time, r *http.Request) error {
 	if err := r.ParseForm(); err != nil {
-		return fmt.Errorf("cannot parse request form values: %s", err)
+		return fmt.Errorf("cannot parse request form values: %w", err)
 	}
 	if r.FormValue("start") != "" || r.FormValue("end") != "" {
 		return fmt.Errorf("start and end aren't supported. Remove these args from the query in order to delete all the matching metrics")
@@ -255,7 +255,7 @@ func DeleteHandler(startTime time.Time, r *http.Request) error {
 	}
 	deletedCount, err := netstorage.DeleteSeries(sq)
 	if err != nil {
-		return fmt.Errorf("cannot delete time series matching %q: %s", matches, err)
+		return fmt.Errorf("cannot delete time series matching %q: %w", matches, err)
 	}
 	if deletedCount > 0 {
 		promql.ResetRollupResultCache()
@@ -273,14 +273,14 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr
 	deadline := getDeadlineForQuery(r)
 
 	if err := r.ParseForm(); err != nil {
-		return fmt.Errorf("cannot parse form values: %s", err)
+		return fmt.Errorf("cannot parse form values: %w", err)
 	}
 	var labelValues []string
 	if len(r.Form["match[]"]) == 0 && len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
 		var err error
 		labelValues, err = netstorage.GetLabelValues(labelName, deadline)
 		if err != nil {
-			return fmt.Errorf(`cannot obtain label values for %q: %s`, labelName, err)
+			return fmt.Errorf(`cannot obtain label values for %q: %w`, labelName, err)
 		}
 	} else {
 		// Extended functionality that allows filtering by label filters and time range
@@ -302,7 +302,7 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr
 		}
 		labelValues, err = labelValuesWithMatches(labelName, matches, start, end, deadline)
 		if err != nil {
-			return fmt.Errorf("cannot obtain label values for %q, match[]=%q, start=%d, end=%d: %s", labelName, matches, start, end, err)
+			return fmt.Errorf("cannot obtain label values for %q, match[]=%q, start=%d, end=%d: %w", labelName, matches, start, end, err)
 		}
 	}
 
@@ -343,7 +343,7 @@ func labelValuesWithMatches(labelName string, matches []string, start, end int64
 	}
 	rss, err := netstorage.ProcessSearchQuery(sq, false, deadline)
 	if err != nil {
-		return nil, fmt.Errorf("cannot fetch data for %q: %s", sq, err)
+		return nil, fmt.Errorf("cannot fetch data for %q: %w", sq, err)
 	}
 
 	m := make(map[string]struct{})
@@ -358,7 +358,7 @@ func labelValuesWithMatches(labelName string, matches []string, start, end int64
 		mLock.Unlock()
 	})
 	if err != nil {
-		return nil, fmt.Errorf("error when data fetching: %s", err)
+		return nil, fmt.Errorf("error when fetching data: %w", err)
 	}
 
 	labelValues := make([]string, 0, len(m))
@@ -376,7 +376,7 @@ func LabelsCountHandler(startTime time.Time, w http.ResponseWriter, r *http.Requ
 	deadline := getDeadlineForQuery(r)
 	labelEntries, err := netstorage.GetLabelEntries(deadline)
 	if err != nil {
-		return fmt.Errorf(`cannot obtain label entries: %s`, err)
+		return fmt.Errorf(`cannot obtain label entries: %w`, err)
 	}
 	w.Header().Set("Content-Type", "application/json")
 	WriteLabelsCountResponse(w, labelEntries)
@@ -394,14 +394,14 @@ const secsPerDay = 3600 * 24
 func TSDBStatusHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error {
 	deadline := getDeadlineForQuery(r)
 	if err := r.ParseForm(); err != nil {
-		return fmt.Errorf("cannot parse form values: %s", err)
+		return fmt.Errorf("cannot parse form values: %w", err)
 	}
 	date := fasttime.UnixDate()
 	dateStr := r.FormValue("date")
 	if len(dateStr) > 0 {
 		t, err := time.Parse("2006-01-02", dateStr)
 		if err != nil {
-			return fmt.Errorf("cannot parse `date` arg %q: %s", dateStr, err)
+			return fmt.Errorf("cannot parse `date` arg %q: %w", dateStr, err)
 		}
 		date = uint64(t.Unix()) / secsPerDay
 	}
@@ -410,7 +410,7 @@ func TSDBStatusHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
 	if len(topNStr) > 0 {
 		n, err := strconv.Atoi(topNStr)
 		if err != nil {
-			return fmt.Errorf("cannot parse `topN` arg %q: %s", topNStr, err)
+			return fmt.Errorf("cannot parse `topN` arg %q: %w", topNStr, err)
 		}
 		if n <= 0 {
 			n = 1
@@ -422,7 +422,7 @@ func TSDBStatusHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
 	}
 	status, err := netstorage.GetTSDBStatusForDate(deadline, date, topN)
 	if err != nil {
-		return fmt.Errorf(`cannot obtain tsdb status for date=%d, topN=%d: %s`, date, topN, err)
+		return fmt.Errorf(`cannot obtain tsdb status for date=%d, topN=%d: %w`, date, topN, err)
 	}
 	w.Header().Set("Content-Type", "application/json")
 	WriteTSDBStatusResponse(w, status)
@@ -439,14 +439,14 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
 	deadline := getDeadlineForQuery(r)
 
 	if err := r.ParseForm(); err != nil {
-		return fmt.Errorf("cannot parse form values: %s", err)
+		return fmt.Errorf("cannot parse form values: %w", err)
 	}
 	var labels []string
 	if len(r.Form["match[]"]) == 0 && len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
 		var err error
 		labels, err = netstorage.GetLabels(deadline)
 		if err != nil {
-			return fmt.Errorf("cannot obtain labels: %s", err)
+			return fmt.Errorf("cannot obtain labels: %w", err)
 		}
 	} else {
 		// Extended functionality that allows filtering by label filters and time range
@@ -466,7 +466,7 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
 		}
 		labels, err = labelsWithMatches(matches, start, end, deadline)
 		if err != nil {
-			return fmt.Errorf("cannot obtain labels for match[]=%q, start=%d, end=%d: %s", matches, start, end, err)
+			return fmt.Errorf("cannot obtain labels for match[]=%q, start=%d, end=%d: %w", matches, start, end, err)
 		}
 	}
 
@@ -494,7 +494,7 @@ func labelsWithMatches(matches []string, start, end int64, deadline netstorage.D
 	}
 	rss, err := netstorage.ProcessSearchQuery(sq, false, deadline)
 	if err != nil {
-		return nil, fmt.Errorf("cannot fetch data for %q: %s", sq, err)
+		return nil, fmt.Errorf("cannot fetch data for %q: %w", sq, err)
 	}
 
 	m := make(map[string]struct{})
@@ -510,7 +510,7 @@ func labelsWithMatches(matches []string, start, end int64, deadline netstorage.D
 		mLock.Unlock()
 	})
 	if err != nil {
-		return nil, fmt.Errorf("error when data fetching: %s", err)
+		return nil, fmt.Errorf("error when fetching data: %w", err)
 	}
 
 	labels := make([]string, 0, len(m))
@@ -528,7 +528,7 @@ func SeriesCountHandler(startTime time.Time, w http.ResponseWriter, r *http.Requ
 	deadline := getDeadlineForQuery(r)
 	n, err := netstorage.GetSeriesCount(deadline)
 	if err != nil {
-		return fmt.Errorf("cannot obtain series count: %s", err)
+		return fmt.Errorf("cannot obtain series count: %w", err)
 	}
 	w.Header().Set("Content-Type", "application/json")
 	WriteSeriesCountResponse(w, n)
@@ -545,7 +545,7 @@ func SeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
 	ct := currentTime()
 
 	if err := r.ParseForm(); err != nil {
-		return fmt.Errorf("cannot parse form values: %s", err)
+		return fmt.Errorf("cannot parse form values: %w", err)
 	}
 	matches := r.Form["match[]"]
 	if len(matches) == 0 {
@@ -580,7 +580,7 @@ func SeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
 	}
 	rss, err := netstorage.ProcessSearchQuery(sq, false, deadline)
 	if err != nil {
-		return fmt.Errorf("cannot fetch data for %q: %s", sq, err)
+		return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
 	}
 
 	resultsCh := make(chan *quicktemplate.ByteBuffer)
@@ -605,7 +605,7 @@ func SeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
 	}
 	err = <-doneCh
 	if err != nil {
-		return fmt.Errorf("error during data fetching: %s", err)
+		return fmt.Errorf("error during data fetching: %w", err)
 	}
 	seriesDuration.UpdateDuration(startTime)
 	return nil
@@ -652,17 +652,17 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
 	if childQuery, windowStr, offsetStr := promql.IsMetricSelectorWithRollup(query); childQuery != "" {
 		window, err := parsePositiveDuration(windowStr, step)
 		if err != nil {
-			return fmt.Errorf("cannot parse window: %s", err)
+			return fmt.Errorf("cannot parse window: %w", err)
 		}
 		offset, err := parseDuration(offsetStr, step)
 		if err != nil {
-			return fmt.Errorf("cannot parse offset: %s", err)
+			return fmt.Errorf("cannot parse offset: %w", err)
 		}
 		start -= offset
 		end := start
 		start = end - window
 		if err := exportHandler(w, []string{childQuery}, start, end, "promapi", 0, deadline); err != nil {
-			return fmt.Errorf("error when exporting data for query=%q on the time range (start=%d, end=%d): %s", childQuery, start, end, err)
+			return fmt.Errorf("error when exporting data for query=%q on the time range (start=%d, end=%d): %w", childQuery, start, end, err)
 		}
 		queryDuration.UpdateDuration(startTime)
 		return nil
@@ -670,24 +670,24 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
 	if childQuery, windowStr, stepStr, offsetStr := promql.IsRollup(query); childQuery != "" {
 		newStep, err := parsePositiveDuration(stepStr, step)
 		if err != nil {
-			return fmt.Errorf("cannot parse step: %s", err)
+			return fmt.Errorf("cannot parse step: %w", err)
 		}
 		if newStep > 0 {
 			step = newStep
 		}
 		window, err := parsePositiveDuration(windowStr, step)
 		if err != nil {
-			return fmt.Errorf("cannot parse window: %s", err)
+			return fmt.Errorf("cannot parse window: %w", err)
 		}
 		offset, err := parseDuration(offsetStr, step)
 		if err != nil {
-			return fmt.Errorf("cannot parse offset: %s", err)
+			return fmt.Errorf("cannot parse offset: %w", err)
 		}
 		start -= offset
 		end := start
 		start = end - window
 		if err := queryRangeHandler(w, childQuery, start, end, step, r, ct); err != nil {
-			return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %s", childQuery, start, end, step, err)
+			return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", childQuery, start, end, step, err)
 		}
 		queryDuration.UpdateDuration(startTime)
 		return nil
@@ -702,7 +702,7 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
 	}
 	result, err := promql.Exec(&ec, query, true)
 	if err != nil {
-		return fmt.Errorf("error when executing query=%q for (time=%d, step=%d): %s", query, start, step, err)
+		return fmt.Errorf("error when executing query=%q for (time=%d, step=%d): %w", query, start, step, err)
 	}
 
 	w.Header().Set("Content-Type", "application/json")
@@ -750,7 +750,7 @@ func QueryRangeHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
 		return err
 	}
 	if err := queryRangeHandler(w, query, start, end, step, r, ct); err != nil {
-		return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %s", query, start, end, step, err)
+		return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", query, start, end, step, err)
 	}
 	queryRangeDuration.UpdateDuration(startTime)
 	return nil
@@ -788,7 +788,7 @@ func queryRangeHandler(w http.ResponseWriter, query string, start, end, step int
 	}
 	result, err := promql.Exec(&ec, query, false)
 	if err != nil {
-		return fmt.Errorf("cannot execute query: %s", err)
+		return fmt.Errorf("cannot execute query: %w", err)
 	}
 	queryOffset := getLatencyOffsetMilliseconds()
 	if ct-end < queryOffset {
@@ -897,7 +897,7 @@ func getTime(r *http.Request, argKey string, defaultValue int64) (int64, error)
 		// Try parsing duration relative to the current time
 		d, err1 := time.ParseDuration(argValue)
 		if err1 != nil {
-			return 0, fmt.Errorf("cannot parse %q=%q: %s", argKey, argValue, err)
+			return 0, fmt.Errorf("cannot parse %q=%q: %w", argKey, argValue, err)
 		}
 		if d > 0 {
 			d = -d
@@ -939,7 +939,7 @@ func getDuration(r *http.Request, argKey string, defaultValue int64) (int64, err
 		// Try parsing string format
 		d, err := time.ParseDuration(argValue)
 		if err != nil {
-			return 0, fmt.Errorf("cannot parse %q=%q: %s", argKey, argValue, err)
+			return 0, fmt.Errorf("cannot parse %q=%q: %w", argKey, argValue, err)
 		}
 		secs = d.Seconds()
 	}
@@ -1001,7 +1001,7 @@ func getTagFilterssFromMatches(matches []string) ([][]storage.TagFilter, error)
 	for _, match := range matches {
 		tagFilters, err := promql.ParseMetricSelector(match)
 		if err != nil {
-			return nil, fmt.Errorf("cannot parse %q: %s", match, err)
+			return nil, fmt.Errorf("cannot parse %q: %w", match, err)
 		}
 		tagFilterss = append(tagFilterss, tagFilters)
 	}
diff --git a/app/vmselect/promql/aggr_incremental_test.go b/app/vmselect/promql/aggr_incremental_test.go
index b517989b3..781296511 100644
--- a/app/vmselect/promql/aggr_incremental_test.go
+++ b/app/vmselect/promql/aggr_incremental_test.go
@@ -119,7 +119,7 @@ func testIncrementalParallelAggr(iafc *incrementalAggrFuncContext, tssSrc, tssEx
 	wg.Wait()
 	tssActual := iafc.finalizeTimeseries()
 	if err := expectTimeseriesEqual(tssActual, tssExpected); err != nil {
-		return fmt.Errorf("%s; tssActual=%v, tssExpected=%v", err, tssActual, tssExpected)
+		return fmt.Errorf("%w; tssActual=%v, tssExpected=%v", err, tssActual, tssExpected)
 	}
 	return nil
 }
@@ -164,7 +164,7 @@ func expectTsEqual(actual, expected *timeseries) error {
 		return fmt.Errorf("unexpected timestamps; got %v; want %v", actual.Timestamps, expected.Timestamps)
 	}
 	if err := compareValues(actual.Values, expected.Values); err != nil {
-		return fmt.Errorf("%s; actual %v; expected %v", err, actual.Values, expected.Values)
+		return fmt.Errorf("%w; actual %v; expected %v", err, actual.Values, expected.Values)
 	}
 	return nil
 }
diff --git a/app/vmselect/promql/eval.go b/app/vmselect/promql/eval.go
index a56e4ed1b..a279aa284 100644
--- a/app/vmselect/promql/eval.go
+++ b/app/vmselect/promql/eval.go
@@ -160,14 +160,14 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
 		}
 		rv, err := evalRollupFunc(ec, "default_rollup", rollupDefault, e, re, nil)
 		if err != nil {
-			return nil, fmt.Errorf(`cannot evaluate %q: %s`, me.AppendString(nil), err)
+			return nil, fmt.Errorf(`cannot evaluate %q: %w`, me.AppendString(nil), err)
 		}
 		return rv, nil
 	}
 	if re, ok := e.(*metricsql.RollupExpr); ok {
 		rv, err := evalRollupFunc(ec, "default_rollup", rollupDefault, e, re, nil)
 		if err != nil {
-			return nil, fmt.Errorf(`cannot evaluate %q: %s`, re.AppendString(nil), err)
+			return nil, fmt.Errorf(`cannot evaluate %q: %w`, re.AppendString(nil), err)
 		}
 		return rv, nil
 	}
@@ -189,7 +189,7 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
 		}
 		rv, err := tf(tfa)
 		if err != nil {
-			return nil, fmt.Errorf(`cannot evaluate %q: %s`, fe.AppendString(nil), err)
+			return nil, fmt.Errorf(`cannot evaluate %q: %w`, fe.AppendString(nil), err)
 		}
 		return rv, nil
 	}
@@ -203,7 +203,7 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
 		}
 		rv, err := evalRollupFunc(ec, fe.Name, rf, e, re, nil)
 		if err != nil {
-			return nil, fmt.Errorf(`cannot evaluate %q: %s`, fe.AppendString(nil), err)
+			return nil, fmt.Errorf(`cannot evaluate %q: %w`, fe.AppendString(nil), err)
 		}
 		return rv, nil
 	}
@@ -240,7 +240,7 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
 		}
 		rv, err := af(afa)
 		if err != nil {
-			return nil, fmt.Errorf(`cannot evaluate %q: %s`, ae.AppendString(nil), err)
+			return nil, fmt.Errorf(`cannot evaluate %q: %w`, ae.AppendString(nil), err)
 		}
 		return rv, nil
 	}
@@ -264,7 +264,7 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
 		}
 		rv, err := bf(bfa)
 		if err != nil {
-			return nil, fmt.Errorf(`cannot evaluate %q: %s`, be.AppendString(nil), err)
+			return nil, fmt.Errorf(`cannot evaluate %q: %w`, be.AppendString(nil), err)
 		}
 		return rv, nil
 	}
@@ -375,7 +375,7 @@ func evalRollupFuncArgs(ec *EvalConfig, fe *metricsql.FuncExpr) ([]interface{},
 		}
 		ts, err := evalExpr(ec, arg)
 		if err != nil {
-			return nil, nil, fmt.Errorf("cannot evaluate arg #%d for %q: %s", i+1, fe.AppendString(nil), err)
+			return nil, nil, fmt.Errorf("cannot evaluate arg #%d for %q: %w", i+1, fe.AppendString(nil), err)
 		}
 		args[i] = ts
 	}
diff --git a/app/vmselect/promql/rollup.go b/app/vmselect/promql/rollup.go
index 8ae3045b7..7820ab74c 100644
--- a/app/vmselect/promql/rollup.go
+++ b/app/vmselect/promql/rollup.go
@@ -285,7 +285,7 @@ func getRollupConfigs(name string, rf rollupFunc, expr metricsql.Expr, start, en
 	case "aggr_over_time":
 		aggrFuncNames, err := getRollupAggrFuncNames(expr)
 		if err != nil {
-			return nil, nil, fmt.Errorf("invalid args to %s: %s", expr.AppendString(nil), err)
+			return nil, nil, fmt.Errorf("invalid args to %s: %w", expr.AppendString(nil), err)
 		}
 		for _, aggrFuncName := range aggrFuncNames {
 			if rollupFuncsRemoveCounterResets[aggrFuncName] {
diff --git a/app/vmselect/promql/rollup_result_cache.go b/app/vmselect/promql/rollup_result_cache.go
index 5b41bb05f..8dc3d3ec7 100644
--- a/app/vmselect/promql/rollup_result_cache.go
+++ b/app/vmselect/promql/rollup_result_cache.go
@@ -286,7 +286,7 @@ var (
 		var buf [8]byte
 		if _, err := rand.Read(buf[:]); err != nil {
 			// do not use logger.Panicf, since it isn't initialized yet.
- panic(fmt.Errorf("FATAL: cannot read random data for rollupResultCacheKeyPrefix: %s", err)) + panic(fmt.Errorf("FATAL: cannot read random data for rollupResultCacheKeyPrefix: %w", err)) } return encoding.UnmarshalUint64(buf[:]) }() @@ -414,7 +414,7 @@ func (mi *rollupResultCacheMetainfo) Unmarshal(src []byte) error { for i := 0; i < entriesLen; i++ { tail, err := mi.entries[i].Unmarshal(src) if err != nil { - return fmt.Errorf("cannot unmarshal entry #%d: %s", i, err) + return fmt.Errorf("cannot unmarshal entry #%d: %w", i, err) } src = tail } diff --git a/app/vmselect/promql/timeseries.go b/app/vmselect/promql/timeseries.go index c000f5430..b9c35ff40 100644 --- a/app/vmselect/promql/timeseries.go +++ b/app/vmselect/promql/timeseries.go @@ -217,7 +217,7 @@ func (ts *timeseries) unmarshalFastNoTimestamps(src []byte) ([]byte, error) { tail, err := unmarshalMetricNameFast(&ts.MetricName, src) if err != nil { - return tail, fmt.Errorf("cannot unmarshal MetricName: %s", err) + return tail, fmt.Errorf("cannot unmarshal MetricName: %w", err) } src = tail @@ -275,7 +275,7 @@ func unmarshalMetricNameFast(mn *storage.MetricName, src []byte) ([]byte, error) tail, metricGroup, err := unmarshalBytesFast(src) if err != nil { - return tail, fmt.Errorf("cannot unmarshal MetricGroup: %s", err) + return tail, fmt.Errorf("cannot unmarshal MetricGroup: %w", err) } src = tail mn.MetricGroup = metricGroup[:len(metricGroup):len(metricGroup)] @@ -292,13 +292,13 @@ func unmarshalMetricNameFast(mn *storage.MetricName, src []byte) ([]byte, error) for i := range mn.Tags { tail, key, err := unmarshalBytesFast(src) if err != nil { - return tail, fmt.Errorf("cannot unmarshal key for tag[%d]: %s", i, err) + return tail, fmt.Errorf("cannot unmarshal key for tag[%d]: %w", i, err) } src = tail tail, value, err := unmarshalBytesFast(src) if err != nil { - return tail, fmt.Errorf("cannot unmarshal value for tag[%d]: %s", i, err) + return tail, fmt.Errorf("cannot unmarshal value for tag[%d]: %w", i, err) } src = tail diff --git a/app/vmselect/promql/transform.go b/app/vmselect/promql/transform.go index 1f0f85812..3a25d1dc6 100644 --- a/app/vmselect/promql/transform.go +++ b/app/vmselect/promql/transform.go @@ -414,7 +414,7 @@ func transformHistogramShare(tfa *transformFuncArg) ([]*timeseries, error) { } les, err := getScalar(args[0], 0) if err != nil { - return nil, fmt.Errorf("cannot parse le: %s", err) + return nil, fmt.Errorf("cannot parse le: %w", err) } // Convert buckets with `vmrange` labels to buckets with `le` labels. @@ -425,7 +425,7 @@ func transformHistogramShare(tfa *transformFuncArg) ([]*timeseries, error) { if len(args) > 2 { s, err := getString(args[2], 2) if err != nil { - return nil, fmt.Errorf("cannot parse boundsLabel (arg #3): %s", err) + return nil, fmt.Errorf("cannot parse boundsLabel (arg #3): %w", err) } boundsLabel = s } @@ -513,7 +513,7 @@ func transformHistogramQuantile(tfa *transformFuncArg) ([]*timeseries, error) { } phis, err := getScalar(args[0], 0) if err != nil { - return nil, fmt.Errorf("cannot parse phi: %s", err) + return nil, fmt.Errorf("cannot parse phi: %w", err) } // Convert buckets with `vmrange` labels to buckets with `le` labels. 
@@ -524,7 +524,7 @@ func transformHistogramQuantile(tfa *transformFuncArg) ([]*timeseries, error) { if len(args) > 2 { s, err := getString(args[2], 2) if err != nil { - return nil, fmt.Errorf("cannot parse boundsLabel (arg #3): %s", err) + return nil, fmt.Errorf("cannot parse boundsLabel (arg #3): %w", err) } boundsLabel = s } @@ -1034,7 +1034,7 @@ func transformLabelMap(tfa *transformFuncArg) ([]*timeseries, error) { } label, err := getString(args[1], 1) if err != nil { - return nil, fmt.Errorf("cannot read label name: %s", err) + return nil, fmt.Errorf("cannot read label name: %w", err) } srcValues, dstValues, err := getStringPairs(args[2:]) if err != nil { @@ -1179,7 +1179,7 @@ func transformLabelTransform(tfa *transformFuncArg) ([]*timeseries, error) { r, err := metricsql.CompileRegexp(regex) if err != nil { - return nil, fmt.Errorf(`cannot compile regex %q: %s`, regex, err) + return nil, fmt.Errorf(`cannot compile regex %q: %w`, regex, err) } return labelReplace(args[0], label, r, label, replacement) } @@ -1208,7 +1208,7 @@ func transformLabelReplace(tfa *transformFuncArg) ([]*timeseries, error) { r, err := metricsql.CompileRegexpAnchored(regex) if err != nil { - return nil, fmt.Errorf(`cannot compile regex %q: %s`, regex, err) + return nil, fmt.Errorf(`cannot compile regex %q: %w`, regex, err) } return labelReplace(args[0], srcLabel, r, dstLabel, replacement) } @@ -1238,7 +1238,7 @@ func transformLabelValue(tfa *transformFuncArg) ([]*timeseries, error) { } labelName, err := getString(args[1], 1) if err != nil { - return nil, fmt.Errorf("cannot get label name: %s", err) + return nil, fmt.Errorf("cannot get label name: %w", err) } rvs := args[0] for _, ts := range rvs { @@ -1265,15 +1265,15 @@ func transformLabelMatch(tfa *transformFuncArg) ([]*timeseries, error) { } labelName, err := getString(args[1], 1) if err != nil { - return nil, fmt.Errorf("cannot get label name: %s", err) + return nil, fmt.Errorf("cannot get label name: %w", err) } labelRe, err := getString(args[2], 2) if err != nil { - return nil, fmt.Errorf("cannot get regexp: %s", err) + return nil, fmt.Errorf("cannot get regexp: %w", err) } r, err := metricsql.CompileRegexpAnchored(labelRe) if err != nil { - return nil, fmt.Errorf(`cannot compile regexp %q: %s`, labelRe, err) + return nil, fmt.Errorf(`cannot compile regexp %q: %w`, labelRe, err) } tss := args[0] rvs := tss[:0] @@ -1293,15 +1293,15 @@ func transformLabelMismatch(tfa *transformFuncArg) ([]*timeseries, error) { } labelName, err := getString(args[1], 1) if err != nil { - return nil, fmt.Errorf("cannot get label name: %s", err) + return nil, fmt.Errorf("cannot get label name: %w", err) } labelRe, err := getString(args[2], 2) if err != nil { - return nil, fmt.Errorf("cannot get regexp: %s", err) + return nil, fmt.Errorf("cannot get regexp: %w", err) } r, err := metricsql.CompileRegexpAnchored(labelRe) if err != nil { - return nil, fmt.Errorf(`cannot compile regexp %q: %s`, labelRe, err) + return nil, fmt.Errorf(`cannot compile regexp %q: %w`, labelRe, err) } tss := args[0] rvs := tss[:0] @@ -1401,7 +1401,7 @@ func newTransformFuncSortByLabel(isDesc bool) transformFunc { } label, err := getString(args[1], 1) if err != nil { - return nil, fmt.Errorf("cannot parse label name for sorting: %s", err) + return nil, fmt.Errorf("cannot parse label name for sorting: %w", err) } rvs := args[0] sort.SliceStable(rvs, func(i, j int) bool { diff --git a/app/vmstorage/main.go b/app/vmstorage/main.go index 8f37b72f0..b03bf04b8 100644 --- a/app/vmstorage/main.go +++ 
b/app/vmstorage/main.go @@ -171,7 +171,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool { w.Header().Set("Content-Type", "application/json") snapshotPath, err := Storage.CreateSnapshot() if err != nil { - err = fmt.Errorf("cannot create snapshot: %s", err) + err = fmt.Errorf("cannot create snapshot: %w", err) jsonResponseError(w, err) return true } @@ -185,7 +185,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool { w.Header().Set("Content-Type", "application/json") snapshots, err := Storage.ListSnapshots() if err != nil { - err = fmt.Errorf("cannot list snapshots: %s", err) + err = fmt.Errorf("cannot list snapshots: %w", err) jsonResponseError(w, err) return true } @@ -202,7 +202,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool { w.Header().Set("Content-Type", "application/json") snapshotName := r.FormValue("snapshot") if err := Storage.DeleteSnapshot(snapshotName); err != nil { - err = fmt.Errorf("cannot delete snapshot %q: %s", snapshotName, err) + err = fmt.Errorf("cannot delete snapshot %q: %w", snapshotName, err) jsonResponseError(w, err) return true } @@ -212,13 +212,13 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool { w.Header().Set("Content-Type", "application/json") snapshots, err := Storage.ListSnapshots() if err != nil { - err = fmt.Errorf("cannot list snapshots: %s", err) + err = fmt.Errorf("cannot list snapshots: %w", err) jsonResponseError(w, err) return true } for _, snapshotName := range snapshots { if err := Storage.DeleteSnapshot(snapshotName); err != nil { - err = fmt.Errorf("cannot delete snapshot %q: %s", snapshotName, err) + err = fmt.Errorf("cannot delete snapshot %q: %w", snapshotName, err) jsonResponseError(w, err) return true } diff --git a/lib/backup/actions/backup.go b/lib/backup/actions/backup.go index 6661af9c1..ab1797d5f 100644 --- a/lib/backup/actions/backup.go +++ b/lib/backup/actions/backup.go @@ -55,13 +55,13 @@ func (b *Backup) Run() error { } if err := dst.DeleteFile(fscommon.BackupCompleteFilename); err != nil { - return fmt.Errorf("cannot delete `backup complete` file at %s: %s", dst, err) + return fmt.Errorf("cannot delete `backup complete` file at %s: %w", dst, err) } if err := runBackup(src, dst, origin, concurrency); err != nil { return err } if err := dst.CreateFile(fscommon.BackupCompleteFilename, []byte("ok")); err != nil { - return fmt.Errorf("cannot create `backup complete` file at %s: %s", dst, err) + return fmt.Errorf("cannot create `backup complete` file at %s: %w", dst, err) } return nil } @@ -74,17 +74,17 @@ func runBackup(src *fslocal.FS, dst common.RemoteFS, origin common.OriginFS, con logger.Infof("obtaining list of parts at %s", src) srcParts, err := src.ListParts() if err != nil { - return fmt.Errorf("cannot list src parts: %s", err) + return fmt.Errorf("cannot list src parts: %w", err) } logger.Infof("obtaining list of parts at %s", dst) dstParts, err := dst.ListParts() if err != nil { - return fmt.Errorf("cannot list dst parts: %s", err) + return fmt.Errorf("cannot list dst parts: %w", err) } logger.Infof("obtaining list of parts at %s", origin) originParts, err := origin.ListParts() if err != nil { - return fmt.Errorf("cannot list origin parts: %s", err) + return fmt.Errorf("cannot list origin parts: %w", err) } backupSize := getPartsSize(srcParts) @@ -97,7 +97,7 @@ func runBackup(src *fslocal.FS, dst common.RemoteFS, origin common.OriginFS, con err = runParallel(concurrency, partsToDelete, func(p common.Part) error { logger.Infof("deleting %s from 
%s", &p, dst) if err := dst.DeletePart(p); err != nil { - return fmt.Errorf("cannot delete %s from %s: %s", &p, dst, err) + return fmt.Errorf("cannot delete %s from %s: %w", &p, dst, err) } atomic.AddUint64(&deletedParts, 1) return nil @@ -109,7 +109,7 @@ func runBackup(src *fslocal.FS, dst common.RemoteFS, origin common.OriginFS, con return err } if err := dst.RemoveEmptyDirs(); err != nil { - return fmt.Errorf("cannot remove empty directories at %s: %s", dst, err) + return fmt.Errorf("cannot remove empty directories at %s: %w", dst, err) } } @@ -122,7 +122,7 @@ func runBackup(src *fslocal.FS, dst common.RemoteFS, origin common.OriginFS, con err = runParallel(concurrency, originCopyParts, func(p common.Part) error { logger.Infof("server-side copying %s from %s to %s", &p, origin, dst) if err := dst.CopyPart(origin, p); err != nil { - return fmt.Errorf("cannot copy %s from %s to %s: %s", &p, origin, dst, err) + return fmt.Errorf("cannot copy %s from %s to %s: %w", &p, origin, dst, err) } atomic.AddUint64(&copiedParts, 1) return nil @@ -144,17 +144,17 @@ func runBackup(src *fslocal.FS, dst common.RemoteFS, origin common.OriginFS, con logger.Infof("uploading %s from %s to %s", &p, src, dst) rc, err := src.NewReadCloser(p) if err != nil { - return fmt.Errorf("cannot create reader for %s from %s: %s", &p, src, err) + return fmt.Errorf("cannot create reader for %s from %s: %w", &p, src, err) } sr := &statReader{ r: rc, bytesRead: &bytesUploaded, } if err := dst.UploadPart(p, sr); err != nil { - return fmt.Errorf("cannot upload %s to %s: %s", &p, dst, err) + return fmt.Errorf("cannot upload %s to %s: %w", &p, dst, err) } if err = rc.Close(); err != nil { - return fmt.Errorf("cannot close reader for %s from %s: %s", &p, src, err) + return fmt.Errorf("cannot close reader for %s from %s: %w", &p, src, err) } return nil }, func(elapsed time.Duration) { diff --git a/lib/backup/actions/restore.go b/lib/backup/actions/restore.go index 2903f8691..c735adbe2 100644 --- a/lib/backup/actions/restore.go +++ b/lib/backup/actions/restore.go @@ -43,11 +43,11 @@ func (r *Restore) Run() error { // Make sure VictoriaMetrics doesn't run during the restore process. 
if err := fs.MkdirAllIfNotExist(r.Dst.Dir); err != nil { - return fmt.Errorf("cannot create dir %q: %s", r.Dst.Dir, err) + return fmt.Errorf("cannot create dir %q: %w", r.Dst.Dir, err) } flockF, err := fs.CreateFlockFile(r.Dst.Dir) if err != nil { - return fmt.Errorf("cannot create lock file in %q; make sure VictoriaMetrics doesn't use the dir; error: %s", r.Dst.Dir, err) + return fmt.Errorf("cannot create lock file in %q; make sure VictoriaMetrics doesn't use the dir; error: %w", r.Dst.Dir, err) } defer fs.MustClose(flockF) @@ -71,12 +71,12 @@ func (r *Restore) Run() error { logger.Infof("obtaining list of parts at %s", src) srcParts, err := src.ListParts() if err != nil { - return fmt.Errorf("cannot list src parts: %s", err) + return fmt.Errorf("cannot list src parts: %w", err) } logger.Infof("obtaining list of parts at %s", dst) dstParts, err := dst.ListParts() if err != nil { - return fmt.Errorf("cannot list dst parts: %s", err) + return fmt.Errorf("cannot list dst parts: %w", err) } backupSize := getPartsSize(srcParts) @@ -129,7 +129,7 @@ func (r *Restore) Run() error { logger.Infof("deleting %s from %s", path, dst) size, err := dst.DeletePath(path) if err != nil { - return fmt.Errorf("cannot delete %s from %s: %s", path, dst, err) + return fmt.Errorf("cannot delete %s from %s: %w", path, dst, err) } deleteSize += size } @@ -137,14 +137,14 @@ func (r *Restore) Run() error { return err } if err := dst.RemoveEmptyDirs(); err != nil { - return fmt.Errorf("cannot remove empty directories at %s: %s", dst, err) + return fmt.Errorf("cannot remove empty directories at %s: %w", dst, err) } } // Re-read dstParts, since additional parts may be removed on the previous step. dstParts, err = dst.ListParts() if err != nil { - return fmt.Errorf("cannot list dst parts after the deletion: %s", err) + return fmt.Errorf("cannot list dst parts after the deletion: %w", err) } partsToCopy := common.PartsDifference(srcParts, dstParts) @@ -166,17 +166,17 @@ func (r *Restore) Run() error { logger.Infof("downloading %s from %s to %s", &p, src, dst) wc, err := dst.NewWriteCloser(p) if err != nil { - return fmt.Errorf("cannot create writer for %q to %s: %s", &p, dst, err) + return fmt.Errorf("cannot create writer for %q to %s: %w", &p, dst, err) } sw := &statWriter{ w: wc, bytesWritten: &bytesDownloaded, } if err := src.DownloadPart(p, sw); err != nil { - return fmt.Errorf("cannot download %s to %s: %s", &p, dst, err) + return fmt.Errorf("cannot download %s to %s: %w", &p, dst, err) } if err := wc.Close(); err != nil { - return fmt.Errorf("cannot close reader from %s from %s: %s", &p, src, err) + return fmt.Errorf("cannot close writer for %s at %s: %w", &p, dst, err) } } return nil diff --git a/lib/backup/actions/util.go b/lib/backup/actions/util.go index e9c2756db..c28974f93 100644 --- a/lib/backup/actions/util.go +++ b/lib/backup/actions/util.go @@ -207,7 +207,7 @@ func NewRemoteFS(path string) (common.RemoteFS, error) { Dir: dir, } if err := fs.Init(); err != nil { - return nil, fmt.Errorf("cannot initialize connection to gcs: %s", err) + return nil, fmt.Errorf("cannot initialize connection to gcs: %w", err) } return fs, nil case "s3": @@ -226,7 +226,7 @@ func NewRemoteFS(path string) (common.RemoteFS, error) { Dir: dir, } if err := fs.Init(); err != nil { - return nil, fmt.Errorf("cannot initialize connection to s3: %s", err) + return nil, fmt.Errorf("cannot initialize connection to s3: %w", err) } return fs, nil default: diff --git a/lib/backup/fscommon/fscommon.go b/lib/backup/fscommon/fscommon.go index 
b6db48ad6..2729161c8 100644 --- a/lib/backup/fscommon/fscommon.go +++ b/lib/backup/fscommon/fscommon.go @@ -13,11 +13,11 @@ import ( func FsyncFile(path string) error { if err := fsync(path); err != nil { _ = os.RemoveAll(path) - return fmt.Errorf("cannot fsync file %q: %s", path, err) + return fmt.Errorf("cannot fsync file %q: %w", path, err) } dir := filepath.Dir(path) if err := fsync(dir); err != nil { - return fmt.Errorf("cannot fsync dir %q: %s", dir, err) + return fmt.Errorf("cannot fsync dir %q: %w", dir, err) } return nil } @@ -45,7 +45,7 @@ func fsync(path string) error { func AppendFiles(dst []string, dir string) ([]string, error) { d, err := os.Open(dir) if err != nil { - return nil, fmt.Errorf("cannot open %q: %s", dir, err) + return nil, fmt.Errorf("cannot open %q: %w", dir, err) } dst, err = appendFilesInternal(dst, d) if err1 := d.Close(); err1 != nil { @@ -58,14 +58,14 @@ func appendFilesInternal(dst []string, d *os.File) ([]string, error) { dir := d.Name() dfi, err := d.Stat() if err != nil { - return nil, fmt.Errorf("cannot stat %q: %s", dir, err) + return nil, fmt.Errorf("cannot stat %q: %w", dir, err) } if !dfi.IsDir() { return nil, fmt.Errorf("%q isn't a directory", dir) } fis, err := d.Readdir(-1) if err != nil { - return nil, fmt.Errorf("cannot read directory contents in %q: %s", dir, err) + return nil, fmt.Errorf("cannot read directory contents in %q: %w", dir, err) } for _, fi := range fis { name := fi.Name() @@ -82,7 +82,7 @@ func appendFilesInternal(dst []string, d *os.File) ([]string, error) { // Process directory dst, err = AppendFiles(dst, path) if err != nil { - return nil, fmt.Errorf("cannot list %q: %s", path, err) + return nil, fmt.Errorf("cannot list %q: %w", path, err) } continue } @@ -100,17 +100,17 @@ func appendFilesInternal(dst []string, d *os.File) ([]string, error) { // Skip symlink that points to nowhere. 
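The directory walkers in this file must tell a dangling symlink (skipped, as the comment above notes) apart from a genuine resolution failure (returned, now wrapped with %w). A minimal sketch of that decision, assuming filepath.EvalSymlinks is the resolver behind the pathOrig/pathReal pair seen in these hunks:

```go
package fsutil

import (
	"fmt"
	"os"
	"path/filepath"
)

// resolveSymlink resolves pathOrig and reports whether the link is broken.
// A broken link is not an error: callers simply skip it.
func resolveSymlink(pathOrig string) (pathReal string, broken bool, err error) {
	pathReal, err = filepath.EvalSymlinks(pathOrig)
	if err != nil {
		if os.IsNotExist(err) {
			// The symlink points to nowhere.
			return "", true, nil
		}
		return "", false, fmt.Errorf("cannot resolve symlink %q: %w", pathOrig, err)
	}
	return pathReal, false, nil
}
```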
continue } - return nil, fmt.Errorf("cannot resolve symlink %q: %s", pathOrig, err) + return nil, fmt.Errorf("cannot resolve symlink %q: %w", pathOrig, err) } sfi, err := os.Stat(pathReal) if err != nil { - return nil, fmt.Errorf("cannot stat %q from symlink %q: %s", pathReal, path, err) + return nil, fmt.Errorf("cannot stat %q from symlink %q: %w", pathReal, path, err) } if sfi.IsDir() { // Symlink points to directory dstNew, err := AppendFiles(dst, pathReal) if err != nil { - return nil, fmt.Errorf("cannot list files at %q from symlink %q: %s", pathReal, path, err) + return nil, fmt.Errorf("cannot list files at %q from symlink %q: %w", pathReal, path, err) } pathReal += "/" for i := len(dst); i < len(dstNew); i++ { @@ -163,14 +163,14 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) { dir := d.Name() dfi, err := d.Stat() if err != nil { - return false, fmt.Errorf("cannot stat %q: %s", dir, err) + return false, fmt.Errorf("cannot stat %q: %w", dir, err) } if !dfi.IsDir() { return false, fmt.Errorf("%q isn't a directory", dir) } fis, err := d.Readdir(-1) if err != nil { - return false, fmt.Errorf("cannot read directory contents in %q: %s", dir, err) + return false, fmt.Errorf("cannot read directory contents in %q: %w", dir, err) } dirEntries := 0 hasFlock := false @@ -184,7 +184,7 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) { // Process directory ok, err := removeEmptyDirs(path) if err != nil { - return false, fmt.Errorf("cannot list %q: %s", path, err) + return false, fmt.Errorf("cannot list %q: %w", path, err) } if !ok { dirEntries++ @@ -209,21 +209,21 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) { // Remove symlink that points to nowhere. logger.Infof("removing broken symlink %q", pathOrig) if err := os.Remove(pathOrig); err != nil { - return false, fmt.Errorf("cannot remove %q: %s", pathOrig, err) + return false, fmt.Errorf("cannot remove %q: %w", pathOrig, err) } continue } - return false, fmt.Errorf("cannot resolve symlink %q: %s", pathOrig, err) + return false, fmt.Errorf("cannot resolve symlink %q: %w", pathOrig, err) } sfi, err := os.Stat(pathReal) if err != nil { - return false, fmt.Errorf("cannot stat %q from symlink %q: %s", pathReal, path, err) + return false, fmt.Errorf("cannot stat %q from symlink %q: %w", pathReal, path, err) } if sfi.IsDir() { // Symlink points to directory ok, err := removeEmptyDirs(pathReal) if err != nil { - return false, fmt.Errorf("cannot list files at %q from symlink %q: %s", pathReal, path, err) + return false, fmt.Errorf("cannot list files at %q from symlink %q: %w", pathReal, path, err) } if !ok { dirEntries++ @@ -231,7 +231,7 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) { // Remove the symlink logger.Infof("removing symlink that points to empty dir %q", pathOrig) if err := os.Remove(pathOrig); err != nil { - return false, fmt.Errorf("cannot remove %q: %s", pathOrig, err) + return false, fmt.Errorf("cannot remove %q: %w", pathOrig, err) } } continue @@ -252,11 +252,11 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) { if hasFlock { flockFilepath := dir + "/flock.lock" if err := os.Remove(flockFilepath); err != nil { - return false, fmt.Errorf("cannot remove %q: %s", flockFilepath, err) + return false, fmt.Errorf("cannot remove %q: %w", flockFilepath, err) } } if err := os.Remove(dir); err != nil { - return false, fmt.Errorf("cannot remove %q: %s", dir, err) + return false, fmt.Errorf("cannot remove %q: %w", dir, err) } return true, nil } diff --git a/lib/backup/fslocal/fslocal.go 
b/lib/backup/fslocal/fslocal.go index 913c52287..ced08c2cf 100644 --- a/lib/backup/fslocal/fslocal.go +++ b/lib/backup/fslocal/fslocal.go @@ -64,7 +64,7 @@ func (fs *FS) ListParts() ([]common.Part, error) { } fi, err := os.Stat(file) if err != nil { - return nil, fmt.Errorf("cannot stat %q: %s", file, err) + return nil, fmt.Errorf("cannot stat %q: %w", file, err) } path := file[len(dir):] size := uint64(fi.Size()) @@ -100,7 +100,7 @@ func (fs *FS) NewReadCloser(p common.Part) (io.ReadCloser, error) { path := fs.path(p) r, err := filestream.OpenReaderAt(path, int64(p.Offset), true) if err != nil { - return nil, fmt.Errorf("cannot open %q at %q: %s", p.Path, fs.Dir, err) + return nil, fmt.Errorf("cannot open %q at %q: %w", p.Path, fs.Dir, err) } lrc := &limitedReadCloser{ r: r, @@ -121,7 +121,7 @@ func (fs *FS) NewWriteCloser(p common.Part) (io.WriteCloser, error) { } w, err := filestream.OpenWriterAt(path, int64(p.Offset), true) if err != nil { - return nil, fmt.Errorf("cannot open writer for %q at offset %d: %s", path, p.Offset, err) + return nil, fmt.Errorf("cannot open writer for %q at offset %d: %w", path, p.Offset, err) } wc := &writeCloser{ w: w, @@ -148,16 +148,16 @@ func (fs *FS) DeletePath(path string) (uint64, error) { // The file could be deleted earlier via symlink. return 0, nil } - return 0, fmt.Errorf("cannot open %q at %q: %s", path, fullPath, err) + return 0, fmt.Errorf("cannot open %q at %q: %w", path, fullPath, err) } fi, err := f.Stat() _ = f.Close() if err != nil { - return 0, fmt.Errorf("cannot stat %q at %q: %s", path, fullPath, err) + return 0, fmt.Errorf("cannot stat %q at %q: %w", path, fullPath, err) } size := uint64(fi.Size()) if err := os.Remove(fullPath); err != nil { - return 0, fmt.Errorf("cannot remove %q: %s", fullPath, err) + return 0, fmt.Errorf("cannot remove %q: %w", fullPath, err) } return size, nil } @@ -170,7 +170,7 @@ func (fs *FS) RemoveEmptyDirs() error { func (fs *FS) mkdirAll(filePath string) error { dir := filepath.Dir(filePath) if err := os.MkdirAll(dir, 0700); err != nil { - return fmt.Errorf("cannot create directory %q: %s", dir, err) + return fmt.Errorf("cannot create directory %q: %w", dir, err) } return nil } diff --git a/lib/backup/fsremote/fsremote.go b/lib/backup/fsremote/fsremote.go index 5d4d6c9a8..4055ac9ea 100644 --- a/lib/backup/fsremote/fsremote.go +++ b/lib/backup/fsremote/fsremote.go @@ -60,7 +60,7 @@ func (fs *FS) ListParts() ([]common.Part, error) { // Check for correct part size. fi, err := os.Stat(file) if err != nil { - return nil, fmt.Errorf("cannot stat file %q for part %q: %s", file, p.Path, err) + return nil, fmt.Errorf("cannot stat file %q for part %q: %w", file, p.Path, err) } p.ActualSize = uint64(fi.Size()) parts = append(parts, p) @@ -72,7 +72,7 @@ func (fs *FS) ListParts() ([]common.Part, error) { func (fs *FS) DeletePart(p common.Part) error { path := fs.path(p) if err := os.Remove(path); err != nil { - return fmt.Errorf("cannot remove %q: %s", path, err) + return fmt.Errorf("cannot remove %q: %w", path, err) } return nil } @@ -103,12 +103,12 @@ func (fs *FS) CopyPart(srcFS common.OriginFS, p common.Part) error { // Cannot create hardlink. 
Just copy file contents srcFile, err := os.Open(srcPath) if err != nil { - return fmt.Errorf("cannot open file %q: %s", srcPath, err) + return fmt.Errorf("cannot open file %q: %w", srcPath, err) } dstFile, err := os.Create(dstPath) if err != nil { _ = srcFile.Close() - return fmt.Errorf("cannot create file %q: %s", dstPath, err) + return fmt.Errorf("cannot create file %q: %w", dstPath, err) } n, err := io.Copy(dstFile, srcFile) if err1 := dstFile.Close(); err1 != nil { @@ -137,14 +137,14 @@ func (fs *FS) DownloadPart(p common.Part, w io.Writer) error { path := fs.path(p) r, err := os.Open(path) if err != nil { - return fmt.Errorf("cannot open %q: %s", path, err) + return fmt.Errorf("cannot open %q: %w", path, err) } n, err := io.Copy(w, r) if err1 := r.Close(); err1 != nil && err == nil { err = err1 } if err != nil { - return fmt.Errorf("cannot download data from %q: %s", path, err) + return fmt.Errorf("cannot download data from %q: %w", path, err) } if uint64(n) != p.Size { return fmt.Errorf("wrong data size downloaded from %q; got %d bytes; want %d bytes", path, n, p.Size) @@ -160,7 +160,7 @@ func (fs *FS) UploadPart(p common.Part, r io.Reader) error { } w, err := os.Create(path) if err != nil { - return fmt.Errorf("cannot create file %q: %s", path, err) + return fmt.Errorf("cannot create file %q: %w", path, err) } n, err := io.Copy(w, r) if err1 := w.Close(); err1 != nil && err == nil { @@ -168,7 +168,7 @@ func (fs *FS) UploadPart(p common.Part, r io.Reader) error { } if err != nil { _ = os.RemoveAll(path) - return fmt.Errorf("cannot upload data to %q: %s", path, err) + return fmt.Errorf("cannot upload data to %q: %w", path, err) } if uint64(n) != p.Size { _ = os.RemoveAll(path) @@ -184,7 +184,7 @@ func (fs *FS) UploadPart(p common.Part, r io.Reader) error { func (fs *FS) mkdirAll(filePath string) error { dir := filepath.Dir(filePath) if err := os.MkdirAll(dir, 0700); err != nil { - return fmt.Errorf("cannot create directory %q: %s", dir, err) + return fmt.Errorf("cannot create directory %q: %w", dir, err) } return nil } @@ -200,7 +200,7 @@ func (fs *FS) DeleteFile(filePath string) error { path := filepath.Join(fs.Dir, filePath) err := os.Remove(path) if err != nil && !os.IsNotExist(err) { - return fmt.Errorf("cannot remove %q: %s", path, err) + return fmt.Errorf("cannot remove %q: %w", path, err) } return nil } @@ -214,7 +214,7 @@ func (fs *FS) CreateFile(filePath string, data []byte) error { return err } if err := ioutil.WriteFile(path, data, 0600); err != nil { - return fmt.Errorf("cannot write %d bytes to %q: %s", len(data), path, err) + return fmt.Errorf("cannot write %d bytes to %q: %w", len(data), path, err) } return nil } @@ -227,7 +227,7 @@ func (fs *FS) HasFile(filePath string) (bool, error) { if os.IsNotExist(err) { return false, nil } - return false, fmt.Errorf("cannot stat %q: %s", path, err) + return false, fmt.Errorf("cannot stat %q: %w", path, err) } if fi.IsDir() { return false, fmt.Errorf("%q is a directory, while a file is needed", path) diff --git a/lib/backup/gcsremote/gcs.go b/lib/backup/gcsremote/gcs.go index 5a362017f..2857c5e58 100644 --- a/lib/backup/gcsremote/gcs.go +++ b/lib/backup/gcsremote/gcs.go @@ -49,13 +49,13 @@ func (fs *FS) Init() error { creds := option.WithCredentialsFile(fs.CredsFilePath) c, err := storage.NewClient(ctx, creds) if err != nil { - return fmt.Errorf("cannot create gcs client with credsFile %q: %s", fs.CredsFilePath, err) + return fmt.Errorf("cannot create gcs client with credsFile %q: %w", fs.CredsFilePath, err) } client = c } else { c, 
err := storage.NewClient(ctx) if err != nil { - return fmt.Errorf("cannot create default gcs client: %q", err) + return fmt.Errorf("cannot create default gcs client: %w", err) } client = c } @@ -82,7 +82,7 @@ func (fs *FS) ListParts() ([]common.Part, error) { Prefix: dir, } if err := q.SetAttrSelection(selectAttrs); err != nil { - return nil, fmt.Errorf("error in SetAttrSelection: %s", err) + return nil, fmt.Errorf("error in SetAttrSelection: %w", err) } it := fs.bkt.Objects(ctx, q) var parts []common.Part @@ -92,7 +92,7 @@ func (fs *FS) ListParts() ([]common.Part, error) { return parts, nil } if err != nil { - return nil, fmt.Errorf("error when iterating objects at %q: %s", dir, err) + return nil, fmt.Errorf("error when iterating objects at %q: %w", dir, err) } file := attr.Name if !strings.HasPrefix(file, dir) { @@ -116,7 +116,7 @@ func (fs *FS) DeletePart(p common.Part) error { o := fs.object(p) ctx := context.Background() if err := o.Delete(ctx); err != nil { - return fmt.Errorf("cannot delete %q at %s (remote path %q): %s", p.Path, fs, o.ObjectName(), err) + return fmt.Errorf("cannot delete %q at %s (remote path %q): %w", p.Path, fs, o.ObjectName(), err) } return nil } @@ -140,7 +140,7 @@ func (fs *FS) CopyPart(srcFS common.OriginFS, p common.Part) error { ctx := context.Background() attr, err := copier.Run(ctx) if err != nil { - return fmt.Errorf("cannot copy %q from %s to %s: %s", p.Path, src, fs, err) + return fmt.Errorf("cannot copy %q from %s to %s: %w", p.Path, src, fs, err) } if uint64(attr.Size) != p.Size { return fmt.Errorf("unexpected %q size after copying from %s to %s; got %d bytes; want %d bytes", p.Path, src, fs, attr.Size, p.Size) @@ -154,14 +154,14 @@ func (fs *FS) DownloadPart(p common.Part, w io.Writer) error { ctx := context.Background() r, err := o.NewReader(ctx) if err != nil { - return fmt.Errorf("cannot open reader for %q at %s (remote path %q): %s", p.Path, fs, o.ObjectName(), err) + return fmt.Errorf("cannot open reader for %q at %s (remote path %q): %w", p.Path, fs, o.ObjectName(), err) } n, err := io.Copy(w, r) if err1 := r.Close(); err1 != nil && err == nil { err = err1 } if err != nil { - return fmt.Errorf("cannot download %q from at %s (remote path %q): %s", p.Path, fs, o.ObjectName(), err) + return fmt.Errorf("cannot download %q from %s (remote path %q): %w", p.Path, fs, o.ObjectName(), err) } if uint64(n) != p.Size { return fmt.Errorf("wrong data size downloaded from %q at %s; got %d bytes; want %d bytes", p.Path, fs, n, p.Size) @@ -179,7 +179,7 @@ func (fs *FS) UploadPart(p common.Part, r io.Reader) error { err = err1 } if err != nil { - return fmt.Errorf("cannot upload data to %q at %s (remote path %q): %s", p.Path, fs, o.ObjectName(), err) + return fmt.Errorf("cannot upload data to %q at %s (remote path %q): %w", p.Path, fs, o.ObjectName(), err) } if uint64(n) != p.Size { return fmt.Errorf("wrong data size uploaded to %q at %s; got %d bytes; want %d bytes", p.Path, fs, n, p.Size) @@ -201,7 +201,7 @@ func (fs *FS) DeleteFile(filePath string) error { ctx := context.Background() if err := o.Delete(ctx); err != nil { if err != storage.ErrObjectNotExist { - return fmt.Errorf("cannot delete %q at %s (remote path %q): %s", filePath, fs, o.ObjectName(), err) + return fmt.Errorf("cannot delete %q at %s (remote path %q): %w", filePath, fs, o.ObjectName(), err) } } return nil @@ -218,14 +218,14 @@ func (fs *FS) CreateFile(filePath string, data []byte) error { n, err := w.Write(data) if err != nil { _ = w.Close() - return fmt.Errorf("cannot upload %d bytes to 
%q at %s (remote path %q): %s", len(data), filePath, fs, o.ObjectName(), err) + return fmt.Errorf("cannot upload %d bytes to %q at %s (remote path %q): %w", len(data), filePath, fs, o.ObjectName(), err) } if n != len(data) { _ = w.Close() return fmt.Errorf("wrong data size uploaded to %q at %s (remote path %q); got %d bytes; want %d bytes", filePath, fs, o.ObjectName(), n, len(data)) } if err := w.Close(); err != nil { - return fmt.Errorf("cannot close %q at %s (remote path %q): %s", filePath, fs, o.ObjectName(), err) + return fmt.Errorf("cannot close %q at %s (remote path %q): %w", filePath, fs, o.ObjectName(), err) } return nil } @@ -240,7 +240,7 @@ func (fs *FS) HasFile(filePath string) (bool, error) { if err == storage.ErrObjectNotExist { return false, nil } - return false, fmt.Errorf("unexpected error when obtaining attributes for %q at %s (remote path %q): %s", filePath, fs, o.ObjectName(), err) + return false, fmt.Errorf("unexpected error when obtaining attributes for %q at %s (remote path %q): %w", filePath, fs, o.ObjectName(), err) } return true, nil } diff --git a/lib/backup/s3remote/s3.go b/lib/backup/s3remote/s3.go index d66e42cc1..9db96c9c9 100644 --- a/lib/backup/s3remote/s3.go +++ b/lib/backup/s3remote/s3.go @@ -66,7 +66,7 @@ func (fs *FS) Init() error { } sess, err := session.NewSessionWithOptions(opts) if err != nil { - return fmt.Errorf("cannot create S3 session: %s", err) + return fmt.Errorf("cannot create S3 session: %w", err) } if len(fs.CustomEndpoint) > 0 { @@ -81,7 +81,7 @@ func (fs *FS) Init() error { ctx := context.Background() region, err := s3manager.GetBucketRegion(ctx, sess, fs.Bucket, "") if err != nil { - return fmt.Errorf("cannot determine region for bucket %q: %s", fs.Bucket, err) + return fmt.Errorf("cannot determine region for bucket %q: %w", fs.Bucket, err) } sess.Config.WithRegion(region) logger.Infof("bucket %q is stored at region %q; switching to this region", fs.Bucket, region) @@ -133,7 +133,7 @@ func (fs *FS) ListParts() ([]common.Part, error) { err = errOuter } if err != nil { - return nil, fmt.Errorf("error when listing s3 objects inside dir %q: %s", dir, err) + return nil, fmt.Errorf("error when listing s3 objects inside dir %q: %w", dir, err) } return parts, nil } @@ -147,7 +147,7 @@ func (fs *FS) DeletePart(p common.Part) error { } _, err := fs.s3.DeleteObject(input) if err != nil { - return fmt.Errorf("cannot delete %q at %s (remote path %q): %s", p.Path, fs, path, err) + return fmt.Errorf("cannot delete %q at %s (remote path %q): %w", p.Path, fs, path, err) } return nil } @@ -175,7 +175,7 @@ func (fs *FS) CopyPart(srcFS common.OriginFS, p common.Part) error { } _, err := fs.s3.CopyObject(input) if err != nil { - return fmt.Errorf("cannot copy %q from %s to %s (copySource %q): %s", p.Path, src, fs, copySource, err) + return fmt.Errorf("cannot copy %q from %s to %s (copySource %q): %w", p.Path, src, fs, copySource, err) } return nil } @@ -189,7 +189,7 @@ func (fs *FS) DownloadPart(p common.Part, w io.Writer) error { } o, err := fs.s3.GetObject(input) if err != nil { - return fmt.Errorf("cannot open %q at %s (remote path %q): %s", p.Path, fs, path, err) + return fmt.Errorf("cannot open %q at %s (remote path %q): %w", p.Path, fs, path, err) } r := o.Body n, err := io.Copy(w, r) @@ -197,7 +197,7 @@ func (fs *FS) DownloadPart(p common.Part, w io.Writer) error { err = err1 } if err != nil { - return fmt.Errorf("cannot download %q from at %s (remote path %q): %s", p.Path, fs, path, err) + return fmt.Errorf("cannot download %q from %s (remote 
path %q): %w", p.Path, fs, path, err) } if uint64(n) != p.Size { return fmt.Errorf("wrong data size downloaded from %q at %s; got %d bytes; want %d bytes", p.Path, fs, n, p.Size) @@ -218,7 +218,7 @@ func (fs *FS) UploadPart(p common.Part, r io.Reader) error { } _, err := fs.uploader.Upload(input) if err != nil { - return fmt.Errorf("cannot upoad data to %q at %s (remote path %q): %s", p.Path, fs, path, err) + return fmt.Errorf("cannot upoad data to %q at %s (remote path %q): %w", p.Path, fs, path, err) } if uint64(sr.size) != p.Size { return fmt.Errorf("wrong data size uploaded to %q at %s; got %d bytes; want %d bytes", p.Path, fs, sr.size, p.Size) @@ -249,7 +249,7 @@ func (fs *FS) DeleteFile(filePath string) error { Key: aws.String(path), } if _, err := fs.s3.DeleteObject(input); err != nil { - return fmt.Errorf("cannot delete %q at %s (remote path %q): %s", filePath, fs, path, err) + return fmt.Errorf("cannot delete %q at %s (remote path %q): %w", filePath, fs, path, err) } return nil } @@ -269,7 +269,7 @@ func (fs *FS) CreateFile(filePath string, data []byte) error { } _, err := fs.uploader.Upload(input) if err != nil { - return fmt.Errorf("cannot upoad data to %q at %s (remote path %q): %s", filePath, fs, path, err) + return fmt.Errorf("cannot upoad data to %q at %s (remote path %q): %w", filePath, fs, path, err) } l := int64(len(data)) if sr.size != l { @@ -290,10 +290,10 @@ func (fs *FS) HasFile(filePath string) (bool, error) { if ae, ok := err.(awserr.Error); ok && ae.Code() == s3.ErrCodeNoSuchKey { return false, nil } - return false, fmt.Errorf("cannot open %q at %s (remote path %q): %s", filePath, fs, path, err) + return false, fmt.Errorf("cannot open %q at %s (remote path %q): %w", filePath, fs, path, err) } if err := o.Body.Close(); err != nil { - return false, fmt.Errorf("cannot close %q at %s (remote path %q): %s", filePath, fs, path, err) + return false, fmt.Errorf("cannot close %q at %s (remote path %q): %w", filePath, fs, path, err) } return true, nil } diff --git a/lib/encoding/encoding.go b/lib/encoding/encoding.go index 072bb487f..807f8eb17 100644 --- a/lib/encoding/encoding.go +++ b/lib/encoding/encoding.go @@ -76,7 +76,7 @@ func MarshalTimestamps(dst []byte, timestamps []int64, precisionBits uint8) (res func UnmarshalTimestamps(dst []int64, src []byte, mt MarshalType, firstTimestamp int64, itemsCount int) ([]int64, error) { dst, err := unmarshalInt64Array(dst, src, mt, firstTimestamp, itemsCount) if err != nil { - return nil, fmt.Errorf("cannot unmarshal %d timestamps from len(src)=%d bytes: %s", itemsCount, len(src), err) + return nil, fmt.Errorf("cannot unmarshal %d timestamps from len(src)=%d bytes: %w", itemsCount, len(src), err) } return dst, nil } @@ -97,7 +97,7 @@ func MarshalValues(dst []byte, values []int64, precisionBits uint8) (result []by func UnmarshalValues(dst []int64, src []byte, mt MarshalType, firstValue int64, itemsCount int) ([]int64, error) { dst, err := unmarshalInt64Array(dst, src, mt, firstValue, itemsCount) if err != nil { - return nil, fmt.Errorf("cannot unmarshal %d values from len(src)=%d bytes: %s", itemsCount, len(src), err) + return nil, fmt.Errorf("cannot unmarshal %d values from len(src)=%d bytes: %w", itemsCount, len(src), err) } return dst, nil } @@ -166,36 +166,36 @@ func unmarshalInt64Array(dst []int64, src []byte, mt MarshalType, firstValue int bb := bbPool.Get() bb.B, err = DecompressZSTD(bb.B[:0], src) if err != nil { - return nil, fmt.Errorf("cannot decompress zstd data of size %d: %s; src_zstd=%X", len(src), err, src) + return 
nil, fmt.Errorf("cannot decompress zstd data of size %d: %w; src_zstd=%X", len(src), err, src) } dst, err = unmarshalInt64NearestDelta(dst, bb.B, firstValue, itemsCount) bbPool.Put(bb) if err != nil { - return nil, fmt.Errorf("cannot unmarshal nearest delta data after zstd decompression: %s; src_zstd=%X", err, src) + return nil, fmt.Errorf("cannot unmarshal nearest delta data after zstd decompression: %w; src_zstd=%X", err, src) } return dst, nil case MarshalTypeZSTDNearestDelta2: bb := bbPool.Get() bb.B, err = DecompressZSTD(bb.B[:0], src) if err != nil { - return nil, fmt.Errorf("cannot decompress zstd data of size %d: %s; src_zstd=%X", len(src), err, src) + return nil, fmt.Errorf("cannot decompress zstd data of size %d: %w; src_zstd=%X", len(src), err, src) } dst, err = unmarshalInt64NearestDelta2(dst, bb.B, firstValue, itemsCount) bbPool.Put(bb) if err != nil { - return nil, fmt.Errorf("cannot unmarshal nearest delta2 data after zstd decompression: %s; src_zstd=%X", err, src) + return nil, fmt.Errorf("cannot unmarshal nearest delta2 data after zstd decompression: %w; src_zstd=%X", err, src) } return dst, nil case MarshalTypeNearestDelta: dst, err = unmarshalInt64NearestDelta(dst, src, firstValue, itemsCount) if err != nil { - return nil, fmt.Errorf("cannot unmarshal nearest delta data: %s", err) + return nil, fmt.Errorf("cannot unmarshal nearest delta data: %w", err) } return dst, nil case MarshalTypeNearestDelta2: dst, err = unmarshalInt64NearestDelta2(dst, src, firstValue, itemsCount) if err != nil { - return nil, fmt.Errorf("cannot unmarshal nearest delta2 data: %s", err) + return nil, fmt.Errorf("cannot unmarshal nearest delta2 data: %w", err) } return dst, nil case MarshalTypeConst: @@ -219,7 +219,7 @@ func unmarshalInt64Array(dst []int64, src []byte, mt MarshalType, firstValue int v := firstValue tail, d, err := UnmarshalVarInt64(src) if err != nil { - return nil, fmt.Errorf("cannot unmarshal delta value for delta const: %s", err) + return nil, fmt.Errorf("cannot unmarshal delta value for delta const: %w", err) } if len(tail) > 0 { return nil, fmt.Errorf("unexpected trailing data after delta const (d=%d): %d bytes", d, len(tail)) diff --git a/lib/encoding/encoding_timing_test.go b/lib/encoding/encoding_timing_test.go index 46c5b7603..7b100e2d2 100644 --- a/lib/encoding/encoding_timing_test.go +++ b/lib/encoding/encoding_timing_test.go @@ -34,7 +34,7 @@ func BenchmarkUnmarshalGaugeArray(b *testing.B) { for pb.Next() { dst, err = unmarshalInt64Array(dst[:0], benchMarshaledGaugeArray, MarshalTypeZSTDNearestDelta, benchGaugeArray[0], len(benchGaugeArray)) if err != nil { - panic(fmt.Errorf("cannot unmarshal gauge array: %s", err)) + panic(fmt.Errorf("cannot unmarshal gauge array: %w", err)) } atomic.AddUint64(&Sink, uint64(len(dst))) } @@ -81,7 +81,7 @@ func BenchmarkUnmarshalDeltaConstArray(b *testing.B) { for pb.Next() { dst, err = unmarshalInt64Array(dst[:0], benchMarshaledDeltaConstArray, MarshalTypeDeltaConst, benchDeltaConstArray[0], len(benchDeltaConstArray)) if err != nil { - panic(fmt.Errorf("cannot unmarshal delta const array: %s", err)) + panic(fmt.Errorf("cannot unmarshal delta const array: %w", err)) } atomic.AddUint64(&Sink, uint64(len(dst))) } @@ -128,7 +128,7 @@ func BenchmarkUnmarshalConstArray(b *testing.B) { for pb.Next() { dst, err = unmarshalInt64Array(dst[:0], benchMarshaledConstArray, MarshalTypeConst, benchConstArray[0], len(benchConstArray)) if err != nil { - panic(fmt.Errorf("cannot unmarshal const array: %s", err)) + panic(fmt.Errorf("cannot unmarshal const 
array: %w", err)) } atomic.AddUint64(&Sink, uint64(len(dst))) } @@ -173,7 +173,7 @@ func BenchmarkUnmarshalZeroConstArray(b *testing.B) { for pb.Next() { dst, err = unmarshalInt64Array(dst[:0], benchMarshaledZeroConstArray, MarshalTypeConst, benchZeroConstArray[0], len(benchZeroConstArray)) if err != nil { - panic(fmt.Errorf("cannot unmarshal zero const array: %s", err)) + panic(fmt.Errorf("cannot unmarshal zero const array: %w", err)) } atomic.AddUint64(&Sink, uint64(len(dst))) } @@ -212,7 +212,7 @@ func BenchmarkUnmarshalInt64Array(b *testing.B) { for pb.Next() { dst, err = unmarshalInt64Array(dst[:0], benchMarshaledInt64Array, benchMarshalType, benchInt64Array[0], len(benchInt64Array)) if err != nil { - panic(fmt.Errorf("cannot unmarshal int64 array: %s", err)) + panic(fmt.Errorf("cannot unmarshal int64 array: %w", err)) } atomic.AddUint64(&Sink, uint64(len(dst))) } diff --git a/lib/encoding/int.go b/lib/encoding/int.go index bdbe5efe1..28f16badf 100644 --- a/lib/encoding/int.go +++ b/lib/encoding/int.go @@ -229,7 +229,7 @@ func MarshalBytes(dst, b []byte) []byte { func UnmarshalBytes(src []byte) ([]byte, []byte, error) { tail, n, err := UnmarshalVarUint64(src) if err != nil { - return nil, nil, fmt.Errorf("cannot unmarshal string size: %d", err) + return nil, nil, fmt.Errorf("cannot unmarshal string size: %w", err) } src = tail if uint64(len(src)) < n { diff --git a/lib/encoding/int_timing_test.go b/lib/encoding/int_timing_test.go index 60a632e08..6adc40ca0 100644 --- a/lib/encoding/int_timing_test.go +++ b/lib/encoding/int_timing_test.go @@ -135,7 +135,7 @@ func benchmarkUnmarshalVarInt64s(b *testing.B, maxValue int64) { for pb.Next() { tail, err := UnmarshalVarInt64s(dst, data) if err != nil { - panic(fmt.Errorf("unexpected error: %s", err)) + panic(fmt.Errorf("unexpected error: %w", err)) } if len(tail) > 0 { panic(fmt.Errorf("unexpected non-empty tail with len=%d: %X", len(tail), tail)) diff --git a/lib/encoding/nearest_delta.go b/lib/encoding/nearest_delta.go index 5ff7f9b61..13d78cd09 100644 --- a/lib/encoding/nearest_delta.go +++ b/lib/encoding/nearest_delta.go @@ -60,7 +60,7 @@ func unmarshalInt64NearestDelta(dst []int64, src []byte, firstValue int64, items tail, err := UnmarshalVarInt64s(is.A, src) if err != nil { - return nil, fmt.Errorf("cannot unmarshal nearest delta from %d bytes; src=%X: %s", len(src), src, err) + return nil, fmt.Errorf("cannot unmarshal nearest delta from %d bytes; src=%X: %w", len(src), src, err) } if len(tail) > 0 { return nil, fmt.Errorf("unexpected tail left after unmarshaling %d items from %d bytes; tail size=%d; src=%X; tail=%X", itemsCount, len(src), len(tail), src, tail) diff --git a/lib/encoding/nearest_delta2.go b/lib/encoding/nearest_delta2.go index 33d355e55..1011b578e 100644 --- a/lib/encoding/nearest_delta2.go +++ b/lib/encoding/nearest_delta2.go @@ -63,7 +63,7 @@ func unmarshalInt64NearestDelta2(dst []int64, src []byte, firstValue int64, item tail, err := UnmarshalVarInt64s(is.A, src) if err != nil { - return nil, fmt.Errorf("cannot unmarshal nearest delta from %d bytes; src=%X: %s", len(src), src, err) + return nil, fmt.Errorf("cannot unmarshal nearest delta from %d bytes; src=%X: %w", len(src), src, err) } if len(tail) > 0 { return nil, fmt.Errorf("unexpected tail left after unmarshaling %d items from %d bytes; tail size=%d; src=%X; tail=%X", itemsCount, len(src), len(tail), src, tail) diff --git a/lib/encoding/nearest_delta2_timing_test.go b/lib/encoding/nearest_delta2_timing_test.go index 322cb873e..ee58e4c83 100644 --- 
a/lib/encoding/nearest_delta2_timing_test.go +++ b/lib/encoding/nearest_delta2_timing_test.go @@ -35,7 +35,7 @@ func BenchmarkUnmarshalInt64NearestDelta2(b *testing.B) { for pb.Next() { dst, err = unmarshalInt64NearestDelta2(dst[:0], benchInt64NearestDelta2Data, 0, len(benchInt64Array)) if err != nil { - panic(fmt.Errorf("unexpected error: %s", err)) + panic(fmt.Errorf("unexpected error: %w", err)) } atomic.AddUint64(&Sink, uint64(len(dst))) } diff --git a/lib/encoding/nearest_delta_timing_test.go b/lib/encoding/nearest_delta_timing_test.go index d2704b099..3eb0c3126 100644 --- a/lib/encoding/nearest_delta_timing_test.go +++ b/lib/encoding/nearest_delta_timing_test.go @@ -35,7 +35,7 @@ func BenchmarkUnmarshalInt64NearestDelta(b *testing.B) { for pb.Next() { dst, err = unmarshalInt64NearestDelta(dst[:0], benchInt64NearestDeltaData, 0, len(benchInt64Array)) if err != nil { - panic(fmt.Errorf("unexpected error: %s", err)) + panic(fmt.Errorf("unexpected error: %w", err)) } atomic.AddUint64(&Sink, uint64(len(dst))) } diff --git a/lib/filestream/filestream.go b/lib/filestream/filestream.go index 0633cad95..f4c0d790f 100644 --- a/lib/filestream/filestream.go +++ b/lib/filestream/filestream.go @@ -63,7 +63,7 @@ func OpenReaderAt(path string, offset int64, nocache bool) (*Reader, error) { n, err := r.f.Seek(offset, io.SeekStart) if err != nil { r.MustClose() - return nil, fmt.Errorf("cannot seek to offset=%d for %q: %s", offset, path, err) + return nil, fmt.Errorf("cannot seek to offset=%d for %q: %w", offset, path, err) } if n != offset { r.MustClose() @@ -78,7 +78,7 @@ func OpenReaderAt(path string, offset int64, nocache bool) (*Reader, error) { func Open(path string, nocache bool) (*Reader, error) { f, err := os.Open(path) if err != nil { - return nil, fmt.Errorf("cannot open file %q: %s", path, err) + return nil, fmt.Errorf("cannot open file %q: %w", path, err) } r := &Reader{ f: f, @@ -124,7 +124,7 @@ func (r *Reader) Read(p []byte) (int, error) { return n, err } if err := r.st.adviseDontNeed(n, false); err != nil { - return n, fmt.Errorf("advise error for %q: %s", r.f.Name(), err) + return n, fmt.Errorf("advise error for %q: %w", r.f.Name(), err) } return n, nil } @@ -172,12 +172,12 @@ type Writer struct { func OpenWriterAt(path string, offset int64, nocache bool) (*Writer, error) { f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0600) if err != nil { - return nil, fmt.Errorf("cannot open %q: %s", path, err) + return nil, fmt.Errorf("cannot open %q: %w", path, err) } n, err := f.Seek(offset, io.SeekStart) if err != nil { _ = f.Close() - return nil, fmt.Errorf("cannot seek to offset=%d in %q: %s", offset, path, err) + return nil, fmt.Errorf("cannot seek to offset=%d in %q: %w", offset, path, err) } if n != offset { _ = f.Close() @@ -192,7 +192,7 @@ func OpenWriterAt(path string, offset int64, nocache bool) (*Writer, error) { func Create(path string, nocache bool) (*Writer, error) { f, err := os.Create(path) if err != nil { - return nil, fmt.Errorf("cannot create file %q: %s", path, err) + return nil, fmt.Errorf("cannot create file %q: %w", path, err) } return newWriter(f, nocache), nil } @@ -248,7 +248,7 @@ func (w *Writer) Write(p []byte) (int, error) { return n, err } if err := w.st.adviseDontNeed(n, true); err != nil { - return n, fmt.Errorf("advise error for %q: %s", w.f.Name(), err) + return n, fmt.Errorf("advise error for %q: %w", w.f.Name(), err) } return n, nil } diff --git a/lib/filestream/filestream_freebsd.go b/lib/filestream/filestream_freebsd.go index 332d7dda5..32a3c2aa9 
100644 --- a/lib/filestream/filestream_freebsd.go +++ b/lib/filestream/filestream_freebsd.go @@ -18,11 +18,11 @@ func (st *streamTracker) adviseDontNeed(n int, fdatasync bool) error { blockSize := st.length - (st.length % dontNeedBlockSize) if fdatasync { if err := unixFdatasync(int(st.fd)); err != nil { - return fmt.Errorf("unix.Fdatasync error: %s", err) + return fmt.Errorf("unix.Fdatasync error: %w", err) } } if err := unix.Fadvise(int(st.fd), int64(st.offset), int64(blockSize), unix.FADV_DONTNEED); err != nil { - return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, %d, %d) error: %s", st.offset, blockSize, err) + return fmt.Errorf("unix.Fadvise(FADV_DONTNEED, %d, %d) error: %w", st.offset, blockSize, err) } st.offset += blockSize st.length -= blockSize @@ -35,7 +35,7 @@ func (st *streamTracker) close() error { } // Advise the whole file as it shouldn't be cached. if err := unix.Fadvise(int(st.fd), 0, 0, unix.FADV_DONTNEED); err != nil { - return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, 0, 0) error: %s", err) + return fmt.Errorf("unix.Fadvise(FADV_DONTNEED, 0, 0) error: %w", err) } return nil } diff --git a/lib/filestream/filestream_linux.go b/lib/filestream/filestream_linux.go index 07f2bb4fe..bce316029 100644 --- a/lib/filestream/filestream_linux.go +++ b/lib/filestream/filestream_linux.go @@ -16,11 +16,11 @@ func (st *streamTracker) adviseDontNeed(n int, fdatasync bool) error { blockSize := st.length - (st.length % dontNeedBlockSize) if fdatasync { if err := unix.Fdatasync(int(st.fd)); err != nil { - return fmt.Errorf("unix.Fdatasync error: %s", err) + return fmt.Errorf("unix.Fdatasync error: %w", err) } } if err := unix.Fadvise(int(st.fd), int64(st.offset), int64(blockSize), unix.FADV_DONTNEED); err != nil { - return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, %d, %d) error: %s", st.offset, blockSize, err) + return fmt.Errorf("unix.Fadvise(FADV_DONTNEED, %d, %d) error: %w", st.offset, blockSize, err) } st.offset += blockSize st.length -= blockSize @@ -33,7 +33,7 @@ func (st *streamTracker) close() error { } // Advise the whole file as it shouldn't be cached. if err := unix.Fadvise(int(st.fd), 0, 0, unix.FADV_DONTNEED); err != nil { - return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, 0, 0) error: %s", err) + return fmt.Errorf("unix.Fadvise(FADV_DONTNEED, 0, 0) error: %w", err) } return nil } diff --git a/lib/fs/fs.go b/lib/fs/fs.go index a911e9c1c..bedb813d4 100644 --- a/lib/fs/fs.go +++ b/lib/fs/fs.go @@ -48,12 +48,12 @@ func WriteFileAtomically(path string, data []byte) error { tmpPath := fmt.Sprintf("%s.tmp.%d", path, n) f, err := filestream.Create(tmpPath, false) if err != nil { - return fmt.Errorf("cannot create file %q: %s", tmpPath, err) + return fmt.Errorf("cannot create file %q: %w", tmpPath, err) } if _, err := f.Write(data); err != nil { f.MustClose() MustRemoveAll(tmpPath) - return fmt.Errorf("cannot write %d bytes to file %q: %s", len(data), tmpPath, err) + return fmt.Errorf("cannot write %d bytes to file %q: %w", len(data), tmpPath, err) } // Sync and close the file. @@ -63,14 +63,14 @@ func WriteFileAtomically(path string, data []byte) error { if err := os.Rename(tmpPath, path); err != nil { // do not call MustRemoveAll(tmpPath) here, so the user could inspect // the file contents while investigating the issue. - return fmt.Errorf("cannot move %q to %q: %s", tmpPath, path, err) + return fmt.Errorf("cannot move %q to %q: %w", tmpPath, path, err) } // Sync the containing directory, so the file is guaranteed to appear in the directory. 
// See https://www.quora.com/When-should-you-fsync-the-containing-directory-in-addition-to-the-file-itself absPath, err := filepath.Abs(path) if err != nil { - return fmt.Errorf("cannot obtain absolute path to %q: %s", path, err) + return fmt.Errorf("cannot obtain absolute path to %q: %w", path, err) } parentDirPath := filepath.Dir(absPath) MustSyncPath(parentDirPath) @@ -204,12 +204,12 @@ func MustRemoveAllWithDoneCallback(path string, done func()) { // HardLinkFiles makes hard links for all the files from srcDir in dstDir. func HardLinkFiles(srcDir, dstDir string) error { if err := mkdirSync(dstDir); err != nil { - return fmt.Errorf("cannot create dstDir=%q: %s", dstDir, err) + return fmt.Errorf("cannot create dstDir=%q: %w", dstDir, err) } d, err := os.Open(srcDir) if err != nil { - return fmt.Errorf("cannot open srcDir=%q: %s", srcDir, err) + return fmt.Errorf("cannot open srcDir=%q: %w", srcDir, err) } defer func() { if err := d.Close(); err != nil { @@ -219,7 +219,7 @@ func HardLinkFiles(srcDir, dstDir string) error { fis, err := d.Readdir(-1) if err != nil { - return fmt.Errorf("cannot read files in scrDir=%q: %s", srcDir, err) + return fmt.Errorf("cannot read files in srcDir=%q: %w", srcDir, err) } for _, fi := range fis { if IsDirOrSymlink(fi) { @@ -248,7 +248,7 @@ func SymlinkRelative(srcPath, dstPath string) error { baseDir := filepath.Dir(dstPath) srcPathRel, err := filepath.Rel(baseDir, srcPath) if err != nil { - return fmt.Errorf("cannot make relative path for srcPath=%q: %s", srcPath, err) + return fmt.Errorf("cannot make relative path for srcPath=%q: %w", srcPath, err) } return os.Symlink(srcPathRel, dstPath) } @@ -260,7 +260,7 @@ func ReadFullData(r io.Reader, data []byte) error { if err == io.EOF { return io.EOF } - return fmt.Errorf("cannot read %d bytes; read only %d bytes; error: %s", len(data), n, err) + return fmt.Errorf("cannot read %d bytes; read only %d bytes; error: %w", len(data), n, err) } if n != len(data) { logger.Panicf("BUG: io.ReadFull read only %d bytes; must read %d bytes", n, len(data)) @@ -288,10 +288,10 @@ func CreateFlockFile(dir string) (*os.File, error) { flockFile := dir + "/flock.lock" flockF, err := os.Create(flockFile) if err != nil { - return nil, fmt.Errorf("cannot create lock file %q: %s", flockFile, err) + return nil, fmt.Errorf("cannot create lock file %q: %w", flockFile, err) } if err := unix.Flock(int(flockF.Fd()), unix.LOCK_EX|unix.LOCK_NB); err != nil { - return nil, fmt.Errorf("cannot acquire lock on file %q: %s", flockFile, err) + return nil, fmt.Errorf("cannot acquire lock on file %q: %w", flockFile, err) } return flockF, nil } diff --git a/lib/fs/reader_at.go b/lib/fs/reader_at.go index 3cd00afb5..6a39865fb 100644 --- a/lib/fs/reader_at.go +++ b/lib/fs/reader_at.go @@ -154,7 +154,7 @@ func (r *ReaderAt) MustClose() { func OpenReaderAt(path string) (*ReaderAt, error) { f, err := os.Open(path) if err != nil { - return nil, fmt.Errorf("cannot open file %q for reader: %s", path, err) + return nil, fmt.Errorf("cannot open file %q for reader: %w", path, err) } var r ReaderAt r.f = f @@ -162,7 +162,7 @@ func OpenReaderAt(path string) (*ReaderAt, error) { if !*disableMmap { fi, err := f.Stat() if err != nil { - return nil, fmt.Errorf("error in stat: %s", err) + return nil, fmt.Errorf("error in stat: %w", err) } size := fi.Size() bm := &pageCacheBitmap{ @@ -178,7 +178,7 @@ func OpenReaderAt(path string) (*ReaderAt, error) { data, err := mmapFile(f, size) if err != nil { MustClose(f) - return nil, fmt.Errorf("cannot init reader for %q: %s", 
path, err) + return nil, fmt.Errorf("cannot init reader for %q: %w", path, err) } r.mmapData = data } @@ -228,7 +228,7 @@ func mmapFile(f *os.File, size int64) ([]byte, error) { } data, err := unix.Mmap(int(f.Fd()), 0, int(size), unix.PROT_READ, unix.MAP_SHARED) if err != nil { - return nil, fmt.Errorf("cannot mmap file with size %d: %s", size, err) + return nil, fmt.Errorf("cannot mmap file with size %d: %w", size, err) } return data[:sizeOrig], nil } diff --git a/lib/mergeset/block_header.go b/lib/mergeset/block_header.go index 67007796b..5404c2af6 100644 --- a/lib/mergeset/block_header.go +++ b/lib/mergeset/block_header.go @@ -61,7 +61,7 @@ func (bh *blockHeader) Unmarshal(src []byte) ([]byte, error) { // Unmarshal commonPrefix tail, cp, err := encoding.UnmarshalBytes(src) if err != nil { - return tail, fmt.Errorf("cannot unmarshal commonPrefix: %s", err) + return tail, fmt.Errorf("cannot unmarshal commonPrefix: %w", err) } bh.commonPrefix = append(bh.commonPrefix[:0], cp...) src = tail @@ -69,7 +69,7 @@ func (bh *blockHeader) Unmarshal(src []byte) ([]byte, error) { // Unmarshal firstItem tail, fi, err := encoding.UnmarshalBytes(src) if err != nil { - return tail, fmt.Errorf("cannot unmarshal firstItem: %s", err) + return tail, fmt.Errorf("cannot unmarshal firstItem: %w", err) } bh.firstItem = append(bh.firstItem[:0], fi...) src = tail @@ -81,7 +81,7 @@ func (bh *blockHeader) Unmarshal(src []byte) ([]byte, error) { bh.marshalType = marshalType(src[0]) src = src[1:] if err := checkMarshalType(bh.marshalType); err != nil { - return src, fmt.Errorf("unexpected marshalType: %s", err) + return src, fmt.Errorf("unexpected marshalType: %w", err) } // Unmarshal itemsCount @@ -148,7 +148,7 @@ func unmarshalBlockHeaders(dst []blockHeader, src []byte, blockHeadersCount int) for i := 0; i < blockHeadersCount; i++ { tail, err := dst[dstLen+i].Unmarshal(src) if err != nil { - return dst, fmt.Errorf("cannot unmarshal block header: %s", err) + return dst, fmt.Errorf("cannot unmarshal block header: %w", err) } src = tail } diff --git a/lib/mergeset/block_stream_reader.go b/lib/mergeset/block_stream_reader.go index 64faa9f47..0949c507f 100644 --- a/lib/mergeset/block_stream_reader.go +++ b/lib/mergeset/block_stream_reader.go @@ -131,31 +131,31 @@ func (bsr *blockStreamReader) InitFromFilePart(path string) error { path = filepath.Clean(path) if err := bsr.ph.ParseFromPath(path); err != nil { - return fmt.Errorf("cannot parse partHeader data from %q: %s", path, err) + return fmt.Errorf("cannot parse partHeader data from %q: %w", path, err) } metaindexPath := path + "/metaindex.bin" metaindexFile, err := filestream.Open(metaindexPath, true) if err != nil { - return fmt.Errorf("cannot open metaindex file in stream mode: %s", err) + return fmt.Errorf("cannot open metaindex file in stream mode: %w", err) } bsr.mrs, err = unmarshalMetaindexRows(bsr.mrs[:0], metaindexFile) metaindexFile.MustClose() if err != nil { - return fmt.Errorf("cannot unmarshal metaindex rows from file %q: %s", metaindexPath, err) + return fmt.Errorf("cannot unmarshal metaindex rows from file %q: %w", metaindexPath, err) } indexPath := path + "/index.bin" indexFile, err := filestream.Open(indexPath, true) if err != nil { - return fmt.Errorf("cannot open index file in stream mode: %s", err) + return fmt.Errorf("cannot open index file in stream mode: %w", err) } itemsPath := path + "/items.bin" itemsFile, err := filestream.Open(itemsPath, true) if err != nil { indexFile.MustClose() - return fmt.Errorf("cannot open items file in stream mode: 
%s", err) + return fmt.Errorf("cannot open items file in stream mode: %w", err) } lensPath := path + "/lens.bin" @@ -163,7 +163,7 @@ func (bsr *blockStreamReader) InitFromFilePart(path string) error { if err != nil { indexFile.MustClose() itemsFile.MustClose() - return fmt.Errorf("cannot open lens file in stream mode: %s", err) + return fmt.Errorf("cannot open lens file in stream mode: %w", err) } bsr.path = path @@ -200,7 +200,7 @@ func (bsr *blockStreamReader) Next() bool { err = fmt.Errorf("unexpected last item; got %X; want %X", lastItem, bsr.ph.lastItem) } } else { - err = fmt.Errorf("cannot read the next index block: %s", err) + err = fmt.Errorf("cannot read the next index block: %w", err) } bsr.err = err return false @@ -212,18 +212,18 @@ func (bsr *blockStreamReader) Next() bool { bsr.sb.itemsData = bytesutil.Resize(bsr.sb.itemsData, int(bsr.bh.itemsBlockSize)) if err := fs.ReadFullData(bsr.itemsReader, bsr.sb.itemsData); err != nil { - bsr.err = fmt.Errorf("cannot read compressed items block with size %d: %s", bsr.bh.itemsBlockSize, err) + bsr.err = fmt.Errorf("cannot read compressed items block with size %d: %w", bsr.bh.itemsBlockSize, err) return false } bsr.sb.lensData = bytesutil.Resize(bsr.sb.lensData, int(bsr.bh.lensBlockSize)) if err := fs.ReadFullData(bsr.lensReader, bsr.sb.lensData); err != nil { - bsr.err = fmt.Errorf("cannot read compressed lens block with size %d: %s", bsr.bh.lensBlockSize, err) + bsr.err = fmt.Errorf("cannot read compressed lens block with size %d: %w", bsr.bh.lensBlockSize, err) return false } if err := bsr.Block.UnmarshalData(&bsr.sb, bsr.bh.firstItem, bsr.bh.commonPrefix, bsr.bh.itemsCount, bsr.bh.marshalType); err != nil { - bsr.err = fmt.Errorf("cannot unmarshal inmemoryBlock from storageBlock with firstItem=%X, commonPrefix=%X, itemsCount=%d, marshalType=%d: %s", + bsr.err = fmt.Errorf("cannot unmarshal inmemoryBlock from storageBlock with firstItem=%X, commonPrefix=%X, itemsCount=%d, marshalType=%d: %w", bsr.bh.firstItem, bsr.bh.commonPrefix, bsr.bh.itemsCount, bsr.bh.marshalType, err) return false } @@ -260,14 +260,14 @@ func (bsr *blockStreamReader) readNextBHS() error { // Read compressed index block. bsr.packedBuf = bytesutil.Resize(bsr.packedBuf, int(mr.indexBlockSize)) if err := fs.ReadFullData(bsr.indexReader, bsr.packedBuf); err != nil { - return fmt.Errorf("cannot read compressed index block with size %d: %s", mr.indexBlockSize, err) + return fmt.Errorf("cannot read compressed index block with size %d: %w", mr.indexBlockSize, err) } // Unpack the compressed index block. var err error bsr.unpackedBuf, err = encoding.DecompressZSTD(bsr.unpackedBuf[:0], bsr.packedBuf) if err != nil { - return fmt.Errorf("cannot decompress index block with size %d: %s", mr.indexBlockSize, err) + return fmt.Errorf("cannot decompress index block with size %d: %w", mr.indexBlockSize, err) } // Unmarshal the unpacked index block into bsr.bhs. 
@@ -280,7 +280,7 @@ func (bsr *blockStreamReader) readNextBHS() error { for i := 0; i < int(mr.blockHeadersCount); i++ { tail, err := bsr.bhs[i].Unmarshal(b) if err != nil { - return fmt.Errorf("cannot unmarshal blockHeader #%d in the index block #%d: %s", len(bsr.bhs), bsr.mrIdx, err) + return fmt.Errorf("cannot unmarshal blockHeader #%d in the index block #%d: %w", i, bsr.mrIdx, err) } b = tail } diff --git a/lib/mergeset/block_stream_writer.go b/lib/mergeset/block_stream_writer.go index 0dca15ffd..205da28f6 100644 --- a/lib/mergeset/block_stream_writer.go +++ b/lib/mergeset/block_stream_writer.go @@ -84,7 +84,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre // Create the directory if err := fs.MkdirAllFailIfExist(path); err != nil { - return fmt.Errorf("cannot create directory %q: %s", path, err) + return fmt.Errorf("cannot create directory %q: %w", path, err) } // Create part files in the directory. @@ -95,7 +95,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre metaindexFile, err := filestream.Create(metaindexPath, false) if err != nil { fs.MustRemoveAll(path) - return fmt.Errorf("cannot create metaindex file: %s", err) + return fmt.Errorf("cannot create metaindex file: %w", err) } indexPath := path + "/index.bin" @@ -103,7 +103,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre if err != nil { metaindexFile.MustClose() fs.MustRemoveAll(path) - return fmt.Errorf("cannot create index file: %s", err) + return fmt.Errorf("cannot create index file: %w", err) } itemsPath := path + "/items.bin" @@ -112,7 +112,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre metaindexFile.MustClose() indexFile.MustClose() fs.MustRemoveAll(path) - return fmt.Errorf("cannot create items file: %s", err) + return fmt.Errorf("cannot create items file: %w", err) } lensPath := path + "/lens.bin" @@ -122,7 +122,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre indexFile.MustClose() itemsFile.MustClose() fs.MustRemoveAll(path) - return fmt.Errorf("cannot create lens file: %s", err) + return fmt.Errorf("cannot create lens file: %w", err) } bsw.reset() diff --git a/lib/mergeset/encoding.go b/lib/mergeset/encoding.go index 851096f4f..3c9561e5e 100644 --- a/lib/mergeset/encoding.go +++ b/lib/mergeset/encoding.go @@ -267,7 +267,7 @@ func (ib *inmemoryBlock) UnmarshalData(sb *storageBlock, firstItem, commonPrefix switch mt { case marshalTypePlain: if err := ib.unmarshalDataPlain(sb, firstItem, itemsCount); err != nil { - return fmt.Errorf("cannot unmarshal plain data: %s", err) + return fmt.Errorf("cannot unmarshal plain data: %w", err) } if !ib.isSorted() { return fmt.Errorf("plain data block contains unsorted items; items:\n%s", ib.debugItemsString()) @@ -289,7 +289,7 @@ func (ib *inmemoryBlock) UnmarshalData(sb *storageBlock, firstItem, commonPrefix // Unmarshal lens data. 
bb.B, err = encoding.DecompressZSTD(bb.B[:0], sb.lensData) if err != nil { - return fmt.Errorf("cannot decompress lensData: %s", err) + return fmt.Errorf("cannot decompress lensData: %w", err) } lb := getLensBuffer(int(2 * itemsCount)) @@ -304,7 +304,7 @@ func (ib *inmemoryBlock) UnmarshalData(sb *storageBlock, firstItem, commonPrefix // Unmarshal prefixLens tail, err := encoding.UnmarshalVarUint64s(is.A, bb.B) if err != nil { - return fmt.Errorf("cannot unmarshal prefixLens from lensData: %s", err) + return fmt.Errorf("cannot unmarshal prefixLens from lensData: %w", err) } prefixLens[0] = 0 for i, xLen := range is.A { @@ -314,7 +314,7 @@ func (ib *inmemoryBlock) UnmarshalData(sb *storageBlock, firstItem, commonPrefix // Unmarshal lens tail, err = encoding.UnmarshalVarUint64s(is.A, tail) if err != nil { - return fmt.Errorf("cannot unmarshal lens from lensData: %s", err) + return fmt.Errorf("cannot unmarshal lens from lensData: %w", err) } if len(tail) > 0 { return fmt.Errorf("unexpected tail left after unmarshaling %d lens; tail size=%d; contents=%X", itemsCount, len(tail), tail) @@ -331,7 +331,7 @@ func (ib *inmemoryBlock) UnmarshalData(sb *storageBlock, firstItem, commonPrefix // Unmarshal items data. bb.B, err = encoding.DecompressZSTD(bb.B[:0], sb.itemsData) if err != nil { - return fmt.Errorf("cannot decompress lensData: %s", err) + return fmt.Errorf("cannot decompress itemsData: %w", err) } data := bytesutil.Resize(ib.data, maxInmemoryBlockSize) if n := int(itemsCount) - cap(ib.items); n > 0 { diff --git a/lib/mergeset/merge.go b/lib/mergeset/merge.go index 103612b25..afad1ad53 100644 --- a/lib/mergeset/merge.go +++ b/lib/mergeset/merge.go @@ -30,7 +30,7 @@ type PrepareBlockCallback func(data []byte, items [][]byte) ([]byte, [][]byte) func mergeBlockStreams(ph *partHeader, bsw *blockStreamWriter, bsrs []*blockStreamReader, prepareBlock PrepareBlockCallback, stopCh <-chan struct{}, itemsMerged *uint64) error { bsm := bsmPool.Get().(*blockStreamMerger) if err := bsm.Init(bsrs, prepareBlock); err != nil { - return fmt.Errorf("cannot initialize blockStreamMerger: %s", err) + return fmt.Errorf("cannot initialize blockStreamMerger: %w", err) } err := bsm.Merge(bsw, ph, stopCh, itemsMerged) bsm.reset() @@ -42,7 +42,7 @@ func mergeBlockStreams(ph *partHeader, bsw *blockStreamWriter, bsrs []*blockStre if err == errForciblyStopped { return err } - return fmt.Errorf("cannot merge %d block streams: %s: %s", len(bsrs), bsrs, err) + return fmt.Errorf("cannot merge %d block streams: %s: %w", len(bsrs), bsrs, err) } var bsmPool = &sync.Pool{ @@ -88,7 +88,7 @@ func (bsm *blockStreamMerger) Init(bsrs []*blockStreamReader, prepareBlock Prepa } if err := bsr.Error(); err != nil { - return fmt.Errorf("cannot obtain the next block from blockStreamReader %q: %s", bsr.path, err) + return fmt.Errorf("cannot obtain the next block from blockStreamReader %q: %w", bsr.path, err) } } heap.Init(&bsm.bsrHeap) @@ -143,7 +143,7 @@ again: goto again } if err := bsr.Error(); err != nil { - return fmt.Errorf("cannot read storageBlock: %s", err) + return fmt.Errorf("cannot read storageBlock: %w", err) } goto again } diff --git a/lib/mergeset/merge_test.go b/lib/mergeset/merge_test.go index 1cae67384..a4d7cc058 100644 --- a/lib/mergeset/merge_test.go +++ b/lib/mergeset/merge_test.go @@ -121,7 +121,7 @@ func testMergeBlockStreamsSerial(blocksToMerge, maxItemsPerBlock int) error { var bsw blockStreamWriter bsw.InitFromInmemoryPart(&dstIP) if err := mergeBlockStreams(&dstIP.ph, &bsw, bsrs, nil, nil, &itemsMerged); err != nil { - 
return fmt.Errorf("cannot merge block streams: %s", err) + return fmt.Errorf("cannot merge block streams: %w", err) } if itemsMerged != uint64(len(items)) { return fmt.Errorf("unexpected itemsMerged; got %d; want %d", itemsMerged, len(items)) @@ -130,7 +130,7 @@ func testMergeBlockStreamsSerial(blocksToMerge, maxItemsPerBlock int) error { // Verify the resulting part (dstIP) contains all the items // in the correct order. if err := testCheckItems(&dstIP, items); err != nil { - return fmt.Errorf("error checking items: %s", err) + return fmt.Errorf("error checking items: %w", err) } return nil } @@ -164,7 +164,7 @@ func testCheckItems(dstIP *inmemoryPart, items []string) error { } } if err := dstBsr.Error(); err != nil { - return fmt.Errorf("unexpected error in dstBsr: %s", err) + return fmt.Errorf("unexpected error in dstBsr: %w", err) } if !reflect.DeepEqual(items, dstItems) { return fmt.Errorf("unequal items\ngot\n%q\nwant\n%q", dstItems, items) diff --git a/lib/mergeset/metaindex_row.go b/lib/mergeset/metaindex_row.go index 40ba4acda..27569a321 100644 --- a/lib/mergeset/metaindex_row.go +++ b/lib/mergeset/metaindex_row.go @@ -44,7 +44,7 @@ func (mr *metaindexRow) Unmarshal(src []byte) ([]byte, error) { // Unmarshal firstItem tail, fi, err := encoding.UnmarshalBytes(src) if err != nil { - return tail, fmt.Errorf("cannot unmarshal firstItem: %s", err) + return tail, fmt.Errorf("cannot unmarshal firstItem: %w", err) } mr.firstItem = append(mr.firstItem[:0], fi...) src = tail @@ -85,11 +85,11 @@ func unmarshalMetaindexRows(dst []metaindexRow, r io.Reader) ([]metaindexRow, er // since it is quite small. compressedData, err := ioutil.ReadAll(r) if err != nil { - return dst, fmt.Errorf("cannot read metaindex data: %s", err) + return dst, fmt.Errorf("cannot read metaindex data: %w", err) } data, err := encoding.DecompressZSTD(nil, compressedData) if err != nil { - return dst, fmt.Errorf("cannot decompress metaindex data with size %d bytes: %s", len(compressedData), err) + return dst, fmt.Errorf("cannot decompress metaindex data with size %d bytes: %w", len(compressedData), err) } dstLen := len(dst) @@ -102,7 +102,7 @@ func unmarshalMetaindexRows(dst []metaindexRow, r io.Reader) ([]metaindexRow, er mr := &dst[len(dst)-1] tail, err := mr.Unmarshal(data) if err != nil { - return dst, fmt.Errorf("cannot unmarshal metaindexRow #%d from metaindex data: %s", len(dst)-dstLen, err) + return dst, fmt.Errorf("cannot unmarshal metaindexRow #%d from metaindex data: %w", len(dst)-dstLen, err) } data = tail } diff --git a/lib/mergeset/part.go b/lib/mergeset/part.go index c04a95ffb..a1033479a 100644 --- a/lib/mergeset/part.go +++ b/lib/mergeset/part.go @@ -67,13 +67,13 @@ func openFilePart(path string) (*part, error) { var ph partHeader if err := ph.ParseFromPath(path); err != nil { - return nil, fmt.Errorf("cannot parse path to part: %s", err) + return nil, fmt.Errorf("cannot parse path to part: %w", err) } metaindexPath := path + "/metaindex.bin" metaindexFile, err := filestream.Open(metaindexPath, true) if err != nil { - return nil, fmt.Errorf("cannot open %q: %s", metaindexPath, err) + return nil, fmt.Errorf("cannot open %q: %w", metaindexPath, err) } metaindexSize := fs.MustFileSize(metaindexPath) @@ -81,7 +81,7 @@ func openFilePart(path string) (*part, error) { indexFile, err := fs.OpenReaderAt(indexPath) if err != nil { metaindexFile.MustClose() - return nil, fmt.Errorf("cannot open %q: %s", indexPath, err) + return nil, fmt.Errorf("cannot open %q: %w", indexPath, err) } indexSize := 
fs.MustFileSize(indexPath) @@ -90,7 +90,7 @@ func openFilePart(path string) (*part, error) { if err != nil { metaindexFile.MustClose() indexFile.MustClose() - return nil, fmt.Errorf("cannot open %q: %s", itemsPath, err) + return nil, fmt.Errorf("cannot open %q: %w", itemsPath, err) } itemsSize := fs.MustFileSize(itemsPath) @@ -100,7 +100,7 @@ func openFilePart(path string) (*part, error) { metaindexFile.MustClose() indexFile.MustClose() itemsFile.MustClose() - return nil, fmt.Errorf("cannot open %q: %s", lensPath, err) + return nil, fmt.Errorf("cannot open %q: %w", lensPath, err) } lensSize := fs.MustFileSize(lensPath) @@ -112,7 +112,7 @@ func newPart(ph *partHeader, path string, size uint64, metaindexReader filestrea var errors []error mrs, err := unmarshalMetaindexRows(nil, metaindexReader) if err != nil { - errors = append(errors, fmt.Errorf("cannot unmarshal metaindexRows: %s", err)) + errors = append(errors, fmt.Errorf("cannot unmarshal metaindexRows: %w", err)) } metaindexReader.MustClose() @@ -131,7 +131,7 @@ func newPart(ph *partHeader, path string, size uint64, metaindexReader filestrea if len(errors) > 0 { // Return only the first error, since it has no sense in returning all errors. - err := fmt.Errorf("error opening part %s: %s", p.path, errors[0]) + err := fmt.Errorf("error opening part %s: %w", p.path, errors[0]) p.MustClose() return nil, err } diff --git a/lib/mergeset/part_header.go b/lib/mergeset/part_header.go index f8c25a117..de9a23866 100644 --- a/lib/mergeset/part_header.go +++ b/lib/mergeset/part_header.go @@ -54,7 +54,7 @@ func (hs *hexString) UnmarshalJSON(data []byte) error { data = data[1 : len(data)-1] b, err := hex.DecodeString(string(data)) if err != nil { - return fmt.Errorf("cannot hex-decode %q: %s", data, err) + return fmt.Errorf("cannot hex-decode %q: %w", data, err) } *hs = b return nil @@ -101,7 +101,7 @@ func (ph *partHeader) ParseFromPath(partPath string) error { // Read itemsCount from partName. itemsCount, err := strconv.ParseUint(a[0], 10, 64) if err != nil { - return fmt.Errorf("cannot parse itemsCount from partName %q: %s", partName, err) + return fmt.Errorf("cannot parse itemsCount from partName %q: %w", partName, err) } ph.itemsCount = itemsCount if ph.itemsCount <= 0 { @@ -111,7 +111,7 @@ func (ph *partHeader) ParseFromPath(partPath string) error { // Read blocksCount from partName. 
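// A side effect of the wrapping worth noting for ParseFromPath above: strconv's
// *NumError implements Unwrap (Go 1.14+), so once the parse error is wrapped with
// %w, callers can still detect strconv.ErrSyntax through both layers. A
// self-contained sketch; the two-field part name below is illustrative only.
package main

import (
	"errors"
	"fmt"
	"strconv"
	"strings"
)

func parseItemsCount(partName string) (uint64, error) {
	a := strings.Split(partName, "_")
	itemsCount, err := strconv.ParseUint(a[0], 10, 64)
	if err != nil {
		return 0, fmt.Errorf("cannot parse itemsCount from partName %q: %w", partName, err)
	}
	return itemsCount, nil
}

func main() {
	_, err := parseItemsCount("oops_42")
	fmt.Println(errors.Is(err, strconv.ErrSyntax)) // true: the NumError chain survives %w
}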
blocksCount, err := strconv.ParseUint(a[1], 10, 64) if err != nil { - return fmt.Errorf("cannot parse blocksCount from partName %q: %s", partName, err) + return fmt.Errorf("cannot parse blocksCount from partName %q: %w", partName, err) } ph.blocksCount = blocksCount if ph.blocksCount <= 0 { @@ -126,12 +126,12 @@ func (ph *partHeader) ParseFromPath(partPath string) error { metadataPath := partPath + "/metadata.json" metadata, err := ioutil.ReadFile(metadataPath) if err != nil { - return fmt.Errorf("cannot read %q: %s", metadataPath, err) + return fmt.Errorf("cannot read %q: %w", metadataPath, err) } var phj partHeaderJSON if err := json.Unmarshal(metadata, &phj); err != nil { - return fmt.Errorf("cannot parse %q: %s", metadataPath, err) + return fmt.Errorf("cannot parse %q: %w", metadataPath, err) } if ph.itemsCount != phj.ItemsCount { return fmt.Errorf("invalid ItemsCount in %q; got %d; want %d", metadataPath, phj.ItemsCount, ph.itemsCount) @@ -161,11 +161,11 @@ func (ph *partHeader) WriteMetadata(partPath string) error { } metadata, err := json.MarshalIndent(&phj, "", "\t") if err != nil { - return fmt.Errorf("cannot marshal metadata: %s", err) + return fmt.Errorf("cannot marshal metadata: %w", err) } metadataPath := partPath + "/metadata.json" if err := fs.WriteFileAtomically(metadataPath, metadata); err != nil { - return fmt.Errorf("cannot create %q: %s", metadataPath, err) + return fmt.Errorf("cannot create %q: %w", metadataPath, err) } return nil } diff --git a/lib/mergeset/part_search.go b/lib/mergeset/part_search.go index a3d7542b7..01d067688 100644 --- a/lib/mergeset/part_search.go +++ b/lib/mergeset/part_search.go @@ -279,7 +279,7 @@ func (ps *partSearch) nextBHS() error { var err error idxb, err = ps.readIndexBlock(mr) if err != nil { - return fmt.Errorf("cannot read index block: %s", err) + return fmt.Errorf("cannot read index block: %w", err) } ps.idxbCache.Put(idxbKey, idxb) } @@ -294,12 +294,12 @@ func (ps *partSearch) readIndexBlock(mr *metaindexRow) (*indexBlock, error) { var err error ps.indexBuf, err = encoding.DecompressZSTD(ps.indexBuf[:0], ps.compressedIndexBuf) if err != nil { - return nil, fmt.Errorf("cannot decompress index block with size %d bytes: %s", len(ps.compressedIndexBuf), err) + return nil, fmt.Errorf("cannot decompress index block with size %d bytes: %w", len(ps.compressedIndexBuf), err) } idxb := getIndexBlock() idxb.bhs, err = unmarshalBlockHeaders(idxb.bhs[:0], ps.indexBuf, int(mr.blockHeadersCount)) if err != nil { - return nil, fmt.Errorf("cannot unmarshal block headers from index block (offset=%d, size=%d): %s", mr.indexBlockOffset, mr.indexBlockSize, err) + return nil, fmt.Errorf("cannot unmarshal block headers from index block (offset=%d, size=%d): %w", mr.indexBlockOffset, mr.indexBlockSize, err) } return idxb, nil } @@ -340,7 +340,7 @@ func (ps *partSearch) readInmemoryBlock(bh *blockHeader) (*inmemoryBlock, error) ib := getInmemoryBlock() if err := ib.UnmarshalData(&ps.sb, bh.firstItem, bh.commonPrefix, bh.itemsCount, bh.marshalType); err != nil { - return nil, fmt.Errorf("cannot unmarshal storage block with %d items: %s", bh.itemsCount, err) + return nil, fmt.Errorf("cannot unmarshal storage block with %d items: %w", bh.itemsCount, err) } return ib, nil diff --git a/lib/mergeset/part_search_test.go b/lib/mergeset/part_search_test.go index 37abbca10..37b87f13d 100644 --- a/lib/mergeset/part_search_test.go +++ b/lib/mergeset/part_search_test.go @@ -72,7 +72,7 @@ func testPartSearchSerial(p *part, items []string) error { return 
fmt.Errorf("unexpected item found past the end of all the items: %X", ps.Item) } if err := ps.Error(); err != nil { - return fmt.Errorf("unexpected error: %s", err) + return fmt.Errorf("unexpected error: %w", err) } // Search for the item bigger than the items[len(items)-1] @@ -83,7 +83,7 @@ func testPartSearchSerial(p *part, items []string) error { return fmt.Errorf("unexpected item found: %X; want nothing", ps.Item) } if err := ps.Error(); err != nil { - return fmt.Errorf("unexpected error when searching past the last item: %s", err) + return fmt.Errorf("unexpected error when searching past the last item: %w", err) } // Search for inner items @@ -107,7 +107,7 @@ func testPartSearchSerial(p *part, items []string) error { return fmt.Errorf("unexpected item found past the end of all the items for idx %d out of %d items; loop %d: got %X", n, len(items), loop, ps.Item) } if err := ps.Error(); err != nil { - return fmt.Errorf("unexpected error on loop %d: %s", loop, err) + return fmt.Errorf("unexpected error on loop %d: %w", loop, err) } } @@ -121,7 +121,7 @@ func testPartSearchSerial(p *part, items []string) error { return fmt.Errorf("unexpected item found at position %d: got %X; want %X", i, ps.Item, item) } if err := ps.Error(); err != nil { - return fmt.Errorf("unexpected error when searching for items[%d]=%X: %s", i, item, err) + return fmt.Errorf("unexpected error when searching for items[%d]=%X: %w", i, item, err) } } @@ -136,7 +136,7 @@ func testPartSearchSerial(p *part, items []string) error { return fmt.Errorf("unexpected item found at position %d: got %X; want %X", i, ps.Item, item) } if err := ps.Error(); err != nil { - return fmt.Errorf("unexpected error when searching for items[%d]=%X: %s", i, item, err) + return fmt.Errorf("unexpected error when searching for items[%d]=%X: %w", i, item, err) } } @@ -151,7 +151,7 @@ func newTestPart(blocksCount, maxItemsPerBlock int) (*part, []string, error) { var bsw blockStreamWriter bsw.InitFromInmemoryPart(&ip) if err := mergeBlockStreams(&ip.ph, &bsw, bsrs, nil, nil, &itemsMerged); err != nil { - return nil, nil, fmt.Errorf("cannot merge blocks: %s", err) + return nil, nil, fmt.Errorf("cannot merge blocks: %w", err) } if itemsMerged != uint64(len(items)) { return nil, nil, fmt.Errorf("unexpected itemsMerged; got %d; want %d", itemsMerged, len(items)) @@ -159,7 +159,7 @@ func newTestPart(blocksCount, maxItemsPerBlock int) (*part, []string, error) { size := ip.size() p, err := newPart(&ip.ph, "partName", size, ip.metaindexData.NewReader(), &ip.indexData, &ip.itemsData, &ip.lensData) if err != nil { - return nil, nil, fmt.Errorf("cannot create part: %s", err) + return nil, nil, fmt.Errorf("cannot create part: %w", err) } return p, items, nil } diff --git a/lib/mergeset/table.go b/lib/mergeset/table.go index 14fa70410..f8b3f014d 100644 --- a/lib/mergeset/table.go +++ b/lib/mergeset/table.go @@ -169,7 +169,7 @@ func OpenTable(path string, flushCallback func(), prepareBlock PrepareBlockCallb // Create a directory for the table if it doesn't exist yet. if err := fs.MkdirAllIfNotExist(path); err != nil { - return nil, fmt.Errorf("cannot create directory %q: %s", path, err) + return nil, fmt.Errorf("cannot create directory %q: %w", path, err) } // Protect from concurrent opens. @@ -181,7 +181,7 @@ func OpenTable(path string, flushCallback func(), prepareBlock PrepareBlockCallb // Open table parts. 
pws, err := openParts(path) if err != nil { - return nil, fmt.Errorf("cannot open table parts at %q: %s", path, err) + return nil, fmt.Errorf("cannot open table parts at %q: %w", path, err) } tb := &Table{ @@ -481,13 +481,13 @@ func (tb *Table) convertToV1280() { func (tb *Table) mergePartsOptimal(pws []*partWrapper, stopCh <-chan struct{}) error { for len(pws) > defaultPartsToMerge { if err := tb.mergeParts(pws[:defaultPartsToMerge], stopCh, false); err != nil { - return fmt.Errorf("cannot merge %d parts: %s", defaultPartsToMerge, err) + return fmt.Errorf("cannot merge %d parts: %w", defaultPartsToMerge, err) } pws = pws[defaultPartsToMerge:] } if len(pws) > 0 { if err := tb.mergeParts(pws, stopCh, false); err != nil { - return fmt.Errorf("cannot merge %d parts: %s", len(pws), err) + return fmt.Errorf("cannot merge %d parts: %w", len(pws), err) } } return nil @@ -761,7 +761,7 @@ func (tb *Table) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isOuterP bsr.InitFromInmemoryPart(pw.mp) } else { if err := bsr.InitFromFilePart(pw.p.path); err != nil { - return fmt.Errorf("cannot open source part for merging: %s", err) + return fmt.Errorf("cannot open source part for merging: %w", err) } } bsrs = append(bsrs, bsr) @@ -786,7 +786,7 @@ func (tb *Table) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isOuterP bsw := getBlockStreamWriter() compressLevel := getCompressLevelForPartItems(outItemsCount, outBlocksCount) if err := bsw.InitFromFilePart(tmpPartPath, nocache, compressLevel); err != nil { - return fmt.Errorf("cannot create destination part %q: %s", tmpPartPath, err) + return fmt.Errorf("cannot create destination part %q: %w", tmpPartPath, err) } // Merge parts into a temporary location. @@ -797,10 +797,10 @@ func (tb *Table) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isOuterP if err == errForciblyStopped { return err } - return fmt.Errorf("error when merging parts to %q: %s", tmpPartPath, err) + return fmt.Errorf("error when merging parts to %q: %w", tmpPartPath, err) } if err := ph.WriteMetadata(tmpPartPath); err != nil { - return fmt.Errorf("cannot write metadata to destination part %q: %s", tmpPartPath, err) + return fmt.Errorf("cannot write metadata to destination part %q: %w", tmpPartPath, err) } // Close bsrs (aka source parts). @@ -821,18 +821,18 @@ func (tb *Table) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isOuterP fmt.Fprintf(&bb, "%s -> %s\n", tmpPartPath, dstPartPath) txnPath := fmt.Sprintf("%s/txn/%016X", tb.path, mergeIdx) if err := fs.WriteFileAtomically(txnPath, bb.B); err != nil { - return fmt.Errorf("cannot create transaction file %q: %s", txnPath, err) + return fmt.Errorf("cannot create transaction file %q: %w", txnPath, err) } // Run the created transaction. if err := runTransaction(&tb.snapshotLock, tb.path, txnPath); err != nil { - return fmt.Errorf("cannot execute transaction %q: %s", txnPath, err) + return fmt.Errorf("cannot execute transaction %q: %w", txnPath, err) } // Open the merged part. 
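// Note how mergeParts above returns errForciblyStopped without wrapping it, so the
// plain `err == errForciblyStopped` comparison keeps working. If a sentinel like
// this were ever wrapped with %w on its way up, only errors.Is would still match.
// A sketch reusing the sentinel name from the diff; the wrapping layer is
// hypothetical:
package main

import (
	"errors"
	"fmt"
)

var errForciblyStopped = errors.New("forcibly stopped")

func main() {
	wrapped := fmt.Errorf("error when merging parts to %q: %w", "/tmp/part", errForciblyStopped)
	fmt.Println(wrapped == errForciblyStopped)          // false: == sees only the outer error
	fmt.Println(errors.Is(wrapped, errForciblyStopped)) // true: Is walks the %w chain
}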
newP, err := openFilePart(dstPartPath) if err != nil { - return fmt.Errorf("cannot open merged part %q: %s", dstPartPath, err) + return fmt.Errorf("cannot open merged part %q: %w", dstPartPath, err) } newPSize := newP.size newPW := &partWrapper{ @@ -950,7 +950,7 @@ func openParts(path string) ([]*partWrapper, error) { } d, err := os.Open(path) if err != nil { - return nil, fmt.Errorf("cannot open difrectory: %s", err) + return nil, fmt.Errorf("cannot open directory: %w", err) } defer fs.MustClose(d) @@ -958,19 +958,19 @@ func openParts(path string) ([]*partWrapper, error) { // Snapshots cannot be created yet, so use fakeSnapshotLock. var fakeSnapshotLock sync.RWMutex if err := runTransactions(&fakeSnapshotLock, path); err != nil { - return nil, fmt.Errorf("cannot run transactions: %s", err) + return nil, fmt.Errorf("cannot run transactions: %w", err) } txnDir := path + "/txn" fs.MustRemoveAll(txnDir) if err := fs.MkdirAllFailIfExist(txnDir); err != nil { - return nil, fmt.Errorf("cannot create %q: %s", txnDir, err) + return nil, fmt.Errorf("cannot create %q: %w", txnDir, err) } tmpDir := path + "/tmp" fs.MustRemoveAll(tmpDir) if err := fs.MkdirAllFailIfExist(tmpDir); err != nil { - return nil, fmt.Errorf("cannot create %q: %s", tmpDir, err) + return nil, fmt.Errorf("cannot create %q: %w", tmpDir, err) } fs.MustSyncPath(path) @@ -978,7 +978,7 @@ func openParts(path string) ([]*partWrapper, error) { // Open parts. fis, err := d.Readdir(-1) if err != nil { - return nil, fmt.Errorf("cannot read directory: %s", err) + return nil, fmt.Errorf("cannot read directory: %w", err) } var pws []*partWrapper for _, fi := range fis { @@ -995,7 +995,7 @@ func openParts(path string) ([]*partWrapper, error) { p, err := openFilePart(partPath) if err != nil { mustCloseParts(pws) - return nil, fmt.Errorf("cannot open part %q: %s", partPath, err) + return nil, fmt.Errorf("cannot open part %q: %w", partPath, err) } pw := &partWrapper{ p: p, @@ -1028,11 +1028,11 @@ func (tb *Table) CreateSnapshotAt(dstDir string) error { srcDir := tb.path srcDir, err = filepath.Abs(srcDir) if err != nil { - return fmt.Errorf("cannot obtain absolute dir for %q: %s", srcDir, err) + return fmt.Errorf("cannot obtain absolute dir for %q: %w", srcDir, err) } dstDir, err = filepath.Abs(dstDir) if err != nil { - return fmt.Errorf("cannot obtain absolute dir for %q: %s", dstDir, err) + return fmt.Errorf("cannot obtain absolute dir for %q: %w", dstDir, err) } if strings.HasPrefix(dstDir, srcDir+"/") { return fmt.Errorf("cannot create snapshot %q inside the data dir %q", dstDir, srcDir) @@ -1047,18 +1047,18 @@ func (tb *Table) CreateSnapshotAt(dstDir string) error { defer tb.snapshotLock.Unlock() if err := fs.MkdirAllFailIfExist(dstDir); err != nil { - return fmt.Errorf("cannot create snapshot dir %q: %s", dstDir, err) + return fmt.Errorf("cannot create snapshot dir %q: %w", dstDir, err) } d, err := os.Open(srcDir) if err != nil { - return fmt.Errorf("cannot open difrectory: %s", err) + return fmt.Errorf("cannot open directory: %w", err) } defer fs.MustClose(d) fis, err := d.Readdir(-1) if err != nil { - return fmt.Errorf("cannot read directory: %s", err) + return fmt.Errorf("cannot read directory: %w", err) } for _, fi := range fis { fn := fi.Name() @@ -1068,7 +1068,7 @@ func (tb *Table) CreateSnapshotAt(dstDir string) error { srcPath := srcDir + "/" + fn dstPath := dstDir + "/" + fn if err := os.Link(srcPath, dstPath); err != nil { - return fmt.Errorf("cannot hard link from %q to %q: %s", srcPath, dstPath, err) + return 
fmt.Errorf("cannot hard link from %q to %q: %w", srcPath, dstPath, err) } default: // Skip other non-directories. @@ -1082,7 +1082,7 @@ func (tb *Table) CreateSnapshotAt(dstDir string) error { srcPartPath := srcDir + "/" + fn dstPartPath := dstDir + "/" + fn if err := fs.HardLinkFiles(srcPartPath, dstPartPath); err != nil { - return fmt.Errorf("cannot create hard links from %q to %q: %s", srcPartPath, dstPartPath, err) + return fmt.Errorf("cannot create hard links from %q to %q: %w", srcPartPath, dstPartPath, err) } } @@ -1107,13 +1107,13 @@ func runTransactions(txnLock *sync.RWMutex, path string) error { if os.IsNotExist(err) { return nil } - return fmt.Errorf("cannot open %q: %s", txnDir, err) + return fmt.Errorf("cannot open %q: %w", txnDir, err) } defer fs.MustClose(d) fis, err := d.Readdir(-1) if err != nil { - return fmt.Errorf("cannot read directory %q: %s", d.Name(), err) + return fmt.Errorf("cannot read directory %q: %w", d.Name(), err) } // Sort transaction files by id, since transactions must be ordered. @@ -1129,7 +1129,7 @@ func runTransactions(txnLock *sync.RWMutex, path string) error { } txnPath := txnDir + "/" + fn if err := runTransaction(txnLock, path, txnPath); err != nil { - return fmt.Errorf("cannot run transaction from %q: %s", txnPath, err) + return fmt.Errorf("cannot run transaction from %q: %w", txnPath, err) } } return nil @@ -1143,7 +1143,7 @@ func runTransaction(txnLock *sync.RWMutex, pathPrefix, txnPath string) error { data, err := ioutil.ReadFile(txnPath) if err != nil { - return fmt.Errorf("cannot read transaction file: %s", err) + return fmt.Errorf("cannot read transaction file: %w", err) } if len(data) > 0 && data[len(data)-1] == '\n' { data = data[:len(data)-1] @@ -1164,7 +1164,7 @@ func runTransaction(txnLock *sync.RWMutex, pathPrefix, txnPath string) error { for _, path := range rmPaths { path, err := validatePath(pathPrefix, path) if err != nil { - return fmt.Errorf("invalid path to remove: %s", err) + return fmt.Errorf("invalid path to remove: %w", err) } removeWG.Add(1) fs.MustRemoveAllWithDoneCallback(path, removeWG.Done) @@ -1175,15 +1175,15 @@ func runTransaction(txnLock *sync.RWMutex, pathPrefix, txnPath string) error { dstPath := mvPaths[1] srcPath, err = validatePath(pathPrefix, srcPath) if err != nil { - return fmt.Errorf("invalid source path to rename: %s", err) + return fmt.Errorf("invalid source path to rename: %w", err) } dstPath, err = validatePath(pathPrefix, dstPath) if err != nil { - return fmt.Errorf("invalid destination path to rename: %s", err) + return fmt.Errorf("invalid destination path to rename: %w", err) } if fs.IsPathExist(srcPath) { if err := os.Rename(srcPath, dstPath); err != nil { - return fmt.Errorf("cannot rename %q to %q: %s", srcPath, dstPath, err) + return fmt.Errorf("cannot rename %q to %q: %w", srcPath, dstPath, err) } } else if !fs.IsPathExist(dstPath) { // Emit info message for the expected condition after unclean shutdown on NFS disk. 
@@ -1217,12 +1217,12 @@ func validatePath(pathPrefix, path string) (string, error) { pathPrefix, err = filepath.Abs(pathPrefix) if err != nil { - return path, fmt.Errorf("cannot determine absolute path for pathPrefix=%q: %s", pathPrefix, err) + return path, fmt.Errorf("cannot determine absolute path for pathPrefix=%q: %w", pathPrefix, err) } path, err = filepath.Abs(path) if err != nil { - return path, fmt.Errorf("cannot determine absolute path for %q: %s", path, err) + return path, fmt.Errorf("cannot determine absolute path for %q: %w", path, err) } if !strings.HasPrefix(path, pathPrefix+"/") { return path, fmt.Errorf("invalid path %q; must start with %q", path, pathPrefix+"/") diff --git a/lib/mergeset/table_search.go b/lib/mergeset/table_search.go index e0f39aa1b..150f38b44 100644 --- a/lib/mergeset/table_search.go +++ b/lib/mergeset/table_search.go @@ -104,7 +104,7 @@ func (ts *TableSearch) Seek(k []byte) { } if len(errors) > 0 { // Return only the first error, since it has no sense in returning all errors. - ts.err = fmt.Errorf("cannot seek %q: %s", k, errors[0]) + ts.err = fmt.Errorf("cannot seek %q: %w", k, errors[0]) return } if len(ts.psHeap) == 0 { @@ -149,7 +149,7 @@ func (ts *TableSearch) NextItem() bool { ts.err = ts.nextBlock() if ts.err != nil { if ts.err != io.EOF { - ts.err = fmt.Errorf("cannot obtain the next block to search in the table: %s", ts.err) + ts.err = fmt.Errorf("cannot obtain the next block to search in the table: %w", ts.err) } return false } diff --git a/lib/mergeset/table_search_test.go b/lib/mergeset/table_search_test.go index 74bf64363..56de4794e 100644 --- a/lib/mergeset/table_search_test.go +++ b/lib/mergeset/table_search_test.go @@ -98,7 +98,7 @@ func testTableSearchConcurrent(tb *Table, items []string) error { select { case err := <-ch: if err != nil { - return fmt.Errorf("unexpected error: %s", err) + return fmt.Errorf("unexpected error: %w", err) } case <-time.After(time.Second * 5): return fmt.Errorf("timeout") @@ -139,7 +139,7 @@ func testTableSearchSerial(tb *Table, items []string) error { return fmt.Errorf("superflouos item found at position %d when searching for %q: %q", n, key, ts.Item) } if err := ts.Error(); err != nil { - return fmt.Errorf("unexpected error when searching for %q: %s", key, err) + return fmt.Errorf("unexpected error when searching for %q: %w", key, err) } } ts.MustClose() @@ -153,13 +153,13 @@ func newTestTable(path string, itemsCount int) (*Table, []string, error) { } tb, err := OpenTable(path, flushCallback, nil) if err != nil { - return nil, nil, fmt.Errorf("cannot open table: %s", err) + return nil, nil, fmt.Errorf("cannot open table: %w", err) } items := make([]string, itemsCount) for i := 0; i < itemsCount; i++ { item := fmt.Sprintf("%d:%d", rand.Intn(1e9), i) if err := tb.AddItems([][]byte{[]byte(item)}); err != nil { - return nil, nil, fmt.Errorf("cannot add item: %s", err) + return nil, nil, fmt.Errorf("cannot add item: %w", err) } items[i] = item } diff --git a/lib/mergeset/table_search_timing_test.go b/lib/mergeset/table_search_timing_test.go index 3301c05d5..3918cb5cc 100644 --- a/lib/mergeset/table_search_timing_test.go +++ b/lib/mergeset/table_search_timing_test.go @@ -27,7 +27,7 @@ func benchmarkTableSearch(b *testing.B, itemsCount int) { tb, items, err := newTestTable(path, itemsCount) if err != nil { - panic(fmt.Errorf("cannot create test table at %q with %d items: %s", path, itemsCount, err)) + panic(fmt.Errorf("cannot create test table at %q with %d items: %w", path, itemsCount, err)) } // Force finishing 
pending merges @@ -106,7 +106,7 @@ func benchmarkTableSearchKeysExt(b *testing.B, tb *Table, keys [][]byte, stripSu } } if err := ts.Error(); err != nil { - panic(fmt.Errorf("BUG: unexpected error for searchKeys[%d]=%q: %s", i, searchKey, err)) + panic(fmt.Errorf("BUG: unexpected error for searchKeys[%d]=%q: %w", i, searchKey, err)) } } } diff --git a/lib/persistentqueue/persistentqueue.go b/lib/persistentqueue/persistentqueue.go index a6db60ee9..e0e9b110c 100644 --- a/lib/persistentqueue/persistentqueue.go +++ b/lib/persistentqueue/persistentqueue.go @@ -177,7 +177,7 @@ func tryOpeningQueue(path, name string, chunkFileSize, maxBlockSize, maxPendingB } if err := fs.MkdirAllIfNotExist(path); err != nil { - return nil, fmt.Errorf("cannot create directory %q: %s", path, err) + return nil, fmt.Errorf("cannot create directory %q: %w", path, err) } // Read metainfo. @@ -193,13 +193,13 @@ func tryOpeningQueue(path, name string, chunkFileSize, maxBlockSize, maxPendingB mi.Reset() mi.Name = q.name if err := mi.WriteToFile(metainfoPath); err != nil { - return nil, fmt.Errorf("cannot create %q: %s", metainfoPath, err) + return nil, fmt.Errorf("cannot create %q: %w", metainfoPath, err) } // Create initial chunk file. filepath := q.chunkFilePath(0) if err := fs.WriteFileAtomically(filepath, nil); err != nil { - return nil, fmt.Errorf("cannot create %q: %s", filepath, err) + return nil, fmt.Errorf("cannot create %q: %w", filepath, err) } } if mi.Name != q.name { @@ -209,7 +209,7 @@ func tryOpeningQueue(path, name string, chunkFileSize, maxBlockSize, maxPendingB // Locate reader and writer chunks in the path. fis, err := ioutil.ReadDir(path) if err != nil { - return nil, fmt.Errorf("cannot read contents of the directory %q: %s", path, err) + return nil, fmt.Errorf("cannot read contents of the directory %q: %w", path, err) } for _, fi := range fis { fname := fi.Name() @@ -406,11 +406,11 @@ func (q *Queue) writeBlockLocked(block []byte) error { q.writerPath = q.chunkFilePath(q.writerOffset) w, err := filestream.Create(q.writerPath, false) if err != nil { - return fmt.Errorf("cannot create chunk file %q: %s", q.writerPath, err) + return fmt.Errorf("cannot create chunk file %q: %w", q.writerPath, err) } q.writer = w if err := q.flushMetainfo(); err != nil { - return fmt.Errorf("cannot flush metainfo: %s", err) + return fmt.Errorf("cannot flush metainfo: %w", err) } } @@ -421,12 +421,12 @@ func (q *Queue) writeBlockLocked(block []byte) error { err := q.write(header.B) headerBufPool.Put(header) if err != nil { - return fmt.Errorf("cannot write header with size 8 bytes to %q: %s", q.writerPath, err) + return fmt.Errorf("cannot write header with size 8 bytes to %q: %w", q.writerPath, err) } // Write block contents. 
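// For context on writeBlockLocked above and readBlockLocked below: each queued
// block is an 8-byte length header followed by the payload, and every failed
// read/write is now wrapped with %w. A stdlib-only sketch of that framing; the real
// code uses the project's lib/encoding and filestream helpers, so the byte order
// and buffer type here are illustrative assumptions:
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

func writeBlock(w io.Writer, block []byte) error {
	var header [8]byte
	binary.BigEndian.PutUint64(header[:], uint64(len(block)))
	if _, err := w.Write(header[:]); err != nil {
		return fmt.Errorf("cannot write header with size 8 bytes: %w", err)
	}
	if _, err := w.Write(block); err != nil {
		return fmt.Errorf("cannot write block contents with size %d bytes: %w", len(block), err)
	}
	return nil
}

func readBlock(r io.Reader) ([]byte, error) {
	var header [8]byte
	if _, err := io.ReadFull(r, header[:]); err != nil {
		return nil, fmt.Errorf("cannot read header with size 8 bytes: %w", err)
	}
	block := make([]byte, binary.BigEndian.Uint64(header[:]))
	if _, err := io.ReadFull(r, block); err != nil {
		return nil, fmt.Errorf("cannot read block contents with size %d bytes: %w", len(block), err)
	}
	return block, nil
}

func main() {
	var buf bytes.Buffer
	if err := writeBlock(&buf, []byte("hello")); err != nil {
		panic(err)
	}
	block, err := readBlock(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", block) // hello
}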
if err := q.write(block); err != nil { - return fmt.Errorf("cannot write block contents with size %d bytes to %q: %s", len(block), q.writerPath, err) + return fmt.Errorf("cannot write block contents with size %d bytes to %q: %w", len(block), q.writerPath, err) } q.blocksWritten.Inc() q.bytesWritten.Add(len(block)) @@ -474,11 +474,11 @@ func (q *Queue) readBlockLocked(dst []byte) ([]byte, error) { q.readerPath = q.chunkFilePath(q.readerOffset) r, err := filestream.Open(q.readerPath, true) if err != nil { - return dst, fmt.Errorf("cannot open chunk file %q: %s", q.readerPath, err) + return dst, fmt.Errorf("cannot open chunk file %q: %w", q.readerPath, err) } q.reader = r if err := q.flushMetainfo(); err != nil { - return dst, fmt.Errorf("cannot flush metainfo: %s", err) + return dst, fmt.Errorf("cannot flush metainfo: %w", err) } } @@ -489,7 +489,7 @@ func (q *Queue) readBlockLocked(dst []byte) ([]byte, error) { blockLen := encoding.UnmarshalUint64(header.B) headerBufPool.Put(header) if err != nil { - return dst, fmt.Errorf("cannot read header with size 8 bytes from %q: %s", q.readerPath, err) + return dst, fmt.Errorf("cannot read header with size 8 bytes from %q: %w", q.readerPath, err) } if blockLen > q.maxBlockSize { return dst, fmt.Errorf("too big block size read from %q: %d bytes; cannot exceed %d bytes", q.readerPath, blockLen, q.maxBlockSize) @@ -499,7 +499,7 @@ func (q *Queue) readBlockLocked(dst []byte) ([]byte, error) { dstLen := len(dst) dst = bytesutil.Resize(dst, dstLen+int(blockLen)) if err := q.readFull(dst[dstLen:]); err != nil { - return dst, fmt.Errorf("cannot read block contents with size %d bytes from %q: %s", blockLen, q.readerPath, err) + return dst, fmt.Errorf("cannot read block contents with size %d bytes from %q: %w", blockLen, q.readerPath, err) } q.blocksRead.Inc() q.bytesRead.Add(int(blockLen)) @@ -546,7 +546,7 @@ func (q *Queue) flushMetainfo() error { } metainfoPath := q.metainfoPath() if err := mi.WriteToFile(metainfoPath); err != nil { - return fmt.Errorf("cannot write metainfo to %q: %s", metainfoPath, err) + return fmt.Errorf("cannot write metainfo to %q: %w", metainfoPath, err) } return nil } @@ -567,10 +567,10 @@ func (mi *metainfo) Reset() { func (mi *metainfo) WriteToFile(path string) error { data, err := json.Marshal(mi) if err != nil { - return fmt.Errorf("cannot marshal persistent queue metainfo %#v: %s", mi, err) + return fmt.Errorf("cannot marshal persistent queue metainfo %#v: %w", mi, err) } if err := ioutil.WriteFile(path, data, 0600); err != nil { - return fmt.Errorf("cannot write persistent queue metainfo to %q: %s", path, err) + return fmt.Errorf("cannot write persistent queue metainfo to %q: %w", path, err) } return nil } @@ -582,10 +582,10 @@ func (mi *metainfo) ReadFromFile(path string) error { if os.IsNotExist(err) { return err } - return fmt.Errorf("cannot read %q: %s", path, err) + return fmt.Errorf("cannot read %q: %w", path, err) } if err := json.Unmarshal(data, mi); err != nil { - return fmt.Errorf("cannot unmarshal persistent queue metainfo from %q: %s", path, err) + return fmt.Errorf("cannot unmarshal persistent queue metainfo from %q: %w", path, err) } if mi.ReaderOffset > mi.WriterOffset { return fmt.Errorf("invalid data read from %q: readerOffset=%d cannot exceed writerOffset=%d", path, mi.ReaderOffset, mi.WriterOffset) diff --git a/lib/persistentqueue/persistentqueue_test.go b/lib/persistentqueue/persistentqueue_test.go index 7a18af4ab..e4b83d540 100644 --- a/lib/persistentqueue/persistentqueue_test.go +++ 
b/lib/persistentqueue/persistentqueue_test.go @@ -495,20 +495,20 @@ func TestQueueLimitedSize(t *testing.T) { func mustCreateFile(path, contents string) { if err := ioutil.WriteFile(path, []byte(contents), 0600); err != nil { - panic(fmt.Errorf("cannot create file %q with %d bytes contents: %s", path, len(contents), err)) + panic(fmt.Errorf("cannot create file %q with %d bytes contents: %w", path, len(contents), err)) } } func mustCreateDir(path string) { mustDeleteDir(path) if err := os.MkdirAll(path, 0700); err != nil { - panic(fmt.Errorf("cannot create dir %q: %s", path, err)) + panic(fmt.Errorf("cannot create dir %q: %w", path, err)) } } func mustDeleteDir(path string) { if err := os.RemoveAll(path); err != nil { - panic(fmt.Errorf("cannot remove dir %q: %s", path, err)) + panic(fmt.Errorf("cannot remove dir %q: %w", path, err)) } } @@ -516,6 +516,6 @@ func mustCreateEmptyMetainfo(path, name string) { var mi metainfo mi.Name = name if err := mi.WriteToFile(path + "/metainfo.json"); err != nil { - panic(fmt.Errorf("cannot create metainfo: %s", err)) + panic(fmt.Errorf("cannot create metainfo: %w", err)) } } diff --git a/lib/promauth/config.go b/lib/promauth/config.go index 76c305967..838b42be6 100644 --- a/lib/promauth/config.go +++ b/lib/promauth/config.go @@ -93,7 +93,7 @@ func NewConfig(baseDir string, basicAuth *BasicAuthConfig, bearerToken, bearerTo path := getFilepath(baseDir, basicAuth.PasswordFile) pass, err := readPasswordFromFile(path) if err != nil { - return nil, fmt.Errorf("cannot read password from `password_file`=%q set in `basic_auth` section: %s", basicAuth.PasswordFile, err) + return nil, fmt.Errorf("cannot read password from `password_file`=%q set in `basic_auth` section: %w", basicAuth.PasswordFile, err) } password = pass } @@ -109,7 +109,7 @@ func NewConfig(baseDir string, basicAuth *BasicAuthConfig, bearerToken, bearerTo path := getFilepath(baseDir, bearerTokenFile) token, err := readPasswordFromFile(path) if err != nil { - return nil, fmt.Errorf("cannot read bearer token from `bearer_token_file`=%q: %s", bearerTokenFile, err) + return nil, fmt.Errorf("cannot read bearer token from `bearer_token_file`=%q: %w", bearerTokenFile, err) } bearerToken = token } @@ -131,7 +131,7 @@ func NewConfig(baseDir string, basicAuth *BasicAuthConfig, bearerToken, bearerTo keyPath := getFilepath(baseDir, tlsConfig.KeyFile) cert, err := tls.LoadX509KeyPair(certPath, keyPath) if err != nil { - return nil, fmt.Errorf("cannot load TLS certificate from `cert_file`=%q, `key_file`=%q: %s", tlsConfig.CertFile, tlsConfig.KeyFile, err) + return nil, fmt.Errorf("cannot load TLS certificate from `cert_file`=%q, `key_file`=%q: %w", tlsConfig.CertFile, tlsConfig.KeyFile, err) } tlsCertificate = &cert } @@ -139,7 +139,7 @@ func NewConfig(baseDir string, basicAuth *BasicAuthConfig, bearerToken, bearerTo path := getFilepath(baseDir, tlsConfig.CAFile) data, err := ioutil.ReadFile(path) if err != nil { - return nil, fmt.Errorf("cannot read `ca_file` %q: %s", tlsConfig.CAFile, err) + return nil, fmt.Errorf("cannot read `ca_file` %q: %w", tlsConfig.CAFile, err) } tlsRootCA = x509.NewCertPool() if !tlsRootCA.AppendCertsFromPEM(data) { diff --git a/lib/prompbmarshal/util.go b/lib/prompbmarshal/util.go index 2dcbc5f81..ef766e02a 100644 --- a/lib/prompbmarshal/util.go +++ b/lib/prompbmarshal/util.go @@ -14,7 +14,7 @@ func MarshalWriteRequest(dst []byte, wr *WriteRequest) []byte { dst = dst[:dstLen+size] n, err := wr.MarshalToSizedBuffer(dst[dstLen:]) if err != nil { - panic(fmt.Errorf("BUG: unexpected error 
when marshaling WriteRequest: %s", err)) + panic(fmt.Errorf("BUG: unexpected error when marshaling WriteRequest: %w", err)) } return dst[:dstLen+n] } diff --git a/lib/promrelabel/config.go b/lib/promrelabel/config.go index ec0fc91f9..f0134c0ac 100644 --- a/lib/promrelabel/config.go +++ b/lib/promrelabel/config.go @@ -26,11 +26,11 @@ type RelabelConfig struct { func LoadRelabelConfigs(path string) ([]ParsedRelabelConfig, error) { data, err := ioutil.ReadFile(path) if err != nil { - return nil, fmt.Errorf("cannot read `relabel_configs` from %q: %s", path, err) + return nil, fmt.Errorf("cannot read `relabel_configs` from %q: %w", path, err) } var rcs []RelabelConfig if err := yaml.UnmarshalStrict(data, &rcs); err != nil { - return nil, fmt.Errorf("cannot unmarshal `relabel_configs` from %q: %s", path, err) + return nil, fmt.Errorf("cannot unmarshal `relabel_configs` from %q: %w", path, err) } return ParseRelabelConfigs(nil, rcs) } @@ -44,7 +44,7 @@ func ParseRelabelConfigs(dst []ParsedRelabelConfig, rcs []RelabelConfig) ([]Pars var err error dst, err = parseRelabelConfig(dst, &rcs[i]) if err != nil { - return dst, fmt.Errorf("error when parsing `relabel_config` #%d: %s", i+1, err) + return dst, fmt.Errorf("error when parsing `relabel_config` #%d: %w", i+1, err) } } return dst, nil @@ -67,7 +67,7 @@ func parseRelabelConfig(dst []ParsedRelabelConfig, rc *RelabelConfig) ([]ParsedR } re, err := regexp.Compile(regex) if err != nil { - return dst, fmt.Errorf("cannot parse `regex` %q: %s", regex, err) + return dst, fmt.Errorf("cannot parse `regex` %q: %w", regex, err) } regexCompiled = re } diff --git a/lib/promscrape/client.go b/lib/promscrape/client.go index f4e28936b..8642e57a9 100644 --- a/lib/promscrape/client.go +++ b/lib/promscrape/client.go @@ -94,13 +94,13 @@ func (c *client) ReadData(dst []byte) ([]byte, error) { fasthttp.ReleaseResponse(resp) if err == fasthttp.ErrTimeout { scrapesTimedout.Inc() - return dst, fmt.Errorf("error when scraping %q with timeout %s: %s", c.scrapeURL, c.hc.ReadTimeout, err) + return dst, fmt.Errorf("error when scraping %q with timeout %s: %w", c.scrapeURL, c.hc.ReadTimeout, err) } if err == fasthttp.ErrBodyTooLarge { return dst, fmt.Errorf("the response from %q exceeds -promscrape.maxScrapeSize=%d; "+ "either reduce the response size for the target or increase -promscrape.maxScrapeSize", c.scrapeURL, *maxScrapeSize) } - return dst, fmt.Errorf("error when scraping %q: %s", c.scrapeURL, err) + return dst, fmt.Errorf("error when scraping %q: %w", c.scrapeURL, err) } dstLen := len(dst) if ce := resp.Header.Peek("Content-Encoding"); string(ce) == "gzip" { @@ -109,7 +109,7 @@ func (c *client) ReadData(dst []byte) ([]byte, error) { if err != nil { fasthttp.ReleaseResponse(resp) scrapesGunzipFailed.Inc() - return dst, fmt.Errorf("cannot ungzip response from %q: %s", c.scrapeURL, err) + return dst, fmt.Errorf("cannot ungzip response from %q: %w", c.scrapeURL, err) } scrapesGunzipped.Inc() } else { @@ -146,7 +146,7 @@ again: // Retry request if the server closed the keep-alive connection during the first attempt. 
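// The retry hunk continuing below gives up after 3 closed keep-alive connections
// and now wraps the final error with %w, so the transport's sentinel stays testable
// upstream. A dependency-free sketch of that bounded-retry shape; errConnClosed and
// doRequest are hypothetical stand-ins for the fasthttp details:
package main

import (
	"errors"
	"fmt"
)

var errConnClosed = errors.New("the server closed the connection")

func doRequestWithRetries(doRequest func() error) error {
	attempts := 0
	for {
		err := doRequest()
		if err == nil {
			return nil
		}
		if !errors.Is(err, errConnClosed) {
			return err // not a retryable failure
		}
		attempts++
		if attempts > 3 {
			return fmt.Errorf("the server closed 3 subsequent connections: %w", err)
		}
	}
}

func main() {
	calls := 0
	err := doRequestWithRetries(func() error {
		calls++
		return errConnClosed // always fail, to demonstrate the retry cap
	})
	fmt.Println(calls, errors.Is(err, errConnClosed)) // 4 true
}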
attempts++ if attempts > 3 { - return fmt.Errorf("the server closed 3 subsequent connections: %s", err) + return fmt.Errorf("the server closed 3 subsequent connections: %w", err) } goto again } diff --git a/lib/promscrape/config.go b/lib/promscrape/config.go index b71b88f2c..39071ad69 100644 --- a/lib/promscrape/config.go +++ b/lib/promscrape/config.go @@ -99,11 +99,11 @@ type StaticConfig struct { func loadStaticConfigs(path string) ([]StaticConfig, error) { data, err := ioutil.ReadFile(path) if err != nil { - return nil, fmt.Errorf("cannot read `static_configs` from %q: %s", path, err) + return nil, fmt.Errorf("cannot read `static_configs` from %q: %w", path, err) } var stcs []StaticConfig if err := yaml.UnmarshalStrict(data, &stcs); err != nil { - return nil, fmt.Errorf("cannot unmarshal `static_configs` from %q: %s", path, err) + return nil, fmt.Errorf("cannot unmarshal `static_configs` from %q: %w", path, err) } return stcs, nil } @@ -112,11 +112,11 @@ func loadStaticConfigs(path string) ([]StaticConfig, error) { func loadConfig(path string) (cfg *Config, data []byte, err error) { data, err = ioutil.ReadFile(path) if err != nil { - return nil, nil, fmt.Errorf("cannot read Prometheus config from %q: %s", path, err) + return nil, nil, fmt.Errorf("cannot read Prometheus config from %q: %w", path, err) } var cfgObj Config if err := cfgObj.parse(data, path); err != nil { - return nil, nil, fmt.Errorf("cannot parse Prometheus config from %q: %s", path, err) + return nil, nil, fmt.Errorf("cannot parse Prometheus config from %q: %w", path, err) } if *dryRun { // This is a dirty hack for checking Prometheus config only. @@ -130,18 +130,18 @@ func loadConfig(path string) (cfg *Config, data []byte, err error) { func (cfg *Config) parse(data []byte, path string) error { if err := unmarshalMaybeStrict(data, cfg); err != nil { - return fmt.Errorf("cannot unmarshal data: %s", err) + return fmt.Errorf("cannot unmarshal data: %w", err) } absPath, err := filepath.Abs(path) if err != nil { - return fmt.Errorf("cannot obtain abs path for %q: %s", path, err) + return fmt.Errorf("cannot obtain abs path for %q: %w", path, err) } cfg.baseDir = filepath.Dir(absPath) for i := range cfg.ScrapeConfigs { sc := &cfg.ScrapeConfigs[i] swc, err := getScrapeWorkConfig(sc, cfg.baseDir, &cfg.Global) if err != nil { - return fmt.Errorf("cannot parse `scrape_config` #%d: %s", i+1, err) + return fmt.Errorf("cannot parse `scrape_config` #%d: %w", i+1, err) } sc.swc = swc } @@ -378,17 +378,17 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf params := sc.Params ac, err := promauth.NewConfig(baseDir, sc.BasicAuth, sc.BearerToken, sc.BearerTokenFile, sc.TLSConfig) if err != nil { - return nil, fmt.Errorf("cannot parse auth config for `job_name` %q: %s", jobName, err) + return nil, fmt.Errorf("cannot parse auth config for `job_name` %q: %w", jobName, err) } var relabelConfigs []promrelabel.ParsedRelabelConfig relabelConfigs, err = promrelabel.ParseRelabelConfigs(relabelConfigs[:0], sc.RelabelConfigs) if err != nil { - return nil, fmt.Errorf("cannot parse `relabel_configs` for `job_name` %q: %s", jobName, err) + return nil, fmt.Errorf("cannot parse `relabel_configs` for `job_name` %q: %w", jobName, err) } var metricRelabelConfigs []promrelabel.ParsedRelabelConfig metricRelabelConfigs, err = promrelabel.ParseRelabelConfigs(metricRelabelConfigs[:0], sc.MetricRelabelConfigs) if err != nil { - return nil, fmt.Errorf("cannot parse `metric_relabel_configs` for `job_name` %q: %s", jobName, err) + 
return nil, fmt.Errorf("cannot parse `metric_relabel_configs` for `job_name` %q: %w", jobName, err) } swc := &scrapeWorkConfig{ scrapeInterval: scrapeInterval, @@ -580,7 +580,7 @@ func appendScrapeWork(dst []ScrapeWork, swc *scrapeWorkConfig, target string, ex paramsStr := url.Values(paramsRelabeled).Encode() scrapeURL := fmt.Sprintf("%s://%s%s%s%s", schemeRelabeled, addressRelabeled, metricsPathRelabeled, optionalQuestion, paramsStr) if _, err := url.Parse(scrapeURL); err != nil { - return dst, fmt.Errorf("invalid url %q for scheme=%q (%q), target=%q (%q), metrics_path=%q (%q) for `job_name` %q: %s", + return dst, fmt.Errorf("invalid url %q for scheme=%q (%q), target=%q (%q), metrics_path=%q (%q) for `job_name` %q: %w", scrapeURL, swc.scheme, schemeRelabeled, target, addressRelabeled, swc.metricsPath, metricsPathRelabeled, swc.jobName, err) } // Set missing "instance" label according to https://www.robustperception.io/life-of-a-label diff --git a/lib/promscrape/config_test.go b/lib/promscrape/config_test.go index fafdc17dc..e3c3e1be2 100644 --- a/lib/promscrape/config_test.go +++ b/lib/promscrape/config_test.go @@ -135,7 +135,7 @@ scrape_configs: func getFileSDScrapeWork(data []byte, path string) ([]ScrapeWork, error) { var cfg Config if err := cfg.parse(data, path); err != nil { - return nil, fmt.Errorf("cannot parse data: %s", err) + return nil, fmt.Errorf("cannot parse data: %w", err) } return cfg.getFileSDScrapeWork(nil), nil } @@ -143,7 +143,7 @@ func getFileSDScrapeWork(data []byte, path string) ([]ScrapeWork, error) { func getStaticScrapeWork(data []byte, path string) ([]ScrapeWork, error) { var cfg Config if err := cfg.parse(data, path); err != nil { - return nil, fmt.Errorf("cannot parse data: %s", err) + return nil, fmt.Errorf("cannot parse data: %w", err) } return cfg.getStaticScrapeWork(), nil } diff --git a/lib/promscrape/discovery/consul/agent.go b/lib/promscrape/discovery/consul/agent.go index a1f612efb..341bf6bbd 100644 --- a/lib/promscrape/discovery/consul/agent.go +++ b/lib/promscrape/discovery/consul/agent.go @@ -22,7 +22,7 @@ type AgentConfig struct { func parseAgent(data []byte) (*Agent, error) { var a Agent if err := json.Unmarshal(data, &a); err != nil { - return nil, fmt.Errorf("cannot unmarshal agent info from %q: %s", data, err) + return nil, fmt.Errorf("cannot unmarshal agent info from %q: %w", data, err) } return &a, nil } diff --git a/lib/promscrape/discovery/consul/api.go b/lib/promscrape/discovery/consul/api.go index cbcd93ccf..8010616ac 100644 --- a/lib/promscrape/discovery/consul/api.go +++ b/lib/promscrape/discovery/consul/api.go @@ -47,7 +47,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { } ac, err := promauth.NewConfig(baseDir, ba, token, "", sdc.TLSConfig) if err != nil { - return nil, fmt.Errorf("cannot parse auth config: %s", err) + return nil, fmt.Errorf("cannot parse auth config: %w", err) } apiServer := sdc.Server if apiServer == "" { @@ -62,7 +62,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { } client, err := discoveryutils.NewClient(apiServer, ac) if err != nil { - return nil, fmt.Errorf("cannot create HTTP client for %q: %s", apiServer, err) + return nil, fmt.Errorf("cannot create HTTP client for %q: %w", apiServer, err) } tagSeparator := "," if sdc.TagSeparator != nil { @@ -92,7 +92,7 @@ func getToken(token *string) (string, error) { if tokenFile := os.Getenv("CONSUL_HTTP_TOKEN_FILE"); tokenFile != "" { data, err := ioutil.ReadFile(tokenFile) if err != nil { - return "", 
fmt.Errorf("cannot read consul token file %q; probably, `token` arg is missing in `consul_sd_config`? error: %s", tokenFile, err) + return "", fmt.Errorf("cannot read consul token file %q; probably, `token` arg is missing in `consul_sd_config`? error: %w", tokenFile, err) } return string(data), nil } @@ -108,7 +108,7 @@ func getDatacenter(client *discoveryutils.Client, dc string) (string, error) { // See https://www.consul.io/api/agent.html#read-configuration data, err := client.GetAPIResponse("/v1/agent/self") if err != nil { - return "", fmt.Errorf("cannot query consul agent info: %s", err) + return "", fmt.Errorf("cannot query consul agent info: %w", err) } a, err := parseAgent(data) if err != nil { diff --git a/lib/promscrape/discovery/consul/consul.go b/lib/promscrape/discovery/consul/consul.go index 9886d4e09..25b8c501a 100644 --- a/lib/promscrape/discovery/consul/consul.go +++ b/lib/promscrape/discovery/consul/consul.go @@ -30,11 +30,11 @@ type SDConfig struct { func GetLabels(sdc *SDConfig, baseDir string) ([]map[string]string, error) { cfg, err := getAPIConfig(sdc, baseDir) if err != nil { - return nil, fmt.Errorf("cannot get API config: %s", err) + return nil, fmt.Errorf("cannot get API config: %w", err) } ms, err := getServiceNodesLabels(cfg) if err != nil { - return nil, fmt.Errorf("error when fetching service nodes data from Consul: %s", err) + return nil, fmt.Errorf("error when fetching service nodes data from Consul: %w", err) } return ms, nil } diff --git a/lib/promscrape/discovery/consul/service_node.go b/lib/promscrape/discovery/consul/service_node.go index 950591ece..2b3b9821a 100644 --- a/lib/promscrape/discovery/consul/service_node.go +++ b/lib/promscrape/discovery/consul/service_node.go @@ -28,11 +28,11 @@ func getAllServiceNodes(cfg *apiConfig) ([]ServiceNode, error) { // See https://www.consul.io/api/catalog.html#list-services data, err := getAPIResponse(cfg, "/v1/catalog/services") if err != nil { - return nil, fmt.Errorf("cannot obtain services: %s", err) + return nil, fmt.Errorf("cannot obtain services: %w", err) } var m map[string][]string if err := json.Unmarshal(data, &m); err != nil { - return nil, fmt.Errorf("cannot parse services response %q: %s", data, err) + return nil, fmt.Errorf("cannot parse services response %q: %w", data, err) } serviceNames := make(map[string]bool) for serviceName, tags := range m { @@ -125,7 +125,7 @@ func getServiceNodes(cfg *apiConfig, serviceName string) ([]ServiceNode, error) } data, err := getAPIResponse(cfg, path) if err != nil { - return nil, fmt.Errorf("cannot obtain instances for serviceName=%q: %s", serviceName, err) + return nil, fmt.Errorf("cannot obtain instances for serviceName=%q: %w", serviceName, err) } return parseServiceNodes(data) } @@ -173,7 +173,7 @@ type Check struct { func parseServiceNodes(data []byte) ([]ServiceNode, error) { var sns []ServiceNode if err := json.Unmarshal(data, &sns); err != nil { - return nil, fmt.Errorf("cannot unmarshal ServiceNodes from %q: %s", data, err) + return nil, fmt.Errorf("cannot unmarshal ServiceNodes from %q: %w", data, err) } return sns, nil } diff --git a/lib/promscrape/discovery/ec2/api.go b/lib/promscrape/discovery/ec2/api.go index e640fa2fa..38e0d6bc0 100644 --- a/lib/promscrape/discovery/ec2/api.go +++ b/lib/promscrape/discovery/ec2/api.go @@ -36,7 +36,7 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) { if len(region) == 0 { r, err := getDefaultRegion() if err != nil { - return nil, fmt.Errorf("cannot determine default ec2 region; probably, `region` param in 
`ec2_sd_configs` is missing; the error: %s", err) + return nil, fmt.Errorf("cannot determine default ec2 region; probably, `region` param in `ec2_sd_configs` is missing; the error: %w", err) } region = r } @@ -88,7 +88,7 @@ func getDefaultRegion() (string, error) { } var id IdentityDocument if err := json.Unmarshal(data, &id); err != nil { - return "", fmt.Errorf("cannot parse identity document: %s", err) + return "", fmt.Errorf("cannot parse identity document: %w", err) } return id.Region, nil } @@ -109,28 +109,28 @@ func getMetadataByPath(apiPath string) ([]byte, error) { sessionTokenURL := "http://169.254.169.254/latest/api/token" req, err := http.NewRequest("PUT", sessionTokenURL, nil) if err != nil { - return nil, fmt.Errorf("cannot create request for IMDSv2 session token at url %q: %s", sessionTokenURL, err) + return nil, fmt.Errorf("cannot create request for IMDSv2 session token at url %q: %w", sessionTokenURL, err) } req.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", "60") resp, err := client.Do(req) if err != nil { - return nil, fmt.Errorf("cannot obtain IMDSv2 session token from %q; probably, `region` is missing in `ec2_sd_config`; error: %s", sessionTokenURL, err) + return nil, fmt.Errorf("cannot obtain IMDSv2 session token from %q; probably, `region` is missing in `ec2_sd_config`; error: %w", sessionTokenURL, err) } token, err := readResponseBody(resp, sessionTokenURL) if err != nil { - return nil, fmt.Errorf("cannot read IMDSv2 session token from %q; probably, `region` is missing in `ec2_sd_config`; error: %s", sessionTokenURL, err) + return nil, fmt.Errorf("cannot read IMDSv2 session token from %q; probably, `region` is missing in `ec2_sd_config`; error: %w", sessionTokenURL, err) } // Use session token in the request. apiURL := "http://169.254.169.254/latest/" + apiPath req, err = http.NewRequest("GET", apiURL, nil) if err != nil { - return nil, fmt.Errorf("cannot create request to %q: %s", apiURL, err) + return nil, fmt.Errorf("cannot create request to %q: %w", apiURL, err) } req.Header.Set("X-aws-ec2-metadata-token", string(token)) resp, err = client.Do(req) if err != nil { - return nil, fmt.Errorf("cannot obtain response for %q: %s", apiURL, err) + return nil, fmt.Errorf("cannot obtain response for %q: %w", apiURL, err) } return readResponseBody(resp, apiURL) } @@ -158,11 +158,11 @@ func getAPIResponse(cfg *apiConfig, action, nextPageToken string) ([]byte, error apiURL += "&Version=2013-10-15" req, err := newSignedRequest(apiURL, "ec2", cfg.region, cfg.accessKey, cfg.secretKey) if err != nil { - return nil, fmt.Errorf("cannot create signed request: %s", err) + return nil, fmt.Errorf("cannot create signed request: %w", err) } resp, err := discoveryutils.GetHTTPClient().Do(req) if err != nil { - return nil, fmt.Errorf("cannot perform http request to %q: %s", apiURL, err) + return nil, fmt.Errorf("cannot perform http request to %q: %w", apiURL, err) } return readResponseBody(resp, apiURL) } @@ -171,7 +171,7 @@ func readResponseBody(resp *http.Response, apiURL string) ([]byte, error) { data, err := ioutil.ReadAll(resp.Body) _ = resp.Body.Close() if err != nil { - return nil, fmt.Errorf("cannot read response from %q: %s", apiURL, err) + return nil, fmt.Errorf("cannot read response from %q: %w", apiURL, err) } if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("unexpected status code for %q; got %d; want %d; response body: %q", diff --git a/lib/promscrape/discovery/ec2/ec2.go b/lib/promscrape/discovery/ec2/ec2.go index f49902b5b..86dbf0f6a 100644 --- 
a/lib/promscrape/discovery/ec2/ec2.go +++ b/lib/promscrape/discovery/ec2/ec2.go @@ -34,11 +34,11 @@ type Filter struct { func GetLabels(sdc *SDConfig) ([]map[string]string, error) { cfg, err := getAPIConfig(sdc) if err != nil { - return nil, fmt.Errorf("cannot get API config: %s", err) + return nil, fmt.Errorf("cannot get API config: %w", err) } ms, err := getInstancesLabels(cfg) if err != nil { - return nil, fmt.Errorf("error when fetching instances data from EC2: %s", err) + return nil, fmt.Errorf("error when fetching instances data from EC2: %w", err) } return ms, nil } diff --git a/lib/promscrape/discovery/ec2/instance.go b/lib/promscrape/discovery/ec2/instance.go index 333d443b9..4e3b62a56 100644 --- a/lib/promscrape/discovery/ec2/instance.go +++ b/lib/promscrape/discovery/ec2/instance.go @@ -31,11 +31,11 @@ func getReservations(cfg *apiConfig) ([]Reservation, error) { for { data, err := getAPIResponse(cfg, action, pageToken) if err != nil { - return nil, fmt.Errorf("cannot obtain instances: %s", err) + return nil, fmt.Errorf("cannot obtain instances: %w", err) } ir, err := parseInstancesResponse(data) if err != nil { - return nil, fmt.Errorf("cannot parse instance list: %s", err) + return nil, fmt.Errorf("cannot parse instance list: %w", err) } rs = append(rs, ir.ReservationSet.Items...) if len(ir.NextPageToken) == 0 { @@ -121,7 +121,7 @@ type Tag struct { func parseInstancesResponse(data []byte) (*InstancesResponse, error) { var v InstancesResponse if err := xml.Unmarshal(data, &v); err != nil { - return nil, fmt.Errorf("cannot unmarshal InstancesResponse from %q: %s", data, err) + return nil, fmt.Errorf("cannot unmarshal InstancesResponse from %q: %w", data, err) } return &v, nil } diff --git a/lib/promscrape/discovery/ec2/sign.go b/lib/promscrape/discovery/ec2/sign.go index 8f0244313..30e16d0b5 100644 --- a/lib/promscrape/discovery/ec2/sign.go +++ b/lib/promscrape/discovery/ec2/sign.go @@ -24,7 +24,7 @@ func newSignedRequest(apiURL, service, region, accessKey, secretKey string) (*ht func newSignedRequestWithTime(apiURL, service, region, accessKey, secretKey string, t time.Time) (*http.Request, error) { uri, err := url.Parse(apiURL) if err != nil { - return nil, fmt.Errorf("cannot parse %q: %s", apiURL, err) + return nil, fmt.Errorf("cannot parse %q: %w", apiURL, err) } // Create canonicalRequest @@ -65,7 +65,7 @@ func newSignedRequestWithTime(apiURL, service, region, accessKey, secretKey stri req, err := http.NewRequest("GET", apiURL, nil) if err != nil { - return nil, fmt.Errorf("cannot create request from %q: %s", apiURL, err) + return nil, fmt.Errorf("cannot create request from %q: %w", apiURL, err) } req.Header.Set("x-amz-date", amzdate) req.Header.Set("Authorization", authHeader) diff --git a/lib/promscrape/discovery/gce/api.go b/lib/promscrape/discovery/gce/api.go index b8aba9c1c..182f46371 100644 --- a/lib/promscrape/discovery/gce/api.go +++ b/lib/promscrape/discovery/gce/api.go @@ -36,13 +36,13 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) { ctx := context.Background() client, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/compute.readonly") if err != nil { - return nil, fmt.Errorf("cannot create oauth2 client for gce: %s", err) + return nil, fmt.Errorf("cannot create oauth2 client for gce: %w", err) } project := sdc.Project if len(project) == 0 { proj, err := getCurrentProject() if err != nil { - return nil, fmt.Errorf("cannot determine the current project; make sure `vmagent` runs inside GCE; error: %s", err) + return nil, fmt.Errorf("cannot 
determine the current project; make sure `vmagent` runs inside GCE; error: %w", err) } project = proj logger.Infof("autodetected the current GCE project: %q", project) @@ -52,7 +52,7 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) { // Autodetect the current zone. zone, err := getCurrentZone() if err != nil { - return nil, fmt.Errorf("cannot determine the current zone; make sure `vmagent` runs inside GCE; error: %s", err) + return nil, fmt.Errorf("cannot determine the current zone; make sure `vmagent` runs inside GCE; error: %w", err) } zones = append(zones, zone) logger.Infof("autodetected the current GCE zone: %q", zone) @@ -60,7 +60,7 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) { // Autodetect zones for project. zs, err := getZonesForProject(client, project, sdc.Filter) if err != nil { - return nil, fmt.Errorf("cannot obtain zones for project %q: %s", project, err) + return nil, fmt.Errorf("cannot obtain zones for project %q: %w", project, err) } zones = zs logger.Infof("autodetected all the zones for the GCE project %q: %q", project, zones) @@ -88,7 +88,7 @@ func getAPIResponse(client *http.Client, apiURL, filter, pageToken string) ([]by apiURL = appendNonEmptyQueryArg(apiURL, "pageToken", pageToken) resp, err := client.Get(apiURL) if err != nil { - return nil, fmt.Errorf("cannot query %q: %s", apiURL, err) + return nil, fmt.Errorf("cannot query %q: %w", apiURL, err) } return readResponseBody(resp, apiURL) } @@ -97,7 +97,7 @@ func readResponseBody(resp *http.Response, apiURL string) ([]byte, error) { data, err := ioutil.ReadAll(resp.Body) _ = resp.Body.Close() if err != nil { - return nil, fmt.Errorf("cannot read response from %q: %s", apiURL, err) + return nil, fmt.Errorf("cannot read response from %q: %w", apiURL, err) } if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("unexpected status code for %q; got %d; want %d; response body: %q", @@ -144,12 +144,12 @@ func getGCEMetadata(path string) ([]byte, error) { metadataURL := "http://metadata.google.internal/computeMetadata/v1/" + path req, err := http.NewRequest("GET", metadataURL, nil) if err != nil { - return nil, fmt.Errorf("cannot create http request for %q: %s", metadataURL, err) + return nil, fmt.Errorf("cannot create http request for %q: %w", metadataURL, err) } req.Header.Set("Metadata-Flavor", "Google") resp, err := discoveryutils.GetHTTPClient().Do(req) if err != nil { - return nil, fmt.Errorf("cannot obtain response to %q: %s", metadataURL, err) + return nil, fmt.Errorf("cannot obtain response to %q: %w", metadataURL, err) } return readResponseBody(resp, metadataURL) } diff --git a/lib/promscrape/discovery/gce/gce.go b/lib/promscrape/discovery/gce/gce.go index 52044edff..9227fe813 100644 --- a/lib/promscrape/discovery/gce/gce.go +++ b/lib/promscrape/discovery/gce/gce.go @@ -51,7 +51,7 @@ func (z *ZoneYAML) UnmarshalYAML(unmarshal func(interface{}) error) error { func GetLabels(sdc *SDConfig) ([]map[string]string, error) { cfg, err := getAPIConfig(sdc) if err != nil { - return nil, fmt.Errorf("cannot get API config: %s", err) + return nil, fmt.Errorf("cannot get API config: %w", err) } ms := getInstancesLabels(cfg) return ms, nil diff --git a/lib/promscrape/discovery/gce/instance.go b/lib/promscrape/discovery/gce/instance.go index b75e17002..345f7189c 100644 --- a/lib/promscrape/discovery/gce/instance.go +++ b/lib/promscrape/discovery/gce/instance.go @@ -58,11 +58,11 @@ func getInstancesForProjectAndZone(client *http.Client, project, zone, filter st for { data, err := getAPIResponse(client, 
instsURL, filter, pageToken) if err != nil { - return nil, fmt.Errorf("cannot obtain instances: %s", err) + return nil, fmt.Errorf("cannot obtain instances: %w", err) } il, err := parseInstanceList(data) if err != nil { - return nil, fmt.Errorf("cannot parse instance list from %q: %s", instsURL, err) + return nil, fmt.Errorf("cannot parse instance list from %q: %w", instsURL, err) } insts = append(insts, il.Items...) if len(il.NextPageToken) == 0 { @@ -125,7 +125,7 @@ type MetadataEntry struct { func parseInstanceList(data []byte) (*InstanceList, error) { var il InstanceList if err := json.Unmarshal(data, &il); err != nil { - return nil, fmt.Errorf("cannot unmarshal InstanceList from %q: %s", data, err) + return nil, fmt.Errorf("cannot unmarshal InstanceList from %q: %w", data, err) } return &il, nil } diff --git a/lib/promscrape/discovery/gce/zone.go b/lib/promscrape/discovery/gce/zone.go index 98dbfcff8..b949bab9e 100644 --- a/lib/promscrape/discovery/gce/zone.go +++ b/lib/promscrape/discovery/gce/zone.go @@ -14,11 +14,11 @@ func getZonesForProject(client *http.Client, project, filter string) ([]string, for { data, err := getAPIResponse(client, zonesURL, filter, pageToken) if err != nil { - return nil, fmt.Errorf("cannot obtain zones: %s", err) + return nil, fmt.Errorf("cannot obtain zones: %w", err) } zl, err := parseZoneList(data) if err != nil { - return nil, fmt.Errorf("cannot parse zone list from %q: %s", zonesURL, err) + return nil, fmt.Errorf("cannot parse zone list from %q: %w", zonesURL, err) } for _, z := range zl.Items { zones = append(zones, z.Name) @@ -45,7 +45,7 @@ type Zone struct { func parseZoneList(data []byte) (*ZoneList, error) { var zl ZoneList if err := json.Unmarshal(data, &zl); err != nil { - return nil, fmt.Errorf("cannot unmarshal ZoneList from %q: %s", data, err) + return nil, fmt.Errorf("cannot unmarshal ZoneList from %q: %w", data, err) } return &zl, nil } diff --git a/lib/promscrape/discovery/kubernetes/api.go b/lib/promscrape/discovery/kubernetes/api.go index 55da6b240..caa330643 100644 --- a/lib/promscrape/discovery/kubernetes/api.go +++ b/lib/promscrape/discovery/kubernetes/api.go @@ -29,7 +29,7 @@ func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { ac, err := promauth.NewConfig(baseDir, sdc.BasicAuth, sdc.BearerToken, sdc.BearerTokenFile, sdc.TLSConfig) if err != nil { - return nil, fmt.Errorf("cannot parse auth config: %s", err) + return nil, fmt.Errorf("cannot parse auth config: %w", err) } apiServer := sdc.APIServer if len(apiServer) == 0 { @@ -52,13 +52,13 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { } acNew, err := promauth.NewConfig(".", nil, "", "/var/run/secrets/kubernetes.io/serviceaccount/token", &tlsConfig) if err != nil { - return nil, fmt.Errorf("cannot initialize service account auth: %s; probably, `kubernetes_sd_config->api_server` is missing in Prometheus configs?", err) + return nil, fmt.Errorf("cannot initialize service account auth: %w; probably, `kubernetes_sd_config->api_server` is missing in Prometheus configs?", err) } ac = acNew } client, err := discoveryutils.NewClient(apiServer, ac) if err != nil { - return nil, fmt.Errorf("cannot create HTTP client for %q: %s", apiServer, err) + return nil, fmt.Errorf("cannot create HTTP client for %q: %w", apiServer, err) } cfg := &apiConfig{ client: client, diff --git a/lib/promscrape/discovery/kubernetes/endpoints.go b/lib/promscrape/discovery/kubernetes/endpoints.go index 
40b9c6b83..1bb3aaeb8 100644 --- a/lib/promscrape/discovery/kubernetes/endpoints.go +++ b/lib/promscrape/discovery/kubernetes/endpoints.go @@ -53,11 +53,11 @@ func getEndpoints(cfg *apiConfig) ([]Endpoints, error) { func getEndpointsByPath(cfg *apiConfig, path string) ([]Endpoints, error) { data, err := getAPIResponse(cfg, "endpoints", path) if err != nil { - return nil, fmt.Errorf("cannot obtain endpoints data from API server: %s", err) + return nil, fmt.Errorf("cannot obtain endpoints data from API server: %w", err) } epl, err := parseEndpointsList(data) if err != nil { - return nil, fmt.Errorf("cannot parse endpoints response from API server: %s", err) + return nil, fmt.Errorf("cannot parse endpoints response from API server: %w", err) } return epl.Items, nil } @@ -119,7 +119,7 @@ type EndpointPort struct { func parseEndpointsList(data []byte) (*EndpointsList, error) { var esl EndpointsList if err := json.Unmarshal(data, &esl); err != nil { - return nil, fmt.Errorf("cannot unmarshal EndpointsList from %q: %s", data, err) + return nil, fmt.Errorf("cannot unmarshal EndpointsList from %q: %w", data, err) } return &esl, nil } diff --git a/lib/promscrape/discovery/kubernetes/ingress.go b/lib/promscrape/discovery/kubernetes/ingress.go index b8f479772..5aad6d903 100644 --- a/lib/promscrape/discovery/kubernetes/ingress.go +++ b/lib/promscrape/discovery/kubernetes/ingress.go @@ -43,11 +43,11 @@ func getIngresses(cfg *apiConfig) ([]Ingress, error) { func getIngressesByPath(cfg *apiConfig, path string) ([]Ingress, error) { data, err := getAPIResponse(cfg, "ingress", path) if err != nil { - return nil, fmt.Errorf("cannot obtain ingresses data from API server: %s", err) + return nil, fmt.Errorf("cannot obtain ingresses data from API server: %w", err) } igl, err := parseIngressList(data) if err != nil { - return nil, fmt.Errorf("cannot parse ingresses response from API server: %s", err) + return nil, fmt.Errorf("cannot parse ingresses response from API server: %w", err) } return igl.Items, nil } @@ -108,7 +108,7 @@ type HTTPIngressPath struct { func parseIngressList(data []byte) (*IngressList, error) { var il IngressList if err := json.Unmarshal(data, &il); err != nil { - return nil, fmt.Errorf("cannot unmarshal IngressList from %q: %s", data, err) + return nil, fmt.Errorf("cannot unmarshal IngressList from %q: %w", data, err) } return &il, nil } diff --git a/lib/promscrape/discovery/kubernetes/kubernetes.go b/lib/promscrape/discovery/kubernetes/kubernetes.go index 320351626..f1844792f 100644 --- a/lib/promscrape/discovery/kubernetes/kubernetes.go +++ b/lib/promscrape/discovery/kubernetes/kubernetes.go @@ -39,7 +39,7 @@ type Selector struct { func GetLabels(sdc *SDConfig, baseDir string) ([]map[string]string, error) { cfg, err := getAPIConfig(sdc, baseDir) if err != nil { - return nil, fmt.Errorf("cannot create API config: %s", err) + return nil, fmt.Errorf("cannot create API config: %w", err) } switch sdc.Role { case "node": diff --git a/lib/promscrape/discovery/kubernetes/node.go b/lib/promscrape/discovery/kubernetes/node.go index df521f12f..411b25978 100644 --- a/lib/promscrape/discovery/kubernetes/node.go +++ b/lib/promscrape/discovery/kubernetes/node.go @@ -11,11 +11,11 @@ import ( func getNodesLabels(cfg *apiConfig) ([]map[string]string, error) { data, err := getAPIResponse(cfg, "node", "/api/v1/nodes") if err != nil { - return nil, fmt.Errorf("cannot obtain nodes data from API server: %s", err) + return nil, fmt.Errorf("cannot obtain nodes data from API server: %w", err) } nl, err := 
parseNodeList(data) if err != nil { - return nil, fmt.Errorf("cannot parse nodes response from API server: %s", err) + return nil, fmt.Errorf("cannot parse nodes response from API server: %w", err) } var ms []map[string]string for _, n := range nl.Items { @@ -67,7 +67,7 @@ type NodeDaemonEndpoints struct { func parseNodeList(data []byte) (*NodeList, error) { var nl NodeList if err := json.Unmarshal(data, &nl); err != nil { - return nil, fmt.Errorf("cannot unmarshal NodeList from %q: %s", data, err) + return nil, fmt.Errorf("cannot unmarshal NodeList from %q: %w", data, err) } return &nl, nil } diff --git a/lib/promscrape/discovery/kubernetes/pod.go b/lib/promscrape/discovery/kubernetes/pod.go index 521c57a25..3bc3495b0 100644 --- a/lib/promscrape/discovery/kubernetes/pod.go +++ b/lib/promscrape/discovery/kubernetes/pod.go @@ -47,11 +47,11 @@ func getPods(cfg *apiConfig) ([]Pod, error) { func getPodsByPath(cfg *apiConfig, path string) ([]Pod, error) { data, err := getAPIResponse(cfg, "pod", path) if err != nil { - return nil, fmt.Errorf("cannot obtain pods data from API server: %s", err) + return nil, fmt.Errorf("cannot obtain pods data from API server: %w", err) } pl, err := parsePodList(data) if err != nil { - return nil, fmt.Errorf("cannot parse pods response from API server: %s", err) + return nil, fmt.Errorf("cannot parse pods response from API server: %w", err) } return pl.Items, nil } @@ -118,7 +118,7 @@ type PodCondition struct { func parsePodList(data []byte) (*PodList, error) { var pl PodList if err := json.Unmarshal(data, &pl); err != nil { - return nil, fmt.Errorf("cannot unmarshal PodList from %q: %s", data, err) + return nil, fmt.Errorf("cannot unmarshal PodList from %q: %w", data, err) } return &pl, nil } diff --git a/lib/promscrape/discovery/kubernetes/service.go b/lib/promscrape/discovery/kubernetes/service.go index f14c8c9d1..5ce94a4c9 100644 --- a/lib/promscrape/discovery/kubernetes/service.go +++ b/lib/promscrape/discovery/kubernetes/service.go @@ -45,11 +45,11 @@ func getServices(cfg *apiConfig) ([]Service, error) { func getServicesByPath(cfg *apiConfig, path string) ([]Service, error) { data, err := getAPIResponse(cfg, "service", path) if err != nil { - return nil, fmt.Errorf("cannot obtain services data from API server: %s", err) + return nil, fmt.Errorf("cannot obtain services data from API server: %w", err) } sl, err := parseServiceList(data) if err != nil { - return nil, fmt.Errorf("cannot parse services response from API server: %s", err) + return nil, fmt.Errorf("cannot parse services response from API server: %w", err) } return sl.Items, nil } @@ -92,7 +92,7 @@ type ServicePort struct { func parseServiceList(data []byte) (*ServiceList, error) { var sl ServiceList if err := json.Unmarshal(data, &sl); err != nil { - return nil, fmt.Errorf("cannot unmarshal ServiceList from %q: %s", data, err) + return nil, fmt.Errorf("cannot unmarshal ServiceList from %q: %w", data, err) } return &sl, nil } diff --git a/lib/promscrape/discoveryutils/client.go b/lib/promscrape/discoveryutils/client.go index b8b70f938..99f557b97 100644 --- a/lib/promscrape/discoveryutils/client.go +++ b/lib/promscrape/discoveryutils/client.go @@ -112,13 +112,13 @@ func (c *Client) GetAPIResponse(path string) ([]byte, error) { var resp fasthttp.Response // There is no need in calling DoTimeout, since the timeout is already set in c.hc.ReadTimeout above. 
if err := c.hc.Do(&req, &resp); err != nil { - return nil, fmt.Errorf("cannot fetch %q: %s", requestURL, err) + return nil, fmt.Errorf("cannot fetch %q: %w", requestURL, err) } var data []byte if ce := resp.Header.Peek("Content-Encoding"); string(ce) == "gzip" { dst, err := fasthttp.AppendGunzipBytes(nil, resp.Body()) if err != nil { - return nil, fmt.Errorf("cannot ungzip response from %q: %s", requestURL, err) + return nil, fmt.Errorf("cannot ungzip response from %q: %w", requestURL, err) } data = dst } else { diff --git a/lib/promscrape/scrapework_test.go b/lib/promscrape/scrapework_test.go index bdc273303..3fd1eb4a7 100644 --- a/lib/promscrape/scrapework_test.go +++ b/lib/promscrape/scrapework_test.go @@ -32,7 +32,7 @@ func TestScrapeWorkScrapeInternalFailure(t *testing.T) { var pushDataErr error sw.PushData = func(wr *prompbmarshal.WriteRequest) { if err := expectEqualTimeseries(wr.Timeseries, timeseriesExpected); err != nil { - pushDataErr = fmt.Errorf("unexpected data pushed: %s\ngot\n%#v\nwant\n%#v", err, wr.Timeseries, timeseriesExpected) + pushDataErr = fmt.Errorf("unexpected data pushed: %w\ngot\n%#v\nwant\n%#v", err, wr.Timeseries, timeseriesExpected) } pushDataCalls++ } @@ -72,7 +72,7 @@ func TestScrapeWorkScrapeInternalSuccess(t *testing.T) { var pushDataErr error sw.PushData = func(wr *prompbmarshal.WriteRequest) { if err := expectEqualTimeseries(wr.Timeseries, timeseriesExpected); err != nil { - pushDataErr = fmt.Errorf("unexpected data pushed: %s\ngot\n%#v\nwant\n%#v", err, wr.Timeseries, timeseriesExpected) + pushDataErr = fmt.Errorf("unexpected data pushed: %w\ngot\n%#v\nwant\n%#v", err, wr.Timeseries, timeseriesExpected) } pushDataCalls++ } @@ -336,11 +336,11 @@ func parseData(data string) []prompbmarshal.TimeSeries { func expectEqualTimeseries(tss, tssExpected []prompbmarshal.TimeSeries) error { m, err := timeseriesToMap(tss) if err != nil { - return fmt.Errorf("invalid generated timeseries: %s", err) + return fmt.Errorf("invalid generated timeseries: %w", err) } mExpected, err := timeseriesToMap(tssExpected) if err != nil { - return fmt.Errorf("invalid expected timeseries: %s", err) + return fmt.Errorf("invalid expected timeseries: %w", err) } if len(m) != len(mExpected) { return fmt.Errorf("unexpected time series len; got %d; want %d", len(m), len(mExpected)) diff --git a/lib/promscrape/scrapework_timing_test.go b/lib/promscrape/scrapework_timing_test.go index f8d76eab6..554da3338 100644 --- a/lib/promscrape/scrapework_timing_test.go +++ b/lib/promscrape/scrapework_timing_test.go @@ -42,7 +42,7 @@ vm_tcplistener_write_calls_total{name="https", addr=":443"} 132356 timestamp := int64(0) for pb.Next() { if err := sw.scrapeInternal(timestamp); err != nil { - panic(fmt.Errorf("unexpected error: %s", err)) + panic(fmt.Errorf("unexpected error: %w", err)) } timestamp++ } diff --git a/lib/protoparser/csvimport/column_descriptor.go b/lib/protoparser/csvimport/column_descriptor.go index 1ee35088b..ab2c84e62 100644 --- a/lib/protoparser/csvimport/column_descriptor.go +++ b/lib/protoparser/csvimport/column_descriptor.go @@ -63,7 +63,7 @@ func ParseColumnDescriptors(s string) ([]ColumnDescriptor, error) { } pos, err := strconv.Atoi(a[0]) if err != nil { - return nil, fmt.Errorf("cannot parse part from the entry #%d %q: %s", i+1, col, err) + return nil, fmt.Errorf("cannot parse part from the entry #%d %q: %w", i+1, col, err) } if pos <= 0 { return nil, fmt.Errorf("<pos> cannot be smaller than 1; got %d for entry #%d %q", pos, i+1, col) @@ -82,7 +82,7 @@ func ParseColumnDescriptors(s 
string) ([]ColumnDescriptor, error) { } parseTimestamp, err := parseTimeFormat(a[2]) if err != nil { - return nil, fmt.Errorf("cannot parse time format from the entry #%d %q: %s", i+1, col, err) + return nil, fmt.Errorf("cannot parse time format from the entry #%d %q: %w", i+1, col, err) } cd.ParseTimestamp = parseTimestamp hasTimeCol = true @@ -156,7 +156,7 @@ func parseUnixTimestampNanoseconds(s string) (int64, error) { func parseRFC3339(s string) (int64, error) { t, err := time.Parse(time.RFC3339, s) if err != nil { - return 0, fmt.Errorf("cannot parse time in RFC3339 from %q: %s", s, err) + return 0, fmt.Errorf("cannot parse time in RFC3339 from %q: %w", s, err) } return t.UnixNano() / 1e6, nil } @@ -165,7 +165,7 @@ func newParseCustomTimeFunc(format string) func(s string) (int64, error) { return func(s string) (int64, error) { t, err := time.Parse(format, s) if err != nil { - return 0, fmt.Errorf("cannot parse time in custom format %q from %q: %s", format, s, err) + return 0, fmt.Errorf("cannot parse time in custom format %q from %q: %w", format, s, err) } return t.UnixNano() / 1e6, nil } diff --git a/lib/protoparser/csvimport/parser.go b/lib/protoparser/csvimport/parser.go index f6425f07b..5e74216e3 100644 --- a/lib/protoparser/csvimport/parser.go +++ b/lib/protoparser/csvimport/parser.go @@ -91,7 +91,7 @@ func parseRows(sc *scanner, dst []Row, tags []Tag, metrics []metric, cds []Colum if parseTimestamp := cd.ParseTimestamp; parseTimestamp != nil { timestamp, err := parseTimestamp(sc.Column) if err != nil { - sc.Error = fmt.Errorf("cannot parse timestamp from %q: %s", sc.Column, err) + sc.Error = fmt.Errorf("cannot parse timestamp from %q: %w", sc.Column, err) break } r.Timestamp = timestamp diff --git a/lib/protoparser/csvimport/streamparser.go b/lib/protoparser/csvimport/streamparser.go index 47f66c6d0..dda5daef1 100644 --- a/lib/protoparser/csvimport/streamparser.go +++ b/lib/protoparser/csvimport/streamparser.go @@ -30,13 +30,13 @@ func ParseStream(req *http.Request, callback func(rows []Row) error) error { format := q.Get("format") cds, err := ParseColumnDescriptors(format) if err != nil { - return fmt.Errorf("cannot parse the provided csv format: %s", err) + return fmt.Errorf("cannot parse the provided csv format: %w", err) } r := req.Body if req.Header.Get("Content-Encoding") == "gzip" { zr, err := common.GetGzipReader(r) if err != nil { - return fmt.Errorf("cannot read gzipped csv data: %s", err) + return fmt.Errorf("cannot read gzipped csv data: %w", err) } defer common.PutGzipReader(zr) r = zr @@ -60,7 +60,7 @@ func (ctx *streamContext) Read(r io.Reader, cds []ColumnDescriptor) bool { if ctx.err != nil { if ctx.err != io.EOF { readErrors.Inc() - ctx.err = fmt.Errorf("cannot read csv data: %s", ctx.err) + ctx.err = fmt.Errorf("cannot read csv data: %w", ctx.err) } return false } } diff --git a/lib/protoparser/graphite/parser.go b/lib/protoparser/graphite/parser.go index 79838f3db..5ee12c782 100644 --- a/lib/protoparser/graphite/parser.go +++ b/lib/protoparser/graphite/parser.go @@ -75,7 +75,7 @@ func (r *Row) unmarshal(s string, tagsPool []Tag) ([]Tag, error) { var err error tagsPool, err = unmarshalTags(tagsPool, metricAndTags[n+1:]) if err != nil { - return tagsPool, fmt.Errorf("cannot umarshal tags: %s", err) + return tagsPool, fmt.Errorf("cannot unmarshal tags: %w", err) } tags := tagsPool[tagsStart:] r.Tags = tags[:len(tags):len(tags)] diff --git a/lib/protoparser/graphite/streamparser.go b/lib/protoparser/graphite/streamparser.go index 3d6657f24..5b4d7fece 100644 --- 
a/lib/protoparser/graphite/streamparser.go +++ b/lib/protoparser/graphite/streamparser.go @@ -47,7 +47,7 @@ func (ctx *streamContext) Read(r io.Reader) bool { if c, ok := r.(net.Conn); ok { if err := c.SetReadDeadline(time.Now().Add(flushTimeout)); err != nil { readErrors.Inc() - ctx.err = fmt.Errorf("cannot set read deadline: %s", err) + ctx.err = fmt.Errorf("cannot set read deadline: %w", err) return false } } @@ -59,7 +59,7 @@ func (ctx *streamContext) Read(r io.Reader) bool { } else { if ctx.err != io.EOF { readErrors.Inc() - ctx.err = fmt.Errorf("cannot read graphite plaintext protocol data: %s", ctx.err) + ctx.err = fmt.Errorf("cannot read graphite plaintext protocol data: %w", ctx.err) } return false } diff --git a/lib/protoparser/influx/parser.go b/lib/protoparser/influx/parser.go index df1709fe9..96916e995 100644 --- a/lib/protoparser/influx/parser.go +++ b/lib/protoparser/influx/parser.go @@ -162,7 +162,7 @@ func (f *Field) unmarshal(s string, noEscapeChars, hasQuotedFields bool) error { } v, err := parseFieldValue(s[n+1:], hasQuotedFields) if err != nil { - return fmt.Errorf("cannot parse field value for %q: %s", f.Key, err) + return fmt.Errorf("cannot parse field value for %q: %w", f.Key, err) } f.Value = v return nil diff --git a/lib/protoparser/influx/streamparser.go b/lib/protoparser/influx/streamparser.go index 918b4270a..ddbe155da 100644 --- a/lib/protoparser/influx/streamparser.go +++ b/lib/protoparser/influx/streamparser.go @@ -28,7 +28,7 @@ func ParseStream(r io.Reader, isGzipped bool, precision, db string, callback fun if isGzipped { zr, err := common.GetGzipReader(r) if err != nil { - return fmt.Errorf("cannot read gzipped influx line protocol data: %s", err) + return fmt.Errorf("cannot read gzipped influx line protocol data: %w", err) } defer common.PutGzipReader(zr) r = zr @@ -69,7 +69,7 @@ func (ctx *streamContext) Read(r io.Reader, tsMultiplier int64) bool { if ctx.err != nil { if ctx.err != io.EOF { readErrors.Inc() - ctx.err = fmt.Errorf("cannot read influx line protocol data: %s", ctx.err) + ctx.err = fmt.Errorf("cannot read influx line protocol data: %w", ctx.err) } return false } diff --git a/lib/protoparser/opentsdb/parser.go b/lib/protoparser/opentsdb/parser.go index 2372cc6b4..0c69d2c5d 100644 --- a/lib/protoparser/opentsdb/parser.go +++ b/lib/protoparser/opentsdb/parser.go @@ -85,7 +85,7 @@ func (r *Row) unmarshal(s string, tagsPool []Tag) ([]Tag, error) { tagsStart := len(tagsPool) tagsPool, err = unmarshalTags(tagsPool, tail[n+1:]) if err != nil { - return tagsPool, fmt.Errorf("cannot unmarshal tags in %q: %s", s, err) + return tagsPool, fmt.Errorf("cannot unmarshal tags in %q: %w", s, err) } tags := tagsPool[tagsStart:] r.Tags = tags[:len(tags):len(tags)] diff --git a/lib/protoparser/opentsdb/streamparser.go b/lib/protoparser/opentsdb/streamparser.go index 13c5e1890..b250f3c4a 100644 --- a/lib/protoparser/opentsdb/streamparser.go +++ b/lib/protoparser/opentsdb/streamparser.go @@ -46,7 +46,7 @@ func (ctx *streamContext) Read(r io.Reader) bool { if c, ok := r.(net.Conn); ok { if err := c.SetReadDeadline(time.Now().Add(flushTimeout)); err != nil { readErrors.Inc() - ctx.err = fmt.Errorf("cannot set read deadline: %s", err) + ctx.err = fmt.Errorf("cannot set read deadline: %w", err) return false } } @@ -58,7 +58,7 @@ func (ctx *streamContext) Read(r io.Reader) bool { } else { if ctx.err != io.EOF { readErrors.Inc() - ctx.err = fmt.Errorf("cannot read OpenTSDB put protocol data: %s", ctx.err) + ctx.err = fmt.Errorf("cannot read OpenTSDB put protocol data: 
%w", ctx.err) } return false } diff --git a/lib/protoparser/opentsdbhttp/parser.go b/lib/protoparser/opentsdbhttp/parser.go index 9f3a1d17f..c426f3e86 100644 --- a/lib/protoparser/opentsdbhttp/parser.go +++ b/lib/protoparser/opentsdbhttp/parser.go @@ -67,7 +67,7 @@ func (r *Row) unmarshal(o *fastjson.Value, tagsPool []Tag) ([]Tag, error) { if rawTs != nil { ts, err := getFloat64(rawTs) if err != nil { - return tagsPool, fmt.Errorf("invalid `timestamp` in %s: %s", o, err) + return tagsPool, fmt.Errorf("invalid `timestamp` in %s: %w", o, err) } r.Timestamp = int64(ts) } else { @@ -82,7 +82,7 @@ func (r *Row) unmarshal(o *fastjson.Value, tagsPool []Tag) ([]Tag, error) { } v, err := getFloat64(rawV) if err != nil { - return tagsPool, fmt.Errorf("invalid `value` in %s: %s", o, err) + return tagsPool, fmt.Errorf("invalid `value` in %s: %w", o, err) } r.Value = v @@ -93,13 +93,13 @@ func (r *Row) unmarshal(o *fastjson.Value, tagsPool []Tag) ([]Tag, error) { } rawTags, err := vt.Object() if err != nil { - return tagsPool, fmt.Errorf("invalid `tags` in %s: %s", o, err) + return tagsPool, fmt.Errorf("invalid `tags` in %s: %w", o, err) } tagsStart := len(tagsPool) tagsPool, err = unmarshalTags(tagsPool, rawTags) if err != nil { - return tagsPool, fmt.Errorf("cannot parse tags %s: %s", rawTags, err) + return tagsPool, fmt.Errorf("cannot parse tags %s: %w", rawTags, err) } tags := tagsPool[tagsStart:] r.Tags = tags[:len(tags):len(tags)] diff --git a/lib/protoparser/opentsdbhttp/parser_timing_test.go b/lib/protoparser/opentsdbhttp/parser_timing_test.go index 984c6cd0a..aaeb70a8f 100644 --- a/lib/protoparser/opentsdbhttp/parser_timing_test.go +++ b/lib/protoparser/opentsdbhttp/parser_timing_test.go @@ -22,7 +22,7 @@ func BenchmarkRowsUnmarshal(b *testing.B) { for pb.Next() { v, err := p.Parse(s) if err != nil { - panic(fmt.Errorf("cannot parse %q: %s", s, err)) + panic(fmt.Errorf("cannot parse %q: %w", s, err)) } rows.Unmarshal(v) if len(rows.Rows) != 4 { diff --git a/lib/protoparser/opentsdbhttp/streamparser.go b/lib/protoparser/opentsdbhttp/streamparser.go index 1ad6da3c9..c9713f7df 100644 --- a/lib/protoparser/opentsdbhttp/streamparser.go +++ b/lib/protoparser/opentsdbhttp/streamparser.go @@ -33,7 +33,7 @@ func ParseStream(req *http.Request, callback func(rows []Row) error) error { zr, err := common.GetGzipReader(r) if err != nil { readErrors.Inc() - return fmt.Errorf("cannot read gzipped http protocol data: %s", err) + return fmt.Errorf("cannot read gzipped http protocol data: %w", err) } defer common.PutGzipReader(zr) r = zr @@ -47,7 +47,7 @@ func ParseStream(req *http.Request, callback func(rows []Row) error) error { reqLen, err := ctx.reqBuf.ReadFrom(lr) if err != nil { readErrors.Inc() - return fmt.Errorf("cannot read HTTP OpenTSDB request: %s", err) + return fmt.Errorf("cannot read HTTP OpenTSDB request: %w", err) } if reqLen > int64(*maxInsertRequestSize) { readErrors.Inc() @@ -60,7 +60,7 @@ func ParseStream(req *http.Request, callback func(rows []Row) error) error { v, err := p.ParseBytes(ctx.reqBuf.B) if err != nil { unmarshalErrors.Inc() - return fmt.Errorf("cannot parse HTTP OpenTSDB json: %s", err) + return fmt.Errorf("cannot parse HTTP OpenTSDB json: %w", err) } ctx.Rows.Unmarshal(v) rowsRead.Add(len(ctx.Rows.Rows)) diff --git a/lib/protoparser/prometheus/parser.go b/lib/protoparser/prometheus/parser.go index 6a8d72097..e6895e45c 100644 --- a/lib/protoparser/prometheus/parser.go +++ b/lib/protoparser/prometheus/parser.go @@ -110,7 +110,7 @@ func (r *Row) unmarshal(s string, tagsPool 
[]Tag, noEscapes bool) ([]Tag, error) var err error s, tagsPool, err = unmarshalTags(tagsPool, s, noEscapes) if err != nil { - return tagsPool, fmt.Errorf("cannot unmarshal tags: %s", err) + return tagsPool, fmt.Errorf("cannot unmarshal tags: %w", err) } if len(s) > 0 && s[0] == ' ' { // Fast path - skip whitespace. @@ -226,7 +226,7 @@ func unmarshalTags(dst []Tag, s string, noEscapes bool) (string, []Tag, error) { var err error value, err = unescapeValue(s[:n+1]) if err != nil { - return s, dst, fmt.Errorf("cannot unescape value %q for tag %q: %s", s[:n+1], key, err) + return s, dst, fmt.Errorf("cannot unescape value %q for tag %q: %w", s[:n+1], key, err) } s = s[n+1:] } diff --git a/lib/protoparser/promremotewrite/streamparser.go b/lib/protoparser/promremotewrite/streamparser.go index af8b593a9..5f7268180 100644 --- a/lib/protoparser/promremotewrite/streamparser.go +++ b/lib/protoparser/promremotewrite/streamparser.go @@ -44,11 +44,11 @@ func (ctx *pushCtx) Read(r *http.Request) error { ctx.reqBuf, err = readSnappy(ctx.reqBuf[:0], r.Body) if err != nil { readErrors.Inc() - return fmt.Errorf("cannot read prompb.WriteRequest: %s", err) + return fmt.Errorf("cannot read prompb.WriteRequest: %w", err) } if err = ctx.wr.Unmarshal(ctx.reqBuf); err != nil { unmarshalErrors.Inc() - return fmt.Errorf("cannot unmarshal prompb.WriteRequest with size %d bytes: %s", len(ctx.reqBuf), err) + return fmt.Errorf("cannot unmarshal prompb.WriteRequest with size %d bytes: %w", len(ctx.reqBuf), err) } rows := 0 @@ -98,7 +98,7 @@ func readSnappy(dst []byte, r io.Reader) ([]byte, error) { reqLen, err := bb.ReadFrom(lr) if err != nil { bodyBufferPool.Put(bb) - return dst, fmt.Errorf("cannot read compressed request: %s", err) + return dst, fmt.Errorf("cannot read compressed request: %w", err) } if reqLen > int64(*maxInsertRequestSize) { return dst, fmt.Errorf("too big packed request; mustn't exceed `-maxInsertRequestSize=%d` bytes", *maxInsertRequestSize) @@ -108,7 +108,7 @@ func readSnappy(dst []byte, r io.Reader) ([]byte, error) { buf, err = snappy.Decode(buf, bb.B) bodyBufferPool.Put(bb) if err != nil { - err = fmt.Errorf("cannot decompress request with length %d: %s", reqLen, err) + err = fmt.Errorf("cannot decompress request with length %d: %w", reqLen, err) return dst, err } if len(buf) > *maxInsertRequestSize { diff --git a/lib/protoparser/vmimport/parser.go b/lib/protoparser/vmimport/parser.go index 785b0845d..13f96191d 100644 --- a/lib/protoparser/vmimport/parser.go +++ b/lib/protoparser/vmimport/parser.go @@ -53,7 +53,7 @@ func (r *Row) unmarshal(s string, tu *tagsUnmarshaler) error { r.reset() v, err := tu.p.Parse(s) if err != nil { - return fmt.Errorf("cannot parse json line: %s", err) + return fmt.Errorf("cannot parse json line: %w", err) } // Unmarshal tags @@ -63,7 +63,7 @@ func (r *Row) unmarshal(s string, tu *tagsUnmarshaler) error { } tagsStart := len(tu.tagsPool) if err := tu.unmarshalTags(metric); err != nil { - return fmt.Errorf("cannot unmarshal `metric`: %s", err) + return fmt.Errorf("cannot unmarshal `metric`: %w", err) } tags := tu.tagsPool[tagsStart:] r.Tags = tags[:len(tags):len(tags)] @@ -79,7 +79,7 @@ func (r *Row) unmarshal(s string, tu *tagsUnmarshaler) error { for i, v := range values { f, err := v.Float64() if err != nil { - return fmt.Errorf("cannot unmarshal value at position %d: %s", i, err) + return fmt.Errorf("cannot unmarshal value at position %d: %w", i, err) } r.Values = append(r.Values, f) } @@ -92,7 +92,7 @@ func (r *Row) unmarshal(s string, tu *tagsUnmarshaler) error { 
for i, v := range timestamps { ts, err := v.Int64() if err != nil { - return fmt.Errorf("cannot unmarshal timestamp at position %d: %s", i, err) + return fmt.Errorf("cannot unmarshal timestamp at position %d: %w", i, err) } r.Timestamps = append(r.Timestamps, ts) } @@ -158,7 +158,7 @@ func (tu *tagsUnmarshaler) unmarshalTags(o *fastjson.Object) error { tag.Key = tu.addBytes(key) sb, err := v.StringBytes() if err != nil && tu.err != nil { - tu.err = fmt.Errorf("cannot parse value for tag %q: %s", tag.Key, err) + tu.err = fmt.Errorf("cannot parse value for tag %q: %w", tag.Key, err) } tag.Value = tu.addBytes(sb) }) diff --git a/lib/protoparser/vmimport/streamparser.go b/lib/protoparser/vmimport/streamparser.go index 77d0ff84a..97400a312 100644 --- a/lib/protoparser/vmimport/streamparser.go +++ b/lib/protoparser/vmimport/streamparser.go @@ -26,7 +26,7 @@ func ParseStream(req *http.Request, callback func(rows []Row) error) error { if req.Header.Get("Content-Encoding") == "gzip" { zr, err := common.GetGzipReader(r) if err != nil { - return fmt.Errorf("cannot read gzipped vmimport data: %s", err) + return fmt.Errorf("cannot read gzipped vmimport data: %w", err) } defer common.PutGzipReader(zr) r = zr @@ -50,7 +50,7 @@ func (ctx *streamContext) Read(r io.Reader) bool { if ctx.err != nil { if ctx.err != io.EOF { readErrors.Inc() - ctx.err = fmt.Errorf("cannot read vmimport data: %s", ctx.err) + ctx.err = fmt.Errorf("cannot read vmimport data: %w", ctx.err) } return false } diff --git a/lib/storage/block_header.go b/lib/storage/block_header.go index d41c41167..cc266109b 100644 --- a/lib/storage/block_header.go +++ b/lib/storage/block_header.go @@ -121,7 +121,7 @@ func (bh *blockHeader) Unmarshal(src []byte) ([]byte, error) { tail, err := bh.TSID.Unmarshal(src) if err != nil { - return src, fmt.Errorf("cannot unmarshal TSID: %s", err) + return src, fmt.Errorf("cannot unmarshal TSID: %w", err) } src = tail @@ -154,10 +154,10 @@ func (bh *blockHeader) Unmarshal(src []byte) ([]byte, error) { return src, fmt.Errorf("RowsCount in block header cannot be zero") } if err = encoding.CheckMarshalType(bh.TimestampsMarshalType); err != nil { - return src, fmt.Errorf("unsupported TimestampsMarshalType: %s", err) + return src, fmt.Errorf("unsupported TimestampsMarshalType: %w", err) } if err = encoding.CheckMarshalType(bh.ValuesMarshalType); err != nil { - return src, fmt.Errorf("unsupported ValuesMarshalType: %s", err) + return src, fmt.Errorf("unsupported ValuesMarshalType: %w", err) } if err = encoding.CheckPrecisionBits(bh.PrecisionBits); err != nil { return src, err @@ -189,7 +189,7 @@ func unmarshalBlockHeaders(dst []blockHeader, src []byte, blockHeadersCount int) for len(src) > 0 { tmp, err := bh.Unmarshal(src) if err != nil { - return dst, fmt.Errorf("cannot unmarshal block header: %s", err) + return dst, fmt.Errorf("cannot unmarshal block header: %w", err) } src = tmp dst = append(dst, bh) diff --git a/lib/storage/block_stream_merger.go b/lib/storage/block_stream_merger.go index 89819ad7e..e41301e70 100644 --- a/lib/storage/block_stream_merger.go +++ b/lib/storage/block_stream_merger.go @@ -39,7 +39,7 @@ func (bsm *blockStreamMerger) Init(bsrs []*blockStreamReader) { continue } if err := bsr.Error(); err != nil { - bsm.err = fmt.Errorf("cannot obtain the next block to merge: %s", err) + bsm.err = fmt.Errorf("cannot obtain the next block to merge: %w", err) return } } @@ -74,7 +74,7 @@ func (bsm *blockStreamMerger) NextBlock() bool { case io.EOF: return false default: - bsm.err = fmt.Errorf("cannot obtain 
the next block to merge: %s", bsm.err) + bsm.err = fmt.Errorf("cannot obtain the next block to merge: %w", bsm.err) return false } } diff --git a/lib/storage/block_stream_reader.go b/lib/storage/block_stream_reader.go index af46702e8..9b5e20619 100644 --- a/lib/storage/block_stream_reader.go +++ b/lib/storage/block_stream_reader.go @@ -131,20 +131,20 @@ func (bsr *blockStreamReader) InitFromFilePart(path string) error { path = filepath.Clean(path) if err := bsr.ph.ParseFromPath(path); err != nil { - return fmt.Errorf("cannot parse path to part: %s", err) + return fmt.Errorf("cannot parse path to part: %w", err) } timestampsPath := path + "/timestamps.bin" timestampsFile, err := filestream.Open(timestampsPath, true) if err != nil { - return fmt.Errorf("cannot open timestamps file in stream mode: %s", err) + return fmt.Errorf("cannot open timestamps file in stream mode: %w", err) } valuesPath := path + "/values.bin" valuesFile, err := filestream.Open(valuesPath, true) if err != nil { timestampsFile.MustClose() - return fmt.Errorf("cannot open values file in stream mode: %s", err) + return fmt.Errorf("cannot open values file in stream mode: %w", err) } indexPath := path + "/index.bin" @@ -152,7 +152,7 @@ func (bsr *blockStreamReader) InitFromFilePart(path string) error { if err != nil { timestampsFile.MustClose() valuesFile.MustClose() - return fmt.Errorf("cannot open index file in stream mode: %s", err) + return fmt.Errorf("cannot open index file in stream mode: %w", err) } metaindexPath := path + "/metaindex.bin" @@ -161,7 +161,7 @@ func (bsr *blockStreamReader) InitFromFilePart(path string) error { timestampsFile.MustClose() valuesFile.MustClose() indexFile.MustClose() - return fmt.Errorf("cannot open metaindex file in stream mode: %s", err) + return fmt.Errorf("cannot open metaindex file in stream mode: %w", err) } mrs, err := unmarshalMetaindexRows(bsr.mrs[:0], metaindexFile) metaindexFile.MustClose() @@ -169,7 +169,7 @@ func (bsr *blockStreamReader) InitFromFilePart(path string) error { timestampsFile.MustClose() valuesFile.MustClose() indexFile.MustClose() - return fmt.Errorf("cannot unmarshal metaindex rows from inmemoryPart: %s", err) + return fmt.Errorf("cannot unmarshal metaindex rows from inmemoryPart: %w", err) } bsr.path = path @@ -199,7 +199,7 @@ func (bsr *blockStreamReader) Error() error { if bsr.err == nil || bsr.err == io.EOF { return nil } - return fmt.Errorf("error when reading part %q: %s", bsr, bsr.err) + return fmt.Errorf("error when reading part %q: %w", bsr, bsr.err) } // NextBlock advances bsr to the next block. 
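The Error() hunk above is where the %w switch pays off: io.EOF is deliberately treated as a clean end-of-stream, and anything else gets wrapped. With the old %s verb, the bsr.err == io.EOF comparison stayed correct only as long as nobody had wrapped the error first; %w preserves the error chain so sentinel checks keep working at any distance from the source. A standalone sketch (not part of this patch) of the difference:

    package main

    import (
        "errors"
        "fmt"
        "io"
    )

    func main() {
        wrapped := fmt.Errorf("cannot read next block: %w", io.EOF)
        flattened := fmt.Errorf("cannot read next block: %s", io.EOF)

        fmt.Println(errors.Is(wrapped, io.EOF))   // true: %w keeps the error chain
        fmt.Println(errors.Is(flattened, io.EOF)) // false: %s reduced it to plain text
    }

Both calls format to the same message string, which is what makes this sweep safe as a mechanical rewrite: only the wrapped chain is added.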
@@ -223,7 +223,7 @@ func (bsr *blockStreamReader) NextBlock() bool { return false } - bsr.err = fmt.Errorf("cannot read next block: %s", err) + bsr.err = fmt.Errorf("cannot read next block: %w", err) return false } @@ -238,7 +238,7 @@ func (bsr *blockStreamReader) readBlock() error { if err == io.EOF { return io.EOF } - return fmt.Errorf("cannot read index block from index data: %s", err) + return fmt.Errorf("cannot read index block from index data: %w", err) } } @@ -251,7 +251,7 @@ func (bsr *blockStreamReader) readBlock() error { bsr.indexCursor = bsr.indexCursor[marshaledBlockHeaderSize:] tail, err := bsr.Block.bh.Unmarshal(bsr.Block.headerData) if err != nil { - return fmt.Errorf("cannot parse block header read from index data at offset %d: %s", bsr.prevIndexBlockOffset(), err) + return fmt.Errorf("cannot parse block header read from index data at offset %d: %w", bsr.prevIndexBlockOffset(), err) } if len(tail) > 0 { return fmt.Errorf("non-empty tail left after parsing block header at offset %d: %x", bsr.prevIndexBlockOffset(), tail) @@ -287,13 +287,13 @@ func (bsr *blockStreamReader) readBlock() error { // Read timestamps data. bsr.Block.timestampsData = bytesutil.Resize(bsr.Block.timestampsData, int(bsr.Block.bh.TimestampsBlockSize)) if err := fs.ReadFullData(bsr.timestampsReader, bsr.Block.timestampsData); err != nil { - return fmt.Errorf("cannot read timestamps block at offset %d: %s", bsr.timestampsBlockOffset, err) + return fmt.Errorf("cannot read timestamps block at offset %d: %w", bsr.timestampsBlockOffset, err) } // Read values data. bsr.Block.valuesData = bytesutil.Resize(bsr.Block.valuesData, int(bsr.Block.bh.ValuesBlockSize)) if err := fs.ReadFullData(bsr.valuesReader, bsr.Block.valuesData); err != nil { - return fmt.Errorf("cannot read values block at offset %d: %s", bsr.valuesBlockOffset, err) + return fmt.Errorf("cannot read values block at offset %d: %w", bsr.valuesBlockOffset, err) } // Update offsets. @@ -326,11 +326,11 @@ func (bsr *blockStreamReader) readIndexBlock() error { // Read index block. 
bsr.compressedIndexData = bytesutil.Resize(bsr.compressedIndexData, int(bsr.mr.IndexBlockSize)) if err := fs.ReadFullData(bsr.indexReader, bsr.compressedIndexData); err != nil { - return fmt.Errorf("cannot read index block from index data at offset %d: %s", bsr.indexBlockOffset, err) + return fmt.Errorf("cannot read index block from index data at offset %d: %w", bsr.indexBlockOffset, err) } tmpData, err := encoding.DecompressZSTD(bsr.indexData[:0], bsr.compressedIndexData) if err != nil { - return fmt.Errorf("cannot decompress index block read at offset %d: %s", bsr.indexBlockOffset, err) + return fmt.Errorf("cannot decompress index block read at offset %d: %w", bsr.indexBlockOffset, err) } bsr.indexData = tmpData bsr.indexCursor = bsr.indexData diff --git a/lib/storage/block_stream_reader_test.go b/lib/storage/block_stream_reader_test.go index ae31f0f10..57cc04510 100644 --- a/lib/storage/block_stream_reader_test.go +++ b/lib/storage/block_stream_reader_test.go @@ -105,14 +105,14 @@ func testBlockStreamReaderReadRows(mp *inmemoryPart, rows []rawRow) error { rowsCount := 0 for bsr.NextBlock() { if err := bsr.Block.UnmarshalData(); err != nil { - return fmt.Errorf("cannot unmarshal block data: %s", err) + return fmt.Errorf("cannot unmarshal block data: %w", err) } for bsr.Block.nextRow() { rowsCount++ } } if err := bsr.Error(); err != nil { - return fmt.Errorf("unexpected error in bsr.NextBlock: %s", err) + return fmt.Errorf("unexpected error in bsr.NextBlock: %w", err) } if rowsCount != len(rows) { return fmt.Errorf("unexpected number of rows read; got %d; want %d", rowsCount, len(rows)) diff --git a/lib/storage/block_stream_reader_timing_test.go b/lib/storage/block_stream_reader_timing_test.go index abe885f0e..7628fac7b 100644 --- a/lib/storage/block_stream_reader_timing_test.go +++ b/lib/storage/block_stream_reader_timing_test.go @@ -34,13 +34,13 @@ func benchmarkBlockStreamReader(b *testing.B, mp *inmemoryPart, readRows bool) { continue } if err := bsr.Block.UnmarshalData(); err != nil { - panic(fmt.Errorf("unexpected error when unmarshaling rows on block %d: %s", blockNum, err)) + panic(fmt.Errorf("unexpected error when unmarshaling rows on block %d: %w", blockNum, err)) } for bsr.Block.nextRow() { } } if err := bsr.Error(); err != nil { - panic(fmt.Errorf("unexpected error when reading block %d: %s", blockNum, err)) + panic(fmt.Errorf("unexpected error when reading block %d: %w", blockNum, err)) } blockNum++ } diff --git a/lib/storage/block_stream_writer.go b/lib/storage/block_stream_writer.go index 8982c33b1..ce35c0b0b 100644 --- a/lib/storage/block_stream_writer.go +++ b/lib/storage/block_stream_writer.go @@ -92,7 +92,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre // Create the directory if err := fs.MkdirAllFailIfExist(path); err != nil { - return fmt.Errorf("cannot create directory %q: %s", path, err) + return fmt.Errorf("cannot create directory %q: %w", path, err) } // Create part files in the directory. 
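The hunks that follow create the part files one at a time; every failure path closes whatever is already open, removes the half-built part directory, and wraps the root cause with %w so the caller still sees the original error. A condensed sketch of that cleanup idiom, using the standard library in place of the repo's fs and filestream helpers (file names and permissions invented):

    package sketch

    import (
        "fmt"
        "os"
    )

    // createPartFiles mirrors the shape of InitFromFilePart: create files in
    // order; on any failure, close what is already open, remove the partial
    // directory, and wrap the root cause with %w.
    func createPartFiles(path string) (timestamps, values *os.File, err error) {
        if err := os.MkdirAll(path, 0o755); err != nil {
            return nil, nil, fmt.Errorf("cannot create directory %q: %w", path, err)
        }
        timestamps, err = os.Create(path + "/timestamps.bin")
        if err != nil {
            _ = os.RemoveAll(path)
            return nil, nil, fmt.Errorf("cannot create timestamps file: %w", err)
        }
        values, err = os.Create(path + "/values.bin")
        if err != nil {
            _ = timestamps.Close()
            _ = os.RemoveAll(path)
            return nil, nil, fmt.Errorf("cannot create values file: %w", err)
        }
        return timestamps, values, nil
    }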
@@ -100,7 +100,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre timestampsFile, err := filestream.Create(timestampsPath, nocache) if err != nil { fs.MustRemoveAll(path) - return fmt.Errorf("cannot create timestamps file: %s", err) + return fmt.Errorf("cannot create timestamps file: %w", err) } valuesPath := path + "/values.bin" @@ -108,7 +108,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre if err != nil { timestampsFile.MustClose() fs.MustRemoveAll(path) - return fmt.Errorf("cannot create values file: %s", err) + return fmt.Errorf("cannot create values file: %w", err) } indexPath := path + "/index.bin" @@ -117,7 +117,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre timestampsFile.MustClose() valuesFile.MustClose() fs.MustRemoveAll(path) - return fmt.Errorf("cannot create index file: %s", err) + return fmt.Errorf("cannot create index file: %w", err) } // Always cache metaindex file in OS page cache, since it is immediately @@ -129,7 +129,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre valuesFile.MustClose() indexFile.MustClose() fs.MustRemoveAll(path) - return fmt.Errorf("cannot create metaindex file: %s", err) + return fmt.Errorf("cannot create metaindex file: %w", err) } bsw.reset() diff --git a/lib/storage/block_stream_writer_timing_test.go b/lib/storage/block_stream_writer_timing_test.go index f786e4b82..8e51d06a2 100644 --- a/lib/storage/block_stream_writer_timing_test.go +++ b/lib/storage/block_stream_writer_timing_test.go @@ -42,7 +42,7 @@ func benchmarkBlockStreamWriter(b *testing.B, ebs []Block, rowsCount int, writeR for i := range ebsCopy { eb := &ebsCopy[i] if err := eb.UnmarshalData(); err != nil { - panic(fmt.Errorf("cannot unmarshal block %d on loop %d: %s", i, loopCount, err)) + panic(fmt.Errorf("cannot unmarshal block %d on loop %d: %w", i, loopCount, err)) } } } @@ -73,7 +73,7 @@ func newBenchBlocks(rows []rawRow) []Block { ebs = append(ebs, eb) } if err := bsr.Error(); err != nil { - panic(fmt.Errorf("unexpected error when reading inmemoryPart: %s", err)) + panic(fmt.Errorf("unexpected error when reading inmemoryPart: %w", err)) } return ebs } diff --git a/lib/storage/index_db.go b/lib/storage/index_db.go index 522072010..c138144d4 100644 --- a/lib/storage/index_db.go +++ b/lib/storage/index_db.go @@ -171,7 +171,7 @@ func openIndexDB(path string, metricIDCache, metricNameCache *workingsetcache.Ca tb, err := mergeset.OpenTable(path, invalidateTagCache, mergeTagToMetricIDsRows) if err != nil { - return nil, fmt.Errorf("cannot open indexDB %q: %s", path, err) + return nil, fmt.Errorf("cannot open indexDB %q: %w", path, err) } name := filepath.Base(path) @@ -198,7 +198,7 @@ func openIndexDB(path string, metricIDCache, metricNameCache *workingsetcache.Ca dmis, err := is.loadDeletedMetricIDs() db.putIndexSearch(is) if err != nil { - return nil, fmt.Errorf("cannot load deleted metricIDs: %s", err) + return nil, fmt.Errorf("cannot load deleted metricIDs: %w", err) } db.setDeletedMetricIDs(dmis) @@ -206,7 +206,7 @@ func openIndexDB(path string, metricIDCache, metricNameCache *workingsetcache.Ca date, err := is.getStartDateForPerDayInvertedIndex() db.putIndexSearch(is) if err != nil { - return nil, fmt.Errorf("cannot obtain start date for per-day inverted index: %s", err) + return nil, fmt.Errorf("cannot obtain start date for per-day inverted index: %w", err) } db.startDateForPerDayInvertedIndex = date @@ -481,7 +481,7 @@ func 
unmarshalTSIDs(dst []TSID, src []byte) ([]TSID, error) { for i := 0; i < int(n); i++ { tail, err := dst[dstLen+i].Unmarshal(src) if err != nil { - return dst, fmt.Errorf("cannot unmarshal tsid #%d out of %d: %s", i, n, err) + return dst, fmt.Errorf("cannot unmarshal tsid #%d out of %d: %w", i, n, err) } src = tail } @@ -510,7 +510,7 @@ func (db *indexDB) getTSIDByNameNoCreate(dst *TSID, metricName []byte) error { return nil } if err != io.EOF { - return fmt.Errorf("cannot search TSID by MetricName %q: %s", metricName, err) + return fmt.Errorf("cannot search TSID by MetricName %q: %w", metricName, err) } // Do not search for the TSID in the external storage, @@ -544,7 +544,7 @@ func (is *indexSearch) GetOrCreateTSIDByName(dst *TSID, metricName []byte) error return nil } if err != io.EOF { - return fmt.Errorf("cannot search TSID by MetricName %q: %s", metricName, err) + return fmt.Errorf("cannot search TSID by MetricName %q: %w", metricName, err) } is.tsidByNameMisses++ } else { @@ -559,7 +559,7 @@ func (is *indexSearch) GetOrCreateTSIDByName(dst *TSID, metricName []byte) error // It is OK if duplicate TSID for mn is created by concurrent goroutines. // Metric results will be merged by mn after TableSearch. if err := is.db.createTSIDByName(dst, metricName); err != nil { - return fmt.Errorf("cannot create TSID by MetricName %q: %s", metricName, err) + return fmt.Errorf("cannot create TSID by MetricName %q: %w", metricName, err) } return nil } @@ -591,15 +591,15 @@ func (db *indexDB) createTSIDByName(dst *TSID, metricName []byte) error { mn := GetMetricName() defer PutMetricName(mn) if err := mn.Unmarshal(metricName); err != nil { - return fmt.Errorf("cannot unmarshal metricName %q: %s", metricName, err) + return fmt.Errorf("cannot unmarshal metricName %q: %w", metricName, err) } if err := db.generateTSID(dst, metricName, mn); err != nil { - return fmt.Errorf("cannot generate TSID: %s", err) + return fmt.Errorf("cannot generate TSID: %w", err) } db.putMetricNameToCache(dst.MetricID, metricName) if err := db.createIndexes(dst, mn); err != nil { - return fmt.Errorf("cannot create indexes: %s", err) + return fmt.Errorf("cannot create indexes: %w", err) } // There is no need in invalidating tag cache, since it is invalidated @@ -621,7 +621,7 @@ func (db *indexDB) generateTSID(dst *TSID, metricName []byte, mn *MetricName) er return nil } if err != io.EOF { - return fmt.Errorf("external search failed: %s", err) + return fmt.Errorf("external search failed: %w", err) } } @@ -786,7 +786,7 @@ func (is *indexSearch) searchTagKeys(tks map[string]struct{}, maxTagKeys int) er ts.Seek(kb.B) } if err := ts.Error(); err != nil { - return fmt.Errorf("error during search for prefix %q: %s", prefix, err) + return fmt.Errorf("error during search for prefix %q: %w", prefix, err) } return nil } @@ -861,7 +861,7 @@ func (is *indexSearch) searchTagValues(tvs map[string]struct{}, tagKey []byte, m ts.Seek(kb.B) } if err := ts.Error(); err != nil { - return fmt.Errorf("error when searching for tag name prefix %q: %s", prefix, err) + return fmt.Errorf("error when searching for tag name prefix %q: %w", prefix, err) } return nil } @@ -885,7 +885,7 @@ func (db *indexDB) GetSeriesCount() (uint64, error) { extDB.putIndexSearch(is) }) if ok && err != nil { - return 0, fmt.Errorf("error when searching in extDB: %s", err) + return 0, fmt.Errorf("error when searching in extDB: %w", err) } return n + nExt, nil } @@ -919,7 +919,7 @@ func (is *indexSearch) getSeriesCount() (uint64, error) { metricIDsLen += uint64(mp.MetricIDsLen()) } 
if err := ts.Error(); err != nil { - return 0, fmt.Errorf("error when counting unique timeseries: %s", err) + return 0, fmt.Errorf("error when counting unique timeseries: %w", err) } return metricIDsLen, nil } @@ -944,7 +944,7 @@ func (db *indexDB) GetTSDBStatusForDate(date uint64, topN int) (*TSDBStatus, err extDB.putIndexSearch(is) }) if ok && err != nil { - return nil, fmt.Errorf("error when obtaining TSDB status from extDB: %s", err) + return nil, fmt.Errorf("error when obtaining TSDB status from extDB: %w", err) } return status, nil } @@ -973,7 +973,7 @@ func (is *indexSearch) getTSDBStatusForDate(date uint64, topN int) (*TSDBStatus, var err error tail, tmp, err = unmarshalTagValue(tmp[:0], tail) if err != nil { - return nil, fmt.Errorf("cannot unmarshal tag key from line %q: %s", item, err) + return nil, fmt.Errorf("cannot unmarshal tag key from line %q: %w", item, err) } if len(tmp) == 0 { tmp = append(tmp, "__name__"...) @@ -986,7 +986,7 @@ func (is *indexSearch) getTSDBStatusForDate(date uint64, topN int) (*TSDBStatus, tmp = append(tmp, '=') tail, tmp, err = unmarshalTagValue(tmp, tail) if err != nil { - return nil, fmt.Errorf("cannot unmarshal tag value from line %q: %s", item, err) + return nil, fmt.Errorf("cannot unmarshal tag value from line %q: %w", item, err) } if !bytes.Equal(tmp, labelNameValue) { thSeriesCountByLabelValuePair.pushIfNonEmpty(labelNameValue, seriesCountByLabelValuePair) @@ -1006,7 +1006,7 @@ func (is *indexSearch) getTSDBStatusForDate(date uint64, topN int) (*TSDBStatus, seriesCountByLabelValuePair += uint64(mp.MetricIDsLen()) } if err := ts.Error(); err != nil { - return nil, fmt.Errorf("error when counting time series by metric names: %s", err) + return nil, fmt.Errorf("error when counting time series by metric names: %w", err) } thLabelValueCountByLabelName.pushIfNonEmpty(labelName, labelValueCountByLabelName) thSeriesCountByLabelValuePair.pushIfNonEmpty(labelNameValue, seriesCountByLabelValuePair) @@ -1139,7 +1139,7 @@ func (db *indexDB) searchMetricName(dst []byte, metricID uint64) ([]byte, error) // Mark the metricID as deleted, so it will be created again when new data point // for the given time series will arrive. 
if err := db.deleteMetricIDs([]uint64{metricID}); err != nil { - return dst, fmt.Errorf("cannot delete metricID for missing metricID->metricName entry; metricID=%d; error: %s", metricID, err) + return dst, fmt.Errorf("cannot delete metricID for missing metricID->metricName entry; metricID=%d; error: %w", metricID, err) } return dst, io.EOF } @@ -1177,7 +1177,7 @@ func (db *indexDB) DeleteTSIDs(tfss []*TagFilters) (int, error) { deletedCount += n }) { if err != nil { - return deletedCount, fmt.Errorf("cannot delete tsids in extDB: %s", err) + return deletedCount, fmt.Errorf("cannot delete tsids in extDB: %w", err) } } return deletedCount, nil @@ -1366,7 +1366,7 @@ func (is *indexSearch) getTSIDByMetricName(dst *TSID, metricName []byte) error { v := ts.Item[len(kb.B):] tail, err := dst.Unmarshal(v) if err != nil { - return fmt.Errorf("cannot unmarshal TSID: %s", err) + return fmt.Errorf("cannot unmarshal TSID: %w", err) } if len(tail) > 0 { return fmt.Errorf("unexpected non-empty tail left after unmarshaling TSID: %X", tail) @@ -1382,7 +1382,7 @@ func (is *indexSearch) getTSIDByMetricName(dst *TSID, metricName []byte) error { return nil } if err := ts.Error(); err != nil { - return fmt.Errorf("error when searching TSID by metricName; searchPrefix %q: %s", kb.B, err) + return fmt.Errorf("error when searching TSID by metricName; searchPrefix %q: %w", kb.B, err) } // Nothing found return io.EOF @@ -1402,7 +1402,7 @@ func (is *indexSearch) searchMetricName(dst []byte, metricID uint64) ([]byte, er if err == io.EOF { return dst, err } - return dst, fmt.Errorf("error when searching metricName by metricID; searchPrefix %q: %s", kb.B, err) + return dst, fmt.Errorf("error when searching metricName by metricID; searchPrefix %q: %w", kb.B, err) } v := ts.Item[len(kb.B):] dst = append(dst, v...) @@ -1449,7 +1449,7 @@ func (is *indexSearch) containsTimeRange(tr TimeRange) (bool, error) { ts.Seek(kb.B) if !ts.NextItem() { if err := ts.Error(); err != nil { - return false, fmt.Errorf("error when searching for minDate=%d, prefix %q: %s", minDate, kb.B, err) + return false, fmt.Errorf("error when searching for minDate=%d, prefix %q: %w", minDate, kb.B, err) } return false, nil } @@ -1503,7 +1503,7 @@ func (is *indexSearch) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics atomic.AddUint64(&is.db.missingTSIDsForMetricID, 1) continue } - return nil, fmt.Errorf("cannot find tsid %d out of %d for metricID %d: %s", i, len(metricIDs), metricID, err) + return nil, fmt.Errorf("cannot find tsid %d out of %d for metricID %d: %w", i, len(metricIDs), metricID, err) } is.db.putToMetricIDCache(metricID, tsid) i++ @@ -1525,12 +1525,12 @@ func (is *indexSearch) getTSIDByMetricID(dst *TSID, metricID uint64) error { if err == io.EOF { return err } - return fmt.Errorf("error when searching TSID by metricID; searchPrefix %q: %s", kb.B, err) + return fmt.Errorf("error when searching TSID by metricID; searchPrefix %q: %w", kb.B, err) } v := ts.Item[len(kb.B):] tail, err := dst.Unmarshal(v) if err != nil { - return fmt.Errorf("cannot unmarshal TSID=%X: %s", v, err) + return fmt.Errorf("cannot unmarshal TSID=%X: %w", v, err) } if len(tail) > 0 { return fmt.Errorf("unexpected non-zero tail left after unmarshaling TSID: %X", tail) @@ -1557,16 +1557,16 @@ func (is *indexSearch) updateMetricIDsByMetricNameMatch(metricIDs, srcMetricIDs // Skip this metricID for now. 
continue } - return fmt.Errorf("cannot find metricName by metricID %d: %s", metricID, err) + return fmt.Errorf("cannot find metricName by metricID %d: %w", metricID, err) } if err := mn.Unmarshal(metricName.B); err != nil { - return fmt.Errorf("cannot unmarshal metricName %q: %s", metricName.B, err) + return fmt.Errorf("cannot unmarshal metricName %q: %w", metricName.B, err) } // Match the mn against tfs. ok, err := matchTagFilters(mn, tfs, &is.kb) if err != nil { - return fmt.Errorf("cannot match MetricName %s against tagFilters: %s", mn, err) + return fmt.Errorf("cannot match MetricName %s against tagFilters: %w", mn, err) } if !ok { continue @@ -1732,7 +1732,7 @@ func (is *indexSearch) getTagFilterWithMinMetricIDsCount(tfs *TagFilters, maxMet uselessTagFilters++ continue } - return nil, nil, fmt.Errorf("cannot find MetricIDs for tagFilter %s: %s", tf, err) + return nil, nil, fmt.Errorf("cannot find MetricIDs for tagFilter %s: %w", tf, err) } if metricIDs.Len() >= maxMetrics { // The tf matches at least maxMetrics. Skip it @@ -1793,7 +1793,7 @@ func matchTagFilters(mn *MetricName, tfs []*tagFilter, kb *bytesutil.ByteBuffer) kb.B = b[:len(kb.B)] ok, err := matchTagFilter(b, tf) if err != nil { - return false, fmt.Errorf("cannot match MetricGroup %q with tagFilter %s: %s", mn.MetricGroup, tf, err) + return false, fmt.Errorf("cannot match MetricGroup %q with tagFilter %s: %w", mn.MetricGroup, tf, err) } if !ok { // Move failed tf to start. @@ -1821,7 +1821,7 @@ func matchTagFilters(mn *MetricName, tfs []*tagFilter, kb *bytesutil.ByteBuffer) kb.B = b[:len(kb.B)] ok, err := matchTagFilter(b, tf) if err != nil { - return false, fmt.Errorf("cannot match tag %q with tagFilter %s: %s", tag, tf, err) + return false, fmt.Errorf("cannot match tag %q with tagFilter %s: %w", tag, tf, err) } if !ok { // Move failed tf to start. @@ -1995,7 +1995,7 @@ func (is *indexSearch) getMetricIDsForTagFilter(tf *tagFilter, maxMetrics int) ( if err == errFallbackToMetricNameMatch { return nil, err } - return nil, fmt.Errorf("error when searching for metricIDs for tagFilter in fast path: %s; tagFilter=%s", err, tf) + return nil, fmt.Errorf("error when searching for metricIDs for tagFilter in fast path: %w; tagFilter=%s", err, tf) } return metricIDs, nil } @@ -2010,7 +2010,7 @@ func (is *indexSearch) getMetricIDsForTagFilter(tf *tagFilter, maxMetrics int) ( if err == errFallbackToMetricNameMatch { return nil, err } - return nil, fmt.Errorf("error when searching for metricIDs for tagFilter in slow path: %s; tagFilter=%s", err, tf) + return nil, fmt.Errorf("error when searching for metricIDs for tagFilter in slow path: %w; tagFilter=%s", err, tf) } return metricIDs, nil } @@ -2065,7 +2065,7 @@ func (is *indexSearch) getMetricIDsForTagFilterSlow(tf *tagFilter, maxLoops int, // Slow path: need tf.matchSuffix call. 
ok, err := tf.matchSuffix(suffix) if err != nil { - return fmt.Errorf("error when matching %s against suffix %q: %s", tf, suffix, err) + return fmt.Errorf("error when matching %s against suffix %q: %w", tf, suffix, err) } if !ok { prevMatch = false @@ -2100,7 +2100,7 @@ func (is *indexSearch) getMetricIDsForTagFilterSlow(tf *tagFilter, maxLoops int, } } if err := ts.Error(); err != nil { - return fmt.Errorf("error when searching for tag filter prefix %q: %s", prefix, err) + return fmt.Errorf("error when searching for tag filter prefix %q: %w", prefix, err) } return nil } @@ -2165,7 +2165,7 @@ func (is *indexSearch) updateMetricIDsForOrSuffixNoFilter(prefix []byte, maxMetr } } if err := ts.Error(); err != nil { - return fmt.Errorf("error when searching for tag filter prefix %q: %s", prefix, err) + return fmt.Errorf("error when searching for tag filter prefix %q: %w", prefix, err) } return nil } @@ -2232,7 +2232,7 @@ func (is *indexSearch) updateMetricIDsForOrSuffixWithFilter(prefix []byte, metri } } if err := ts.Error(); err != nil { - return fmt.Errorf("error when searching for tag filter prefix %q: %s", prefix, err) + return fmt.Errorf("error when searching for tag filter prefix %q: %w", prefix, err) } return nil } @@ -2365,7 +2365,7 @@ func (is *indexSearch) tryUpdatingMetricIDsForDateRange(metricIDs *uint64set.Set return } dateStr := time.Unix(int64(date*24*3600), 0) - errGlobal = fmt.Errorf("cannot search for metricIDs for %s: %s", dateStr, err) + errGlobal = fmt.Errorf("cannot search for metricIDs for %s: %w", dateStr, err) return } if metricIDs.Len() < maxMetrics { @@ -2453,7 +2453,7 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter // according to startDateForPerDayInvertedIndex. return nil, nil } - return nil, fmt.Errorf("cannot obtain all the metricIDs: %s", err) + return nil, fmt.Errorf("cannot obtain all the metricIDs: %w", err) } if m.Len() >= maxDateMetrics { // Too many time series found for the given (date). Fall back to global search. 
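One thing the surrounding hunks leave untouched is the direct sentinel comparison err == errFallbackToMetricNameMatch, which stays correct only while that sentinel is always returned unwrapped. If any intermediate layer ever wraps it with %w, errors.Is becomes the comparison that still matches; a minimal sketch (sentinel name and messages invented):

    package sketch

    import (
        "errors"
        "fmt"
    )

    // errFallback stands in for a package-level sentinel such as
    // errFallbackToMetricNameMatch above.
    var errFallback = errors.New("fall back to metric name match")

    func slowPath() error {
        // Some layer wraps the sentinel with %w...
        return fmt.Errorf("slow path failed: %w", errFallback)
    }

    func search() error {
        err := slowPath()
        // ...so a plain err == errFallback check no longer matches,
        // while errors.Is unwraps the chain and still does.
        if errors.Is(err, errFallback) {
            return errFallback
        }
        return err
    }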
@@ -2569,14 +2569,14 @@ func (is *indexSearch) storeDateMetricID(date, metricID uint64) error { logger.Errorf("missing metricName by metricID %d; this could be the case after unclean shutdown; "+ "deleting the metricID, so it could be re-created next time", metricID) if err := is.db.deleteMetricIDs([]uint64{metricID}); err != nil { - return fmt.Errorf("cannot delete metricID %d after unclean shutdown: %s", metricID, err) + return fmt.Errorf("cannot delete metricID %d after unclean shutdown: %w", metricID, err) } return nil } - return fmt.Errorf("cannot find metricName by metricID %d: %s", metricID, err) + return fmt.Errorf("cannot find metricName by metricID %d: %w", metricID, err) } if err = mn.Unmarshal(kb.B); err != nil { - return fmt.Errorf("cannot unmarshal metricName %q obtained by metricID %d: %s", metricID, kb.B, err) + return fmt.Errorf("cannot unmarshal metricName %q obtained by metricID %d: %w", kb.B, metricID, err) } kb.B = marshalCommonPrefix(kb.B[:0], nsPrefixDateTagToMetricIDs) kb.B = encoding.MarshalUint64(kb.B, date) @@ -2595,7 +2595,7 @@ func (is *indexSearch) storeDateMetricID(date, metricID uint64) error { items.Next() } if err = is.db.tb.AddItems(items.Items); err != nil { - return fmt.Errorf("cannot add per-day entires for metricID %d: %s", metricID, err) + return fmt.Errorf("cannot add per-day entries for metricID %d: %w", metricID, err) } return nil } @@ -2642,7 +2642,7 @@ func (is *indexSearch) hasDateMetricID(date, metricID uint64) (bool, error) { if err == io.EOF { return false, nil } - return false, fmt.Errorf("error when searching for (date=%d, metricID=%d) entry: %s", date, metricID, err) + return false, fmt.Errorf("error when searching for (date=%d, metricID=%d) entry: %w", date, metricID, err) } if string(ts.Item) != string(kb.B) { return false, fmt.Errorf("unexpected entry for (date=%d, metricID=%d); got %q; want %q", date, metricID, ts.Item, kb.B) @@ -2741,7 +2741,7 @@ func (is *indexSearch) updateMetricIDsForPrefix(prefix []byte, metricIDs *uint64 } } if err := ts.Error(); err != nil { - return fmt.Errorf("error when searching for all metricIDs by prefix %q: %s", prefix, err) + return fmt.Errorf("error when searching for all metricIDs by prefix %q: %w", prefix, err) } return nil } @@ -2795,7 +2795,7 @@ func (is *indexSearch) intersectMetricIDsWithTagFilterNocache(tf *tagFilter, fil if err == errFallbackToMetricNameMatch { return nil, err } - return nil, fmt.Errorf("error when intersecting metricIDs for tagFilter in fast path: %s; tagFilter=%s", err, tf) + return nil, fmt.Errorf("error when intersecting metricIDs for tagFilter in fast path: %w; tagFilter=%s", err, tf) } return metricIDs, nil } @@ -2817,7 +2817,7 @@ func (is *indexSearch) intersectMetricIDsWithTagFilterNocache(tf *tagFilter, fil if err == errFallbackToMetricNameMatch { return nil, err } - return nil, fmt.Errorf("error when intersecting metricIDs for tagFilter in slow path: %s; tagFilter=%s", err, tf) + return nil, fmt.Errorf("error when intersecting metricIDs for tagFilter in slow path: %w; tagFilter=%s", err, tf) } return metricIDs, nil } @@ -2885,7 +2885,7 @@ func (mp *tagToMetricIDsRowParser) Reset() { func (mp *tagToMetricIDsRowParser) Init(b []byte, nsPrefixExpected byte) error { tail, nsPrefix, err := unmarshalCommonPrefix(b) if err != nil { - return fmt.Errorf("invalid tag->metricIDs row %q: %s", b, err) + return fmt.Errorf("invalid tag->metricIDs row %q: %w", b, err) } if nsPrefix != nsPrefixExpected { return fmt.Errorf("invalid prefix for tag->metricIDs row %q; got %d; want %d", b,
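Note that hasDateMetricID may safely keep the direct comparison err == io.EOF: it inspects the error before any wrapping happens. Once a caller receives the wrapped form, only errors.Is(err, io.EOF) matches. A sketch of that caller-side difference (lookup is a hypothetical stand-in):

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

// lookup stands in for a search that wraps io.EOF with context.
func lookup(date, metricID uint64) error {
	return fmt.Errorf("error when searching for (date=%d, metricID=%d) entry: %w", date, metricID, io.EOF)
}

func main() {
	err := lookup(18000, 42)
	// A direct comparison no longer matches once the error is wrapped...
	fmt.Println(err == io.EOF) // false
	// ...but errors.Is still detects io.EOF anywhere in the chain.
	fmt.Println(errors.Is(err, io.EOF)) // true
}
```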
nsPrefix, nsPrefixExpected) @@ -2901,7 +2901,7 @@ func (mp *tagToMetricIDsRowParser) Init(b []byte, nsPrefixExpected byte) error { mp.NSPrefix = nsPrefix tail, err = mp.Tag.Unmarshal(tail) if err != nil { - return fmt.Errorf("cannot unmarshal tag from tag->metricIDs row %q: %s", b, err) + return fmt.Errorf("cannot unmarshal tag from tag->metricIDs row %q: %w", b, err) } return mp.InitOnlyTail(b, tail) } diff --git a/lib/storage/index_db_test.go b/lib/storage/index_db_test.go index fa71293dc..624782754 100644 --- a/lib/storage/index_db_test.go +++ b/lib/storage/index_db_test.go @@ -585,7 +585,7 @@ func TestIndexDB(t *testing.T) { select { case err := <-ch: if err != nil { - errors = append(errors, fmt.Errorf("unexpected error: %s", err)) + errors = append(errors, fmt.Errorf("unexpected error: %w", err)) } case <-time.After(30 * time.Second): t.Fatalf("timeout") @@ -689,7 +689,7 @@ func testIndexDBGetOrCreateTSIDByName(db *indexDB, metricGroups int) ([]MetricNa // Create tsid for the metricName. var tsid TSID if err := is.GetOrCreateTSIDByName(&tsid, metricNameBuf); err != nil { - return nil, nil, fmt.Errorf("unexpected error when creating tsid for mn:\n%s: %s", &mn, err) + return nil, nil, fmt.Errorf("unexpected error when creating tsid for mn:\n%s: %w", &mn, err) } mns = append(mns, mn) @@ -701,7 +701,7 @@ func testIndexDBGetOrCreateTSIDByName(db *indexDB, metricGroups int) ([]MetricNa for i := range tsids { tsid := &tsids[i] if err := is.storeDateMetricID(date, tsid.MetricID); err != nil { - return nil, nil, fmt.Errorf("error in storeDateMetricID(%d, %d): %s", date, tsid.MetricID, err) + return nil, nil, fmt.Errorf("error in storeDateMetricID(%d, %d): %w", date, tsid.MetricID, err) } } @@ -736,7 +736,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC metricName := mn.Marshal(nil) if err := db.getTSIDByNameNoCreate(&tsidCopy, metricName); err != nil { - return fmt.Errorf("cannot obtain tsid #%d for mn %s: %s", i, mn, err) + return fmt.Errorf("cannot obtain tsid #%d for mn %s: %w", i, mn, err) } if isConcurrent { // Copy tsid.MetricID, since multiple TSIDs may match @@ -751,7 +751,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC var err error metricNameCopy, err = db.searchMetricName(metricNameCopy[:0], tsidCopy.MetricID) if err != nil { - return fmt.Errorf("error in searchMetricName for metricID=%d; i=%d: %s", tsidCopy.MetricID, i, err) + return fmt.Errorf("error in searchMetricName for metricID=%d; i=%d: %w", tsidCopy.MetricID, i, err) } if !bytes.Equal(metricName, metricNameCopy) { return fmt.Errorf("unexpected mn for metricID=%d;\ngot\n%q\nwant\n%q", tsidCopy.MetricID, metricNameCopy, metricName) @@ -769,7 +769,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC // Test SearchTagValues tvs, err := db.SearchTagValues(nil, 1e5) if err != nil { - return fmt.Errorf("error in SearchTagValues for __name__: %s", err) + return fmt.Errorf("error in SearchTagValues for __name__: %w", err) } if !hasValue(tvs, mn.MetricGroup) { return fmt.Errorf("SearchTagValues couldn't find %q; found %q", mn.MetricGroup, tvs) @@ -778,7 +778,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC tag := &mn.Tags[i] tvs, err := db.SearchTagValues(tag.Key, 1e5) if err != nil { - return fmt.Errorf("error in SearchTagValues for __name__: %s", err) + return fmt.Errorf("error in SearchTagValues for __name__: %w", err) } if !hasValue(tvs, tag.Value) { return fmt.Errorf("SearchTagValues 
couldn't find %q=%q; found %q", tag.Key, tag.Value, tvs) @@ -790,7 +790,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC // Test SearchTagKeys tks, err := db.SearchTagKeys(1e5) if err != nil { - return fmt.Errorf("error in SearchTagKeys: %s", err) + return fmt.Errorf("error in SearchTagKeys: %w", err) } if !hasValue(tks, nil) { return fmt.Errorf("cannot find __name__ in %q", tks) @@ -807,7 +807,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC if !isConcurrent { n, err := db.GetSeriesCount() if err != nil { - return fmt.Errorf("unexpected error in GetSeriesCount(): %s", err) + return fmt.Errorf("unexpected error in GetSeriesCount(): %w", err) } if n != uint64(len(timeseriesCounters)) { return fmt.Errorf("unexpected GetSeriesCount(); got %d; want %d", n, uint64(len(timeseriesCounters))) @@ -822,23 +822,23 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC // Search without regexps. tfs := NewTagFilters() if err := tfs.Add(nil, mn.MetricGroup, false, false); err != nil { - return fmt.Errorf("cannot create tag filter for MetricGroup: %s", err) + return fmt.Errorf("cannot create tag filter for MetricGroup: %w", err) } for j := 0; j < len(mn.Tags); j++ { t := &mn.Tags[j] if err := tfs.Add(t.Key, t.Value, false, false); err != nil { - return fmt.Errorf("cannot create tag filter for tag: %s", err) + return fmt.Errorf("cannot create tag filter for tag: %w", err) } } if err := tfs.Add(nil, []byte("foobar"), true, false); err != nil { - return fmt.Errorf("cannot add negative filter: %s", err) + return fmt.Errorf("cannot add negative filter: %w", err) } if err := tfs.Add(nil, nil, true, false); err != nil { - return fmt.Errorf("cannot add no-op negative filter: %s", err) + return fmt.Errorf("cannot add no-op negative filter: %w", err) } tsidsFound, err := db.searchTSIDs([]*TagFilters{tfs}, TimeRange{}, 1e5) if err != nil { - return fmt.Errorf("cannot search by exact tag filter: %s", err) + return fmt.Errorf("cannot search by exact tag filter: %w", err) } if !testHasTSID(tsidsFound, tsid) { return fmt.Errorf("tsids is missing in exact tsidsFound\ntsid=%+v\ntsidsFound=%+v\ntfs=%s\nmn=%s\ni=%d", tsid, tsidsFound, tfs, mn, i) @@ -847,7 +847,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC // Verify tag cache. tsidsCached, err := db.searchTSIDs([]*TagFilters{tfs}, TimeRange{}, 1e5) if err != nil { - return fmt.Errorf("cannot search by exact tag filter: %s", err) + return fmt.Errorf("cannot search by exact tag filter: %w", err) } if !reflect.DeepEqual(tsidsCached, tsidsFound) { return fmt.Errorf("unexpected tsids returned; got\n%+v; want\n%+v", tsidsCached, tsidsFound) @@ -855,11 +855,11 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC // Add negative filter for zeroing search results. 
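For these test helpers the conversion is behavior-preserving, but %w also enables a stricter assertion style: checking for a specific cause instead of substring-matching message text. A hedged sketch of that style (errNotFound and searchTagValues are invented for the example):

```go
package storage_test

import (
	"errors"
	"fmt"
	"testing"
)

// errNotFound is an invented sentinel for illustration only.
var errNotFound = errors.New("not found")

func searchTagValues() error {
	return fmt.Errorf("error in SearchTagValues for __name__: %w", errNotFound)
}

func TestSearchTagValuesError(t *testing.T) {
	// With %w the test asserts on the cause, not on the message text.
	if err := searchTagValues(); !errors.Is(err, errNotFound) {
		t.Fatalf("expected errNotFound, got %v", err)
	}
}
```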
if err := tfs.Add(nil, mn.MetricGroup, true, false); err != nil { - return fmt.Errorf("cannot add negative filter for zeroing search results: %s", err) + return fmt.Errorf("cannot add negative filter for zeroing search results: %w", err) } tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, TimeRange{}, 1e5) if err != nil { - return fmt.Errorf("cannot search by exact tag filter with full negative: %s", err) + return fmt.Errorf("cannot search by exact tag filter with full negative: %w", err) } if testHasTSID(tsidsFound, tsid) { return fmt.Errorf("unexpected tsid found for exact negative filter\ntsid=%+v\ntsidsFound=%+v\ntfs=%s\nmn=%s", tsid, tsidsFound, tfs, mn) @@ -880,7 +880,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC } tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, TimeRange{}, 1e5) if err != nil { - return fmt.Errorf("cannot search by regexp tag filter for Graphite wildcard: %s", err) + return fmt.Errorf("cannot search by regexp tag filter for Graphite wildcard: %w", err) } if !testHasTSID(tsidsFound, tsid) { return fmt.Errorf("tsids is missing in regexp for Graphite wildcard tsidsFound\ntsid=%+v\ntsidsFound=%+v\ntfs=%s\nmn=%s", tsid, tsidsFound, tfs, mn) @@ -889,36 +889,36 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC // Search with regexps. tfs.Reset() if err := tfs.Add(nil, mn.MetricGroup, false, true); err != nil { - return fmt.Errorf("cannot create regexp tag filter for MetricGroup: %s", err) + return fmt.Errorf("cannot create regexp tag filter for MetricGroup: %w", err) } for j := 0; j < len(mn.Tags); j++ { t := &mn.Tags[j] if err := tfs.Add(t.Key, append(t.Value, "|foo*."...), false, true); err != nil { - return fmt.Errorf("cannot create regexp tag filter for tag: %s", err) + return fmt.Errorf("cannot create regexp tag filter for tag: %w", err) } if err := tfs.Add(t.Key, append(t.Value, "|aaa|foo|bar"...), false, true); err != nil { - return fmt.Errorf("cannot create regexp tag filter for tag: %s", err) + return fmt.Errorf("cannot create regexp tag filter for tag: %w", err) } } if err := tfs.Add(nil, []byte("^foobar$"), true, true); err != nil { - return fmt.Errorf("cannot add negative filter with regexp: %s", err) + return fmt.Errorf("cannot add negative filter with regexp: %w", err) } if err := tfs.Add(nil, nil, true, true); err != nil { - return fmt.Errorf("cannot add no-op negative filter with regexp: %s", err) + return fmt.Errorf("cannot add no-op negative filter with regexp: %w", err) } tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, TimeRange{}, 1e5) if err != nil { - return fmt.Errorf("cannot search by regexp tag filter: %s", err) + return fmt.Errorf("cannot search by regexp tag filter: %w", err) } if !testHasTSID(tsidsFound, tsid) { return fmt.Errorf("tsids is missing in regexp tsidsFound\ntsid=%+v\ntsidsFound=%+v\ntfs=%s\nmn=%s", tsid, tsidsFound, tfs, mn) } if err := tfs.Add(nil, mn.MetricGroup, true, true); err != nil { - return fmt.Errorf("cannot add negative filter for zeroing search results: %s", err) + return fmt.Errorf("cannot add negative filter for zeroing search results: %w", err) } tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, TimeRange{}, 1e5) if err != nil { - return fmt.Errorf("cannot search by regexp tag filter with full negative: %s", err) + return fmt.Errorf("cannot search by regexp tag filter with full negative: %w", err) } if testHasTSID(tsidsFound, tsid) { return fmt.Errorf("unexpected tsid found for regexp negative 
filter\ntsid=%+v\ntsidsFound=%+v\ntfs=%s\nmn=%s", tsid, tsidsFound, tfs, mn) @@ -927,14 +927,14 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC // Search with filter matching zero results. tfs.Reset() if err := tfs.Add([]byte("non-existing-key"), []byte("foobar"), false, false); err != nil { - return fmt.Errorf("cannot add non-existing key: %s", err) + return fmt.Errorf("cannot add non-existing key: %w", err) } if err := tfs.Add(nil, mn.MetricGroup, false, true); err != nil { - return fmt.Errorf("cannot create tag filter for MetricGroup matching zero results: %s", err) + return fmt.Errorf("cannot create tag filter for MetricGroup matching zero results: %w", err) } tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, TimeRange{}, 1e5) if err != nil { - return fmt.Errorf("cannot search by non-existing tag filter: %s", err) + return fmt.Errorf("cannot search by non-existing tag filter: %w", err) } if len(tsidsFound) > 0 { return fmt.Errorf("non-zero tsidsFound for non-existing tag filter: %+v", tsidsFound) @@ -950,7 +950,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC tfs.Reset() tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, TimeRange{}, 1e5) if err != nil { - return fmt.Errorf("cannot search for common prefix: %s", err) + return fmt.Errorf("cannot search for common prefix: %w", err) } if !testHasTSID(tsidsFound, tsid) { return fmt.Errorf("tsids is missing in common prefix\ntsid=%+v\ntsidsFound=%+v\ntfs=%s\nmn=%s", tsid, tsidsFound, tfs, mn) @@ -959,11 +959,11 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC // Search with empty metricGroup. It should match zero results. tfs.Reset() if err := tfs.Add(nil, nil, false, false); err != nil { - return fmt.Errorf("cannot create tag filter for empty metricGroup: %s", err) + return fmt.Errorf("cannot create tag filter for empty metricGroup: %w", err) } tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs}, TimeRange{}, 1e5) if err != nil { - return fmt.Errorf("cannot search for empty metricGroup: %s", err) + return fmt.Errorf("cannot search for empty metricGroup: %w", err) } if len(tsidsFound) != 0 { return fmt.Errorf("unexpected non-empty tsids found for empty metricGroup: %v", tsidsFound) @@ -972,15 +972,15 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC // Search with multiple tfss tfs1 := NewTagFilters() if err := tfs1.Add(nil, nil, false, false); err != nil { - return fmt.Errorf("cannot create tag filter for empty metricGroup: %s", err) + return fmt.Errorf("cannot create tag filter for empty metricGroup: %w", err) } tfs2 := NewTagFilters() if err := tfs2.Add(nil, mn.MetricGroup, false, false); err != nil { - return fmt.Errorf("cannot create tag filter for MetricGroup: %s", err) + return fmt.Errorf("cannot create tag filter for MetricGroup: %w", err) } tsidsFound, err = db.searchTSIDs([]*TagFilters{tfs1, tfs2}, TimeRange{}, 1e5) if err != nil { - return fmt.Errorf("cannot search for empty metricGroup: %s", err) + return fmt.Errorf("cannot search for empty metricGroup: %w", err) } if !testHasTSID(tsidsFound, tsid) { return fmt.Errorf("tsids is missing when searching for multiple tfss \ntsid=%+v\ntsidsFound=%+v\ntfs=%s\nmn=%s", tsid, tsidsFound, tfs, mn) @@ -989,7 +989,7 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC // Verify empty tfss tsidsFound, err = db.searchTSIDs(nil, TimeRange{}, 1e5) if err != nil { - return fmt.Errorf("cannot search for nil tfss: %s", 
err) + return fmt.Errorf("cannot search for nil tfss: %w", err) } if len(tsidsFound) != 0 { return fmt.Errorf("unexpected non-empty tsids fround for nil tfss; found %d tsids", len(tsidsFound)) diff --git a/lib/storage/index_db_timing_test.go b/lib/storage/index_db_timing_test.go index 0a2520a0f..f8ec97afc 100644 --- a/lib/storage/index_db_timing_test.go +++ b/lib/storage/index_db_timing_test.go @@ -103,7 +103,7 @@ func benchmarkIndexDBAddTSIDs(db *indexDB, tsid *TSID, mn *MetricName, startOffs mn.sortTags() metricName = mn.Marshal(metricName[:0]) if err := is.GetOrCreateTSIDByName(tsid, metricName); err != nil { - panic(fmt.Errorf("cannot insert record: %s", err)) + panic(fmt.Errorf("cannot insert record: %w", err)) } } } @@ -353,7 +353,7 @@ func BenchmarkIndexDBGetTSIDs(b *testing.B) { mnLocal.sortTags() metricNameLocal = mnLocal.Marshal(metricNameLocal[:0]) if err := is.GetOrCreateTSIDByName(&tsidLocal, metricNameLocal); err != nil { - panic(fmt.Errorf("cannot obtain tsid: %s", err)) + panic(fmt.Errorf("cannot obtain tsid: %w", err)) } } } diff --git a/lib/storage/merge.go b/lib/storage/merge.go index 8d696cb27..32963af50 100644 --- a/lib/storage/merge.go +++ b/lib/storage/merge.go @@ -30,7 +30,7 @@ func mergeBlockStreams(ph *partHeader, bsw *blockStreamWriter, bsrs []*blockStre if err == errForciblyStopped { return err } - return fmt.Errorf("cannot merge %d streams: %s: %s", len(bsrs), bsrs, err) + return fmt.Errorf("cannot merge %d streams: %s: %w", len(bsrs), bsrs, err) } var bsmPool = &sync.Pool{ @@ -101,7 +101,7 @@ func mergeBlockStreamsInternal(ph *partHeader, bsw *blockStreamWriter, bsm *bloc // Slow path - pendingBlock and bsm.Block belong to the same time series, // so they must be merged. if err := unmarshalAndCalibrateScale(pendingBlock, bsm.Block); err != nil { - return fmt.Errorf("cannot unmarshal and calibrate scale for blocks to be merged: %s", err) + return fmt.Errorf("cannot unmarshal and calibrate scale for blocks to be merged: %w", err) } tmpBlock.Reset() tmpBlock.bh.TSID = bsm.Block.bh.TSID @@ -128,7 +128,7 @@ func mergeBlockStreamsInternal(ph *partHeader, bsw *blockStreamWriter, bsm *bloc bsw.WriteExternalBlock(tmpBlock, ph, rowsMerged) } if err := bsm.Error(); err != nil { - return fmt.Errorf("cannot read block to be merged: %s", err) + return fmt.Errorf("cannot read block to be merged: %w", err) } if pendingBlock != nil { bsw.WriteExternalBlock(pendingBlock, ph, rowsMerged) diff --git a/lib/storage/merge_timing_test.go b/lib/storage/merge_timing_test.go index ecdb60861..d4ae7d852 100644 --- a/lib/storage/merge_timing_test.go +++ b/lib/storage/merge_timing_test.go @@ -42,7 +42,7 @@ func benchmarkMergeBlockStreams(b *testing.B, mps []*inmemoryPart, rowsPerLoop i mpOut.Reset() bsw.InitFromInmemoryPart(&mpOut) if err := mergeBlockStreams(&mpOut.ph, &bsw, bsrs, nil, &rowsMerged, nil, &rowsDeleted); err != nil { - panic(fmt.Errorf("cannot merge block streams: %s", err)) + panic(fmt.Errorf("cannot merge block streams: %w", err)) } } }) diff --git a/lib/storage/metaindex_row.go b/lib/storage/metaindex_row.go index 81649af78..8dc1dbdbd 100644 --- a/lib/storage/metaindex_row.go +++ b/lib/storage/metaindex_row.go @@ -77,7 +77,7 @@ func (mr *metaindexRow) Unmarshal(src []byte) ([]byte, error) { // Unmarshal TSID tail, err := mr.TSID.Unmarshal(src) if err != nil { - return src, fmt.Errorf("cannot unmarshal TSID: %s", err) + return src, fmt.Errorf("cannot unmarshal TSID: %w", err) } src = tail @@ -130,11 +130,11 @@ func (mr *metaindexRow) Unmarshal(src []byte) ([]byte, error) { 
func unmarshalMetaindexRows(dst []metaindexRow, r io.Reader) ([]metaindexRow, error) { compressedData, err := ioutil.ReadAll(r) if err != nil { - return dst, fmt.Errorf("cannot read metaindex rows: %s", err) + return dst, fmt.Errorf("cannot read metaindex rows: %w", err) } data, err := encoding.DecompressZSTD(nil, compressedData) if err != nil { - return dst, fmt.Errorf("cannot decompress metaindex rows: %s", err) + return dst, fmt.Errorf("cannot decompress metaindex rows: %w", err) } dstLen := len(dst) @@ -147,7 +147,7 @@ func unmarshalMetaindexRows(dst []metaindexRow, r io.Reader) ([]metaindexRow, er mr := &dst[len(dst)-1] tail, err := mr.Unmarshal(data) if err != nil { - return dst, fmt.Errorf("cannot unmarshal metaindexRow #%d from metaindex data: %s", len(dst)-dstLen, err) + return dst, fmt.Errorf("cannot unmarshal metaindexRow #%d from metaindex data: %w", len(dst)-dstLen, err) } data = tail } diff --git a/lib/storage/metric_name.go b/lib/storage/metric_name.go index 265a4f361..385f51a9b 100644 --- a/lib/storage/metric_name.go +++ b/lib/storage/metric_name.go @@ -49,11 +49,11 @@ func (tag *Tag) Unmarshal(src []byte) ([]byte, error) { var err error src, tag.Key, err = unmarshalTagValue(tag.Key[:0], src) if err != nil { - return src, fmt.Errorf("cannot unmarshal key: %s", err) + return src, fmt.Errorf("cannot unmarshal key: %w", err) } src, tag.Value, err = unmarshalTagValue(tag.Value[:0], src) if err != nil { - return src, fmt.Errorf("cannot unmarshal value: %s", err) + return src, fmt.Errorf("cannot unmarshal value: %w", err) } return src, nil } @@ -376,7 +376,7 @@ func (mn *MetricName) Unmarshal(src []byte) error { var err error src, mn.MetricGroup, err = unmarshalTagValue(mn.MetricGroup[:0], src) if err != nil { - return fmt.Errorf("cannot unmarshal MetricGroup: %s", err) + return fmt.Errorf("cannot unmarshal MetricGroup: %w", err) } mn.Tags = mn.Tags[:0] @@ -385,7 +385,7 @@ func (mn *MetricName) Unmarshal(src []byte) error { var err error src, err = tag.Unmarshal(src) if err != nil { - return fmt.Errorf("cannot unmarshal tag: %s", err) + return fmt.Errorf("cannot unmarshal tag: %w", err) } } @@ -505,13 +505,13 @@ func (mn *MetricName) unmarshalRaw(src []byte) error { for len(src) > 0 { tail, key, err := unmarshalBytesFast(src) if err != nil { - return fmt.Errorf("cannot decode key: %s", err) + return fmt.Errorf("cannot decode key: %w", err) } src = tail tail, value, err := unmarshalBytesFast(src) if err != nil { - return fmt.Errorf("cannot decode value: %s", err) + return fmt.Errorf("cannot decode value: %w", err) } src = tail diff --git a/lib/storage/part.go b/lib/storage/part.go index 9ab84c263..1c8fa54ae 100644 --- a/lib/storage/part.go +++ b/lib/storage/part.go @@ -56,13 +56,13 @@ func openFilePart(path string) (*part, error) { var ph partHeader if err := ph.ParseFromPath(path); err != nil { - return nil, fmt.Errorf("cannot parse path to part: %s", err) + return nil, fmt.Errorf("cannot parse path to part: %w", err) } timestampsPath := path + "/timestamps.bin" timestampsFile, err := fs.OpenReaderAt(timestampsPath) if err != nil { - return nil, fmt.Errorf("cannot open timestamps file: %s", err) + return nil, fmt.Errorf("cannot open timestamps file: %w", err) } timestampsSize := fs.MustFileSize(timestampsPath) @@ -70,7 +70,7 @@ func openFilePart(path string) (*part, error) { valuesFile, err := fs.OpenReaderAt(valuesPath) if err != nil { timestampsFile.MustClose() - return nil, fmt.Errorf("cannot open values file: %s", err) + return nil, fmt.Errorf("cannot open values file: %w", 
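Sentinels are not the only beneficiaries: %w also lets callers recover typed errors with errors.As. The unmarshal paths here return plain fmt.Errorf values, so the ParseError type below is purely illustrative:

```go
package main

import (
	"errors"
	"fmt"
)

// ParseError is a hypothetical typed error carrying a byte offset.
type ParseError struct {
	Offset int
}

func (e *ParseError) Error() string {
	return fmt.Sprintf("parse failure at offset %d", e.Offset)
}

func unmarshalTag() error {
	// The typed error stays reachable through the %w wrapper.
	return fmt.Errorf("cannot unmarshal key: %w", &ParseError{Offset: 12})
}

func main() {
	var pe *ParseError
	if err := unmarshalTag(); errors.As(err, &pe) {
		// errors.As walks the %w chain and fills pe with the typed value.
		fmt.Println("offset:", pe.Offset)
	}
}
```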
err) } valuesSize := fs.MustFileSize(valuesPath) @@ -79,7 +79,7 @@ func openFilePart(path string) (*part, error) { if err != nil { timestampsFile.MustClose() valuesFile.MustClose() - return nil, fmt.Errorf("cannot open index file: %s", err) + return nil, fmt.Errorf("cannot open index file: %w", err) } indexSize := fs.MustFileSize(indexPath) @@ -89,7 +89,7 @@ func openFilePart(path string) (*part, error) { timestampsFile.MustClose() valuesFile.MustClose() indexFile.MustClose() - return nil, fmt.Errorf("cannot open metaindex file: %s", err) + return nil, fmt.Errorf("cannot open metaindex file: %w", err) } metaindexSize := fs.MustFileSize(metaindexPath) @@ -105,7 +105,7 @@ func newPart(ph *partHeader, path string, size uint64, metaindexReader filestrea var errors []error metaindex, err := unmarshalMetaindexRows(nil, metaindexReader) if err != nil { - errors = append(errors, fmt.Errorf("cannot unmarshal metaindex data: %s", err)) + errors = append(errors, fmt.Errorf("cannot unmarshal metaindex data: %w", err)) } metaindexReader.MustClose() @@ -122,7 +122,7 @@ func newPart(ph *partHeader, path string, size uint64, metaindexReader filestrea if len(errors) > 0 { // Return only the first error, since it has no sense in returning all errors. - err = fmt.Errorf("cannot initialize part %q: %s", &p, errors[0]) + err = fmt.Errorf("cannot initialize part %q: %w", &p, errors[0]) p.MustClose() return nil, err } diff --git a/lib/storage/part_header.go b/lib/storage/part_header.go index 8e8e95fb8..54bbb760b 100644 --- a/lib/storage/part_header.go +++ b/lib/storage/part_header.go @@ -76,19 +76,19 @@ func (ph *partHeader) ParseFromPath(path string) error { ph.RowsCount, err = strconv.ParseUint(a[0], 10, 64) if err != nil { - return fmt.Errorf("cannot parse rowsCount from partName %q: %s", partName, err) + return fmt.Errorf("cannot parse rowsCount from partName %q: %w", partName, err) } ph.BlocksCount, err = strconv.ParseUint(a[1], 10, 64) if err != nil { - return fmt.Errorf("cannot parse blocksCount from partName %q: %s", partName, err) + return fmt.Errorf("cannot parse blocksCount from partName %q: %w", partName, err) } ph.MinTimestamp, err = fromUserReadableTimestamp(a[2]) if err != nil { - return fmt.Errorf("cannot parse minTimestamp from partName %q: %s", partName, err) + return fmt.Errorf("cannot parse minTimestamp from partName %q: %w", partName, err) } ph.MaxTimestamp, err = fromUserReadableTimestamp(a[3]) if err != nil { - return fmt.Errorf("cannot parse maxTimestamp from partName %q: %s", partName, err) + return fmt.Errorf("cannot parse maxTimestamp from partName %q: %w", partName, err) } if ph.MinTimestamp > ph.MaxTimestamp { diff --git a/lib/storage/part_search.go b/lib/storage/part_search.go index 29f2dc365..705595b53 100644 --- a/lib/storage/part_search.go +++ b/lib/storage/part_search.go @@ -161,7 +161,7 @@ func (ps *partSearch) nextBHS() bool { var err error ib, err = ps.readIndexBlock(mr) if err != nil { - ps.err = fmt.Errorf("cannot read index block for part %q at offset %d with size %d: %s", + ps.err = fmt.Errorf("cannot read index block for part %q at offset %d with size %d: %w", &ps.p.ph, mr.IndexBlockOffset, mr.IndexBlockSize, err) return false } @@ -216,12 +216,12 @@ func (ps *partSearch) readIndexBlock(mr *metaindexRow) (*indexBlock, error) { var err error ps.indexBuf, err = encoding.DecompressZSTD(ps.indexBuf[:0], ps.compressedIndexBuf) if err != nil { - return nil, fmt.Errorf("cannot decompress index block: %s", err) + return nil, fmt.Errorf("cannot decompress index block: %w", err) } 
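newPart collects several errors but wraps only errors[0], which fits the Go 1.13 rule that fmt.Errorf accepts a single %w operand. As an aside, Go 1.20 later added errors.Join plus support for multiple %w verbs, which would keep every failure reachable; a sketch assuming a Go 1.20+ toolchain:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	errA := errors.New("cannot unmarshal metaindex data")
	errB := errors.New("cannot close reader")

	// errors.Join (Go 1.20+) bundles both failures; %w then wraps the bundle.
	joined := errors.Join(errA, errB)
	err := fmt.Errorf("cannot initialize part: %w", joined)

	fmt.Println(errors.Is(err, errA)) // true
	fmt.Println(errors.Is(err, errB)) // true
}
```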
ib := getIndexBlock() ib.bhs, err = unmarshalBlockHeaders(ib.bhs[:0], ps.indexBuf, int(mr.BlockHeadersCount)) if err != nil { - return nil, fmt.Errorf("cannot unmarshal index block: %s", err) + return nil, fmt.Errorf("cannot unmarshal index block: %w", err) } return ib, nil } diff --git a/lib/storage/part_search_test.go b/lib/storage/part_search_test.go index d06d2ac01..6a34bded2 100644 --- a/lib/storage/part_search_test.go +++ b/lib/storage/part_search_test.go @@ -1255,7 +1255,7 @@ func testPartSearchSerial(p *part, tsids []TSID, tr TimeRange, expectedRawBlocks bs = append(bs, b) } if err := ps.Error(); err != nil { - return fmt.Errorf("unexpected error in search: %s", err) + return fmt.Errorf("unexpected error in search: %w", err) } if bs == nil { @@ -1263,7 +1263,7 @@ func testPartSearchSerial(p *part, tsids []TSID, tr TimeRange, expectedRawBlocks } rbs := newTestRawBlocks(bs, tr) if err := testEqualRawBlocks(rbs, expectedRawBlocks); err != nil { - return fmt.Errorf("unequal blocks: %s", err) + return fmt.Errorf("unequal blocks: %w", err) } return nil } @@ -1297,7 +1297,7 @@ func newTestRawBlocks(bs []Block, tr TimeRange) []rawBlock { func newTestRawBlock(b *Block, tr TimeRange) rawBlock { if err := b.UnmarshalData(); err != nil { - panic(fmt.Errorf("cannot unmarshal block data: %s", err)) + panic(fmt.Errorf("cannot unmarshal block data: %w", err)) } var rb rawBlock var values []int64 @@ -1425,7 +1425,7 @@ func newTestPart(rows []rawRow) *part { mp := newTestInmemoryPart(rows) p, err := mp.NewPart() if err != nil { - panic(fmt.Errorf("cannot create new part: %s", err)) + panic(fmt.Errorf("cannot create new part: %w", err)) } return p } diff --git a/lib/storage/partition.go b/lib/storage/partition.go index 99fcbe680..3bf57a5e7 100644 --- a/lib/storage/partition.go +++ b/lib/storage/partition.go @@ -208,10 +208,10 @@ func createPartition(timestamp int64, smallPartitionsPath, bigPartitionsPath str logger.Infof("creating a partition %q with smallPartsPath=%q, bigPartsPath=%q", name, smallPartsPath, bigPartsPath) if err := createPartitionDirs(smallPartsPath); err != nil { - return nil, fmt.Errorf("cannot create directories for small parts %q: %s", smallPartsPath, err) + return nil, fmt.Errorf("cannot create directories for small parts %q: %w", smallPartsPath, err) } if err := createPartitionDirs(bigPartsPath); err != nil { - return nil, fmt.Errorf("cannot create directories for big parts %q: %s", bigPartsPath, err) + return nil, fmt.Errorf("cannot create directories for big parts %q: %w", bigPartsPath, err) } pt := newPartition(name, smallPartsPath, bigPartsPath, getDeletedMetricIDs) @@ -252,19 +252,19 @@ func openPartition(smallPartsPath, bigPartsPath string, getDeletedMetricIDs func smallParts, err := openParts(smallPartsPath, bigPartsPath, smallPartsPath) if err != nil { - return nil, fmt.Errorf("cannot open small parts from %q: %s", smallPartsPath, err) + return nil, fmt.Errorf("cannot open small parts from %q: %w", smallPartsPath, err) } bigParts, err := openParts(smallPartsPath, bigPartsPath, bigPartsPath) if err != nil { mustCloseParts(smallParts) - return nil, fmt.Errorf("cannot open big parts from %q: %s", bigPartsPath, err) + return nil, fmt.Errorf("cannot open big parts from %q: %w", bigPartsPath, err) } pt := newPartition(name, smallPartsPath, bigPartsPath, getDeletedMetricIDs) pt.smallParts = smallParts pt.bigParts = bigParts if err := pt.tr.fromPartitionName(name); err != nil { - return nil, fmt.Errorf("cannot obtain partition time range from smallPartsPath %q: %s", 
smallPartsPath, err) + return nil, fmt.Errorf("cannot obtain partition time range from smallPartsPath %q: %w", smallPartsPath, err) } pt.startMergeWorkers() pt.startRawRowsFlusher() @@ -789,7 +789,7 @@ func (pt *partition) flushInmemoryParts(dstPws []*partWrapper, force bool) ([]*p pt.partsLock.Unlock() if err := pt.mergePartsOptimal(dstPws); err != nil { - return dstPws, fmt.Errorf("cannot merge %d inmemory parts: %s", len(dstPws), err) + return dstPws, fmt.Errorf("cannot merge %d inmemory parts: %w", len(dstPws), err) } return dstPws, nil } @@ -797,13 +797,13 @@ func (pt *partition) flushInmemoryParts(dstPws []*partWrapper, force bool) ([]*p func (pt *partition) mergePartsOptimal(pws []*partWrapper) error { for len(pws) > defaultPartsToMerge { if err := pt.mergeParts(pws[:defaultPartsToMerge], nil); err != nil { - return fmt.Errorf("cannot merge %d parts: %s", defaultPartsToMerge, err) + return fmt.Errorf("cannot merge %d parts: %w", defaultPartsToMerge, err) } pws = pws[defaultPartsToMerge:] } if len(pws) > 0 { if err := pt.mergeParts(pws, nil); err != nil { - return fmt.Errorf("cannot merge %d parts: %s", len(pws), err) + return fmt.Errorf("cannot merge %d parts: %w", len(pws), err) } } return nil @@ -1028,7 +1028,7 @@ func (pt *partition) mergeParts(pws []*partWrapper, stopCh <-chan struct{}) erro bsr.InitFromInmemoryPart(pw.mp) } else { if err := bsr.InitFromFilePart(pw.p.path); err != nil { - return fmt.Errorf("cannot open source part for merging: %s", err) + return fmt.Errorf("cannot open source part for merging: %w", err) } } bsrs = append(bsrs, bsr) @@ -1054,7 +1054,7 @@ func (pt *partition) mergeParts(pws []*partWrapper, stopCh <-chan struct{}) erro bsw := getBlockStreamWriter() compressLevel := getCompressLevelForRowsCount(outRowsCount, outBlocksCount) if err := bsw.InitFromFilePart(tmpPartPath, nocache, compressLevel); err != nil { - return fmt.Errorf("cannot create destination part %q: %s", tmpPartPath, err) + return fmt.Errorf("cannot create destination part %q: %w", tmpPartPath, err) } // Merge parts. @@ -1072,7 +1072,7 @@ func (pt *partition) mergeParts(pws []*partWrapper, stopCh <-chan struct{}) erro if err == errForciblyStopped { return err } - return fmt.Errorf("error when merging parts to %q: %s", tmpPartPath, err) + return fmt.Errorf("error when merging parts to %q: %w", tmpPartPath, err) } // Close bsrs. @@ -1098,12 +1098,12 @@ func (pt *partition) mergeParts(pws []*partWrapper, stopCh <-chan struct{}) erro fmt.Fprintf(&bb, "%s -> %s\n", tmpPartPath, dstPartPath) txnPath := fmt.Sprintf("%s/txn/%016X", ptPath, mergeIdx) if err := fs.WriteFileAtomically(txnPath, bb.B); err != nil { - return fmt.Errorf("cannot create transaction file %q: %s", txnPath, err) + return fmt.Errorf("cannot create transaction file %q: %w", txnPath, err) } // Run the created transaction. if err := runTransaction(&pt.snapshotLock, pt.smallPartsPath, pt.bigPartsPath, txnPath); err != nil { - return fmt.Errorf("cannot execute transaction %q: %s", txnPath, err) + return fmt.Errorf("cannot execute transaction %q: %w", txnPath, err) } var newPW *partWrapper @@ -1112,7 +1112,7 @@ func (pt *partition) mergeParts(pws []*partWrapper, stopCh <-chan struct{}) erro // Open the merged part if it is non-empty. 
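mergeParts deliberately returns errForciblyStopped bare instead of wrapping it, so upstream == checks keep working. With %w available, the alternative is to wrap unconditionally and have callers test via errors.Is; the sketch below contrasts the two styles with invented names:

```go
package main

import (
	"errors"
	"fmt"
)

// Invented stand-in for the package's errForciblyStopped sentinel.
var errForciblyStopped = errors.New("forcibly stopped")

func mergeParts(wrap bool) error {
	err := errForciblyStopped
	if !wrap {
		// Current style: return the sentinel bare so == keeps working.
		return err
	}
	// Alternative style: wrap with context; callers must use errors.Is.
	return fmt.Errorf("error when merging parts to %q: %w", "tmp/part", err)
}

func main() {
	fmt.Println(mergeParts(false) == errForciblyStopped)         // true
	fmt.Println(mergeParts(true) == errForciblyStopped)          // false
	fmt.Println(errors.Is(mergeParts(true), errForciblyStopped)) // true
}
```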
newP, err := openFilePart(dstPartPath) if err != nil { - return fmt.Errorf("cannot open merged part %q: %s", dstPartPath, err) + return fmt.Errorf("cannot open merged part %q: %w", dstPartPath, err) } newPSize = newP.size newPW = &partWrapper{ @@ -1316,7 +1316,7 @@ func openParts(pathPrefix1, pathPrefix2, path string) ([]*partWrapper, error) { } d, err := os.Open(path) if err != nil { - return nil, fmt.Errorf("cannot open directory %q: %s", path, err) + return nil, fmt.Errorf("cannot open directory %q: %w", path, err) } defer fs.MustClose(d) @@ -1324,7 +1324,7 @@ func openParts(pathPrefix1, pathPrefix2, path string) ([]*partWrapper, error) { // Snapshots cannot be created yet, so use fakeSnapshotLock. var fakeSnapshotLock sync.RWMutex if err := runTransactions(&fakeSnapshotLock, pathPrefix1, pathPrefix2, path); err != nil { - return nil, fmt.Errorf("cannot run transactions from %q: %s", path, err) + return nil, fmt.Errorf("cannot run transactions from %q: %w", path, err) } txnDir := path + "/txn" @@ -1332,13 +1332,13 @@ func openParts(pathPrefix1, pathPrefix2, path string) ([]*partWrapper, error) { tmpDir := path + "/tmp" fs.MustRemoveAll(tmpDir) if err := createPartitionDirs(path); err != nil { - return nil, fmt.Errorf("cannot create directories for partition %q: %s", path, err) + return nil, fmt.Errorf("cannot create directories for partition %q: %w", path, err) } // Open parts. fis, err := d.Readdir(-1) if err != nil { - return nil, fmt.Errorf("cannot read directory %q: %s", d.Name(), err) + return nil, fmt.Errorf("cannot read directory %q: %w", d.Name(), err) } var pws []*partWrapper for _, fi := range fis { @@ -1357,7 +1357,7 @@ func openParts(pathPrefix1, pathPrefix2, path string) ([]*partWrapper, error) { p, err := openFilePart(partPath) if err != nil { mustCloseParts(pws) - return nil, fmt.Errorf("cannot open part %q: %s", partPath, err) + return nil, fmt.Errorf("cannot open part %q: %w", partPath, err) } logger.Infof("opened part %q in %.3f seconds", partPath, time.Since(startTime).Seconds()) @@ -1391,7 +1391,7 @@ func (pt *partition) CreateSnapshotAt(smallPath, bigPath string) error { // Flush inmemory data to disk. 
pt.flushRawRows(true) if _, err := pt.flushInmemoryParts(nil, true); err != nil { - return fmt.Errorf("cannot flush inmemory parts: %s", err) + return fmt.Errorf("cannot flush inmemory parts: %w", err) } // The snapshot must be created under the lock in order to prevent from @@ -1400,10 +1400,10 @@ func (pt *partition) CreateSnapshotAt(smallPath, bigPath string) error { defer pt.snapshotLock.Unlock() if err := pt.createSnapshot(pt.smallPartsPath, smallPath); err != nil { - return fmt.Errorf("cannot create snapshot for %q: %s", pt.smallPartsPath, err) + return fmt.Errorf("cannot create snapshot for %q: %w", pt.smallPartsPath, err) } if err := pt.createSnapshot(pt.bigPartsPath, bigPath); err != nil { - return fmt.Errorf("cannot create snapshot for %q: %s", pt.bigPartsPath, err) + return fmt.Errorf("cannot create snapshot for %q: %w", pt.bigPartsPath, err) } logger.Infof("created partition snapshot of %q and %q at %q and %q in %.3f seconds", @@ -1413,18 +1413,18 @@ func (pt *partition) createSnapshot(srcDir, dstDir string) error { if err := fs.MkdirAllFailIfExist(dstDir); err != nil { - return fmt.Errorf("cannot create snapshot dir %q: %s", dstDir, err) + return fmt.Errorf("cannot create snapshot dir %q: %w", dstDir, err) } d, err := os.Open(srcDir) if err != nil { - return fmt.Errorf("cannot open difrectory: %s", err) + return fmt.Errorf("cannot open directory: %w", err) } defer fs.MustClose(d) fis, err := d.Readdir(-1) if err != nil { - return fmt.Errorf("cannot read directory: %s", err) + return fmt.Errorf("cannot read directory: %w", err) } for _, fi := range fis { if !fs.IsDirOrSymlink(fi) { @@ -1439,7 +1439,7 @@ func (pt *partition) createSnapshot(srcDir, dstDir string) error { srcPartPath := srcDir + "/" + fn dstPartPath := dstDir + "/" + fn if err := fs.HardLinkFiles(srcPartPath, dstPartPath); err != nil { - return fmt.Errorf("cannot create hard links from %q to %q: %s", srcPartPath, dstPartPath, err) + return fmt.Errorf("cannot create hard links from %q to %q: %w", srcPartPath, dstPartPath, err) } } @@ -1462,13 +1462,13 @@ func runTransactions(txnLock *sync.RWMutex, pathPrefix1, pathPrefix2, path strin if os.IsNotExist(err) { return nil } - return fmt.Errorf("cannot open %q: %s", txnDir, err) + return fmt.Errorf("cannot open %q: %w", txnDir, err) } defer fs.MustClose(d) fis, err := d.Readdir(-1) if err != nil { - return fmt.Errorf("cannot read directory %q: %s", d.Name(), err) + return fmt.Errorf("cannot read directory %q: %w", d.Name(), err) } // Sort transaction files by id.
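runTransactions calls os.IsNotExist at the first point of contact with the os package, where it is still reliable. That predicate predates wrapping and does not see through %w chains, so once errors are wrapped, errors.Is(err, fs.ErrNotExist) (with io/fs, Go 1.16+) is the form that still matches:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func openTxnDir(dir string) error {
	d, err := os.Open(dir)
	if err != nil {
		return fmt.Errorf("cannot open %q: %w", dir, err)
	}
	defer d.Close()
	return nil
}

func main() {
	err := openTxnDir("/no/such/txn")
	// os.IsNotExist predates wrapping and does not unwrap fmt's %w chain.
	fmt.Println(os.IsNotExist(err)) // false
	// errors.Is walks the chain down to the underlying syscall error.
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true
}
```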
@@ -1484,7 +1484,7 @@ func runTransactions(txnLock *sync.RWMutex, pathPrefix1, pathPrefix2, path strin } txnPath := txnDir + "/" + fn if err := runTransaction(txnLock, pathPrefix1, pathPrefix2, txnPath); err != nil { - return fmt.Errorf("cannot run transaction from %q: %s", txnPath, err) + return fmt.Errorf("cannot run transaction from %q: %w", txnPath, err) } } return nil @@ -1498,7 +1498,7 @@ func runTransaction(txnLock *sync.RWMutex, pathPrefix1, pathPrefix2, txnPath str data, err := ioutil.ReadFile(txnPath) if err != nil { - return fmt.Errorf("cannot read transaction file: %s", err) + return fmt.Errorf("cannot read transaction file: %w", err) } if len(data) > 0 && data[len(data)-1] == '\n' { data = data[:len(data)-1] @@ -1519,7 +1519,7 @@ func runTransaction(txnLock *sync.RWMutex, pathPrefix1, pathPrefix2, txnPath str for _, path := range rmPaths { path, err := validatePath(pathPrefix1, pathPrefix2, path) if err != nil { - return fmt.Errorf("invalid path to remove: %s", err) + return fmt.Errorf("invalid path to remove: %w", err) } removeWG.Add(1) fs.MustRemoveAllWithDoneCallback(path, removeWG.Done) @@ -1530,17 +1530,17 @@ func runTransaction(txnLock *sync.RWMutex, pathPrefix1, pathPrefix2, txnPath str dstPath := mvPaths[1] srcPath, err = validatePath(pathPrefix1, pathPrefix2, srcPath) if err != nil { - return fmt.Errorf("invalid source path to rename: %s", err) + return fmt.Errorf("invalid source path to rename: %w", err) } if len(dstPath) > 0 { // Move srcPath to dstPath. dstPath, err = validatePath(pathPrefix1, pathPrefix2, dstPath) if err != nil { - return fmt.Errorf("invalid destination path to rename: %s", err) + return fmt.Errorf("invalid destination path to rename: %w", err) } if fs.IsPathExist(srcPath) { if err := os.Rename(srcPath, dstPath); err != nil { - return fmt.Errorf("cannot rename %q to %q: %s", srcPath, dstPath, err) + return fmt.Errorf("cannot rename %q to %q: %w", srcPath, dstPath, err) } } else if !fs.IsPathExist(dstPath) { // Emit info message for the expected condition after unclean shutdown on NFS disk. 
@@ -1579,16 +1579,16 @@ func validatePath(pathPrefix1, pathPrefix2, path string) (string, error) { pathPrefix1, err = filepath.Abs(pathPrefix1) if err != nil { - return path, fmt.Errorf("cannot determine absolute path for pathPrefix1=%q: %s", pathPrefix1, err) + return path, fmt.Errorf("cannot determine absolute path for pathPrefix1=%q: %w", pathPrefix1, err) } pathPrefix2, err = filepath.Abs(pathPrefix2) if err != nil { - return path, fmt.Errorf("cannot determine absolute path for pathPrefix2=%q: %s", pathPrefix2, err) + return path, fmt.Errorf("cannot determine absolute path for pathPrefix2=%q: %w", pathPrefix2, err) } path, err = filepath.Abs(path) if err != nil { - return path, fmt.Errorf("cannot determine absolute path for %q: %s", path, err) + return path, fmt.Errorf("cannot determine absolute path for %q: %w", path, err) } if !strings.HasPrefix(path, pathPrefix1+"/") && !strings.HasPrefix(path, pathPrefix2+"/") { return path, fmt.Errorf("invalid path %q; must start with either %q or %q", path, pathPrefix1+"/", pathPrefix2+"/") @@ -1600,11 +1600,11 @@ func createPartitionDirs(path string) error { path = filepath.Clean(path) txnPath := path + "/txn" if err := fs.MkdirAllFailIfExist(txnPath); err != nil { - return fmt.Errorf("cannot create txn directory %q: %s", txnPath, err) + return fmt.Errorf("cannot create txn directory %q: %w", txnPath, err) } tmpPath := path + "/tmp" if err := fs.MkdirAllFailIfExist(tmpPath); err != nil { - return fmt.Errorf("cannot create tmp directory %q: %s", tmpPath, err) + return fmt.Errorf("cannot create tmp directory %q: %w", tmpPath, err) } fs.MustSyncPath(path) return nil diff --git a/lib/storage/partition_search.go b/lib/storage/partition_search.go index b7ceaa015..1639102be 100644 --- a/lib/storage/partition_search.go +++ b/lib/storage/partition_search.go @@ -106,7 +106,7 @@ func (pts *partitionSearch) Init(pt *partition, tsids []TSID, tr TimeRange) { } if len(errors) > 0 { // Return only the first error, since it has no sense in returning all errors. 
- pts.err = fmt.Errorf("cannot initialize partition search: %s", errors[0]) + pts.err = fmt.Errorf("cannot initialize partition search: %w", errors[0]) return } if len(pts.psHeap) == 0 { @@ -134,7 +134,7 @@ func (pts *partitionSearch) NextBlock() bool { pts.err = pts.nextBlock() if pts.err != nil { if pts.err != io.EOF { - pts.err = fmt.Errorf("cannot obtain the next block to search in the partition: %s", pts.err) + pts.err = fmt.Errorf("cannot obtain the next block to search in the partition: %w", pts.err) } return false } diff --git a/lib/storage/partition_search_test.go b/lib/storage/partition_search_test.go index fd997600e..9342a9cbc 100644 --- a/lib/storage/partition_search_test.go +++ b/lib/storage/partition_search_test.go @@ -247,12 +247,12 @@ func testPartitionSearchSerial(pt *partition, tsids []TSID, tr TimeRange, rbsExp bs = append(bs, b) } if err := pts.Error(); err != nil { - return fmt.Errorf("unexpected error: %s", err) + return fmt.Errorf("unexpected error: %w", err) } pts.MustClose() rbs := newTestRawBlocks(bs, tr) if err := testEqualRawBlocks(rbs, rbsExpected); err != nil { - return fmt.Errorf("unequal blocks: %s", err) + return fmt.Errorf("unequal blocks: %w", err) } if rowsCountExpected >= 0 { @@ -270,7 +270,7 @@ func testPartitionSearchSerial(pt *partition, tsids []TSID, tr TimeRange, rbsExp return fmt.Errorf("unexpected block got for an empty tsids list: %+v", pts.BlockRef) } if err := pts.Error(); err != nil { - return fmt.Errorf("unexpected error on empty tsids list: %s", err) + return fmt.Errorf("unexpected error on empty tsids list: %w", err) } pts.MustClose() diff --git a/lib/storage/search.go b/lib/storage/search.go index 500608b8f..565e7c0f8 100644 --- a/lib/storage/search.go +++ b/lib/storage/search.go @@ -196,14 +196,14 @@ func (tf *TagFilter) Marshal(dst []byte) []byte { func (tf *TagFilter) Unmarshal(src []byte) ([]byte, error) { tail, k, err := encoding.UnmarshalBytes(src) if err != nil { - return tail, fmt.Errorf("cannot unmarshal Key: %s", err) + return tail, fmt.Errorf("cannot unmarshal Key: %w", err) } tf.Key = append(tf.Key[:0], k...) src = tail tail, v, err := encoding.UnmarshalBytes(src) if err != nil { - return tail, fmt.Errorf("cannot unmarshal Value: %s", err) + return tail, fmt.Errorf("cannot unmarshal Value: %w", err) } tf.Value = append(tf.Value[:0], v...) src = tail @@ -266,21 +266,21 @@ func (sq *SearchQuery) Marshal(dst []byte) []byte { func (sq *SearchQuery) Unmarshal(src []byte) ([]byte, error) { tail, minTs, err := encoding.UnmarshalVarInt64(src) if err != nil { - return src, fmt.Errorf("cannot unmarshal MinTimestamp: %s", err) + return src, fmt.Errorf("cannot unmarshal MinTimestamp: %w", err) } sq.MinTimestamp = minTs src = tail tail, maxTs, err := encoding.UnmarshalVarInt64(src) if err != nil { - return src, fmt.Errorf("cannot unmarshal MaxTimestamp: %s", err) + return src, fmt.Errorf("cannot unmarshal MaxTimestamp: %w", err) } sq.MaxTimestamp = maxTs src = tail tail, tfssCount, err := encoding.UnmarshalVarUint64(src) if err != nil { - return src, fmt.Errorf("cannot unmarshal the count of TagFilterss: %s", err) + return src, fmt.Errorf("cannot unmarshal the count of TagFilterss: %w", err) } if n := int(tfssCount) - cap(sq.TagFilterss); n > 0 { sq.TagFilterss = append(sq.TagFilterss[:cap(sq.TagFilterss)], make([][]TagFilter, n)...) 
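One reason this sweep is low-risk: %w formats its operand exactly like %v, so every message in these hunks renders byte-for-byte as before; the only observable change is that the returned error gains an Unwrap method. A quick check:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	cause := errors.New("unexpected end of input")

	withS := fmt.Errorf("cannot unmarshal Key: %s", cause)
	withW := fmt.Errorf("cannot unmarshal Key: %w", cause)

	// Identical text either way...
	fmt.Println(withS.Error() == withW.Error()) // true
	// ...but only the %w version exposes the cause for unwrapping.
	fmt.Println(errors.Unwrap(withS)) // <nil>
	fmt.Println(errors.Unwrap(withW)) // unexpected end of input
}
```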
@@ -291,7 +291,7 @@ func (sq *SearchQuery) Unmarshal(src []byte) ([]byte, error) { for i := 0; i < int(tfssCount); i++ { tail, tfsCount, err := encoding.UnmarshalVarUint64(src) if err != nil { - return src, fmt.Errorf("cannot unmarshal the count of TagFilters: %s", err) + return src, fmt.Errorf("cannot unmarshal the count of TagFilters: %w", err) } src = tail @@ -303,7 +303,7 @@ func (sq *SearchQuery) Unmarshal(src []byte) ([]byte, error) { for j := 0; j < int(tfsCount); j++ { tail, err := tagFilters[j].Unmarshal(src) if err != nil { - return tail, fmt.Errorf("cannot unmarshal TagFilter #%d: %s", j, err) + return tail, fmt.Errorf("cannot unmarshal TagFilter #%d: %w", j, err) } src = tail } diff --git a/lib/storage/search_test.go b/lib/storage/search_test.go index ae3041f94..c4e41b861 100644 --- a/lib/storage/search_test.go +++ b/lib/storage/search_test.go @@ -185,13 +185,13 @@ func testSearchInternal(st *Storage, tr TimeRange, mrs []MetricRow, accountsCoun tfs := NewTagFilters() metricGroupRe := fmt.Sprintf(`metric_\d*%d%d`, i, i) if err := tfs.Add(nil, []byte(metricGroupRe), false, true); err != nil { - return fmt.Errorf("cannot add metricGroupRe=%q: %s", metricGroupRe, err) + return fmt.Errorf("cannot add metricGroupRe=%q: %w", metricGroupRe, err) } if err := tfs.Add([]byte("job"), []byte("nonexisting-service"), true, false); err != nil { - return fmt.Errorf("cannot add tag filter %q=%q: %s", "job", "nonexsitsing-service", err) + return fmt.Errorf("cannot add tag filter %q=%q: %w", "job", "nonexisting-service", err) } if err := tfs.Add([]byte("instance"), []byte(".*"), false, true); err != nil { - return fmt.Errorf("cannot add tag filter %q=%q: %s", "instance", ".*", err) + return fmt.Errorf("cannot add tag filter %q=%q: %w", "instance", ".*", err) } // Build extectedMrs.
@@ -204,7 +204,7 @@ func testSearchInternal(st *Storage, tr TimeRange, mrs []MetricRow, accountsCoun continue } if err := mn.unmarshalRaw(mr.MetricNameRaw); err != nil { - return fmt.Errorf("cannot unmarshal MetricName: %s", err) + return fmt.Errorf("cannot unmarshal MetricName: %w", err) } if !metricGroupRegexp.Match(mn.MetricGroup) { continue @@ -230,7 +230,7 @@ func testSearchInternal(st *Storage, tr TimeRange, mrs []MetricRow, accountsCoun mbs = append(mbs, mb) } if err := s.Error(); err != nil { - return fmt.Errorf("search error: %s", err) + return fmt.Errorf("search error: %w", err) } s.MustClose() @@ -239,7 +239,7 @@ func testSearchInternal(st *Storage, tr TimeRange, mrs []MetricRow, accountsCoun for _, mb := range mbs { rb := newTestRawBlock(mb.Block, tr) if err := mn.Unmarshal(mb.MetricName); err != nil { - return fmt.Errorf("cannot unmarshal MetricName: %s", err) + return fmt.Errorf("cannot unmarshal MetricName: %w", err) } metricNameRaw := mn.marshalRaw(nil) for i, timestamp := range rb.Timestamps { diff --git a/lib/storage/storage.go b/lib/storage/storage.go index 5a672df6d..298f6738a 100644 --- a/lib/storage/storage.go +++ b/lib/storage/storage.go @@ -113,7 +113,7 @@ func OpenStorage(path string, retentionMonths int) (*Storage, error) { } path, err := filepath.Abs(path) if err != nil { - return nil, fmt.Errorf("cannot determine absolute path for %q: %s", path, err) + return nil, fmt.Errorf("cannot determine absolute path for %q: %w", path, err) } s := &Storage{ @@ -125,11 +125,11 @@ func OpenStorage(path string, retentionMonths int) (*Storage, error) { } if err := fs.MkdirAllIfNotExist(path); err != nil { - return nil, fmt.Errorf("cannot create a directory for the storage at %q: %s", path, err) + return nil, fmt.Errorf("cannot create a directory for the storage at %q: %w", path, err) } snapshotsPath := path + "/snapshots" if err := fs.MkdirAllIfNotExist(snapshotsPath); err != nil { - return nil, fmt.Errorf("cannot create %q: %s", snapshotsPath, err) + return nil, fmt.Errorf("cannot create %q: %w", snapshotsPath, err) } // Protect from concurrent opens. 
@@ -164,11 +164,11 @@ func OpenStorage(path string, retentionMonths int) (*Storage, error) { idbPath := path + "/indexdb" idbSnapshotsPath := idbPath + "/snapshots" if err := fs.MkdirAllIfNotExist(idbSnapshotsPath); err != nil { - return nil, fmt.Errorf("cannot create %q: %s", idbSnapshotsPath, err) + return nil, fmt.Errorf("cannot create %q: %w", idbSnapshotsPath, err) } idbCurr, idbPrev, err := openIndexDBTables(idbPath, s.metricIDCache, s.metricNameCache, &s.currHourMetricIDs, &s.prevHourMetricIDs) if err != nil { - return nil, fmt.Errorf("cannot open indexdb tables at %q: %s", idbPath, err) + return nil, fmt.Errorf("cannot open indexdb tables at %q: %w", idbPath, err) } idbCurr.SetExtDB(idbPrev) s.idbCurr.Store(idbCurr) @@ -178,7 +178,7 @@ func OpenStorage(path string, retentionMonths int) (*Storage, error) { tb, err := openTable(tablePath, retentionMonths, s.getDeletedMetricIDs) if err != nil { s.idb().MustClose() - return nil, fmt.Errorf("cannot open table at %q: %s", tablePath, err) + return nil, fmt.Errorf("cannot open table at %q: %w", tablePath, err) } s.tb = tb @@ -212,24 +212,24 @@ func (s *Storage) CreateSnapshot() (string, error) { srcDir := s.path dstDir := fmt.Sprintf("%s/snapshots/%s", srcDir, snapshotName) if err := fs.MkdirAllFailIfExist(dstDir); err != nil { - return "", fmt.Errorf("cannot create dir %q: %s", dstDir, err) + return "", fmt.Errorf("cannot create dir %q: %w", dstDir, err) } dstDataDir := dstDir + "/data" if err := fs.MkdirAllFailIfExist(dstDataDir); err != nil { - return "", fmt.Errorf("cannot create dir %q: %s", dstDataDir, err) + return "", fmt.Errorf("cannot create dir %q: %w", dstDataDir, err) } smallDir, bigDir, err := s.tb.CreateSnapshot(snapshotName) if err != nil { - return "", fmt.Errorf("cannot create table snapshot: %s", err) + return "", fmt.Errorf("cannot create table snapshot: %w", err) } dstSmallDir := dstDataDir + "/small" if err := fs.SymlinkRelative(smallDir, dstSmallDir); err != nil { - return "", fmt.Errorf("cannot create symlink from %q to %q: %s", smallDir, dstSmallDir, err) + return "", fmt.Errorf("cannot create symlink from %q to %q: %w", smallDir, dstSmallDir, err) } dstBigDir := dstDataDir + "/big" if err := fs.SymlinkRelative(bigDir, dstBigDir); err != nil { - return "", fmt.Errorf("cannot create symlink from %q to %q: %s", bigDir, dstBigDir, err) + return "", fmt.Errorf("cannot create symlink from %q to %q: %w", bigDir, dstBigDir, err) } fs.MustSyncPath(dstDataDir) @@ -237,18 +237,18 @@ func (s *Storage) CreateSnapshot() (string, error) { idb := s.idb() currSnapshot := idbSnapshot + "/" + idb.name if err := idb.tb.CreateSnapshotAt(currSnapshot); err != nil { - return "", fmt.Errorf("cannot create curr indexDB snapshot: %s", err) + return "", fmt.Errorf("cannot create curr indexDB snapshot: %w", err) } ok := idb.doExtDB(func(extDB *indexDB) { prevSnapshot := idbSnapshot + "/" + extDB.name err = extDB.tb.CreateSnapshotAt(prevSnapshot) }) if ok && err != nil { - return "", fmt.Errorf("cannot create prev indexDB snapshot: %s", err) + return "", fmt.Errorf("cannot create prev indexDB snapshot: %w", err) } dstIdbDir := dstDir + "/indexdb" if err := fs.SymlinkRelative(idbSnapshot, dstIdbDir); err != nil { - return "", fmt.Errorf("cannot create symlink from %q to %q: %s", idbSnapshot, dstIdbDir, err) + return "", fmt.Errorf("cannot create symlink from %q to %q: %w", idbSnapshot, dstIdbDir, err) } fs.MustSyncPath(dstDir) @@ -265,13 +265,13 @@ func (s *Storage) ListSnapshots() ([]string, error) { snapshotsPath := s.path + "/snapshots" d, 
err := os.Open(snapshotsPath) if err != nil { - return nil, fmt.Errorf("cannot open %q: %s", snapshotsPath, err) + return nil, fmt.Errorf("cannot open %q: %w", snapshotsPath, err) } defer fs.MustClose(d) fnames, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("cannot read contents of %q: %s", snapshotsPath, err) + return nil, fmt.Errorf("cannot read contents of %q: %w", snapshotsPath, err) } snapshotNames := make([]string, 0, len(fnames)) for _, fname := range fnames { @@ -799,7 +799,7 @@ func (s *Storage) searchTSIDs(tfss []*TagFilters, tr TimeRange, maxMetrics int) // on idb level. tsids, err := s.idb().searchTSIDs(tfss, tr, maxMetrics) if err != nil { - return nil, fmt.Errorf("error when searching tsids for tfss %q: %s", tfss, err) + return nil, fmt.Errorf("error when searching tsids for tfss %q: %w", tfss, err) } return tsids, nil } @@ -833,7 +833,7 @@ func (s *Storage) prefetchMetricNames(tsids []TSID) error { for _, metricID := range metricIDs { metricName, err = is.searchMetricName(metricName[:0], metricID) if err != nil && err != io.EOF { - return fmt.Errorf("error in pre-fetching metricName for metricID=%d: %s", metricID, err) + return fmt.Errorf("error in pre-fetching metricName for metricID=%d: %w", metricID, err) } } @@ -852,7 +852,7 @@ func (s *Storage) prefetchMetricNames(tsids []TSID) error { func (s *Storage) DeleteMetrics(tfss []*TagFilters) (int, error) { deletedCount, err := s.idb().DeleteTSIDs(tfss) if err != nil { - return deletedCount, fmt.Errorf("cannot delete tsids: %s", err) + return deletedCount, fmt.Errorf("cannot delete tsids: %w", err) } // Do not reset MetricName -> TSID cache (tsidCache), since the obtained // entries must be checked against deleted metricIDs. @@ -884,7 +884,7 @@ func (s *Storage) SearchTagEntries(maxTagKeys, maxTagValues int) ([]TagEntry, er idb := s.idb() keys, err := idb.SearchTagKeys(maxTagKeys) if err != nil { - return nil, fmt.Errorf("cannot search tag keys: %s", err) + return nil, fmt.Errorf("cannot search tag keys: %w", err) } // Sort keys for faster seeks below @@ -894,7 +894,7 @@ func (s *Storage) SearchTagEntries(maxTagKeys, maxTagValues int) ([]TagEntry, er for i, key := range keys { values, err := idb.SearchTagValues([]byte(key), maxTagValues) if err != nil { - return nil, fmt.Errorf("cannot search values for tag %q: %s", key, err) + return nil, fmt.Errorf("cannot search values for tag %q: %w", key, err) } te := &tes[i] te.Key = key @@ -966,7 +966,7 @@ func (mr *MetricRow) Marshal(dst []byte) []byte { func (mr *MetricRow) Unmarshal(src []byte) ([]byte, error) { tail, metricNameRaw, err := encoding.UnmarshalBytes(src) if err != nil { - return tail, fmt.Errorf("cannot unmarshal MetricName: %s", err) + return tail, fmt.Errorf("cannot unmarshal MetricName: %w", err) } mr.MetricNameRaw = append(mr.MetricNameRaw[:0], metricNameRaw...) @@ -1141,7 +1141,7 @@ func (s *Storage) add(rows []rawRow, mrs []MetricRow, precisionBits uint8) ([]ra // This guarantees that invalid rows don't prevent // from adding valid rows into the storage. 
if firstWarn == nil { - firstWarn = fmt.Errorf("cannot obtain or create TSID for MetricName %q: %s", pmr.MetricName, err) + firstWarn = fmt.Errorf("cannot obtain or create TSID for MetricName %q: %w", pmr.MetricName, err) } j-- continue @@ -1158,13 +1158,13 @@ func (s *Storage) add(rows []rawRow, mrs []MetricRow, precisionBits uint8) ([]ra var firstError error if err := s.tb.AddRows(rows); err != nil { - firstError = fmt.Errorf("cannot add rows to table: %s", err) + firstError = fmt.Errorf("cannot add rows to table: %w", err) } if err := s.updatePerDateData(rows); err != nil && firstError == nil { - firstError = fmt.Errorf("cannot update per-date data: %s", err) + firstError = fmt.Errorf("cannot update per-date data: %w", err) } if firstError != nil { - return rows, fmt.Errorf("error occurred during rows addition: %s", firstError) + return rows, fmt.Errorf("error occurred during rows addition: %w", firstError) } return rows, nil } @@ -1200,7 +1200,7 @@ func (pmrs *pendingMetricRows) addRow(mr *MetricRow) error { // of many rows for the same metric. if string(mr.MetricNameRaw) != string(pmrs.lastMetricNameRaw) { if err := pmrs.mn.unmarshalRaw(mr.MetricNameRaw); err != nil { - return fmt.Errorf("cannot unmarshal MetricNameRaw %q: %s", mr.MetricNameRaw, err) + return fmt.Errorf("cannot unmarshal MetricNameRaw %q: %w", mr.MetricNameRaw, err) } pmrs.mn.sortTags() metricNamesBufLen := len(pmrs.metricNamesBuf) @@ -1341,7 +1341,7 @@ func (s *Storage) updatePerDateData(rows []rawRow) error { ok, err := is.hasDateMetricID(date, metricID) if err != nil { if firstError == nil { - firstError = fmt.Errorf("error when locating (date=%d, metricID=%d) in database: %s", date, metricID, err) + firstError = fmt.Errorf("error when locating (date=%d, metricID=%d) in database: %w", date, metricID, err) } continue } @@ -1349,7 +1349,7 @@ func (s *Storage) updatePerDateData(rows []rawRow) error { // The (date, metricID) entry is missing in the indexDB. Add it there. if err := is.storeDateMetricID(date, metricID); err != nil { if firstError == nil { - firstError = fmt.Errorf("error when storing (date=%d, metricID=%d) in database: %s", date, metricID, err) + firstError = fmt.Errorf("error when storing (date=%d, metricID=%d) in database: %w", date, metricID, err) } continue } @@ -1584,12 +1584,12 @@ func (s *Storage) putTSIDToCache(tsid *TSID, metricName []byte) { func openIndexDBTables(path string, metricIDCache, metricNameCache *workingsetcache.Cache, currHourMetricIDs, prevHourMetricIDs *atomic.Value) (curr, prev *indexDB, err error) { if err := fs.MkdirAllIfNotExist(path); err != nil { - return nil, nil, fmt.Errorf("cannot create directory %q: %s", path, err) + return nil, nil, fmt.Errorf("cannot create directory %q: %w", path, err) } d, err := os.Open(path) if err != nil { - return nil, nil, fmt.Errorf("cannot open directory: %s", err) + return nil, nil, fmt.Errorf("cannot open directory: %w", err) } defer fs.MustClose(d) @@ -1597,7 +1597,7 @@ func openIndexDBTables(path string, metricIDCache, metricNameCache *workingsetca // the previous one contains backup data. 
fis, err := d.Readdir(-1) if err != nil { - return nil, nil, fmt.Errorf("cannot read directory: %s", err) + return nil, nil, fmt.Errorf("cannot read directory: %w", err) } var tableNames []string for _, fi := range fis { @@ -1643,13 +1643,13 @@ func openIndexDBTables(path string, metricIDCache, metricNameCache *workingsetca curr, err = openIndexDB(currPath, metricIDCache, metricNameCache, currHourMetricIDs, prevHourMetricIDs) if err != nil { - return nil, nil, fmt.Errorf("cannot open curr indexdb table at %q: %s", currPath, err) + return nil, nil, fmt.Errorf("cannot open curr indexdb table at %q: %w", currPath, err) } prevPath := path + "/" + tableNames[len(tableNames)-2] prev, err = openIndexDB(prevPath, metricIDCache, metricNameCache, currHourMetricIDs, prevHourMetricIDs) if err != nil { curr.MustClose() - return nil, nil, fmt.Errorf("cannot open prev indexdb table at %q: %s", prevPath, err) + return nil, nil, fmt.Errorf("cannot open prev indexdb table at %q: %w", prevPath, err) } // Adjust startDateForPerDayInvertedIndex for the previous index. diff --git a/lib/storage/storage_test.go b/lib/storage/storage_test.go index 8cd44faa1..39e45a584 100644 --- a/lib/storage/storage_test.go +++ b/lib/storage/storage_test.go @@ -431,7 +431,7 @@ func testStorageRandTimestamps(s *Storage) error { if err := s.AddRows(mrs, defaultPrecisionBits); err != nil { errStr := err.Error() if !strings.Contains(errStr, "too big timestamp") && !strings.Contains(errStr, "too small timestamp") { - return fmt.Errorf("unexpected error when adding mrs: %s", err) + return fmt.Errorf("unexpected error when adding mrs: %w", err) } } } @@ -554,7 +554,7 @@ func testStorageDeleteMetrics(s *Storage, workerNum int) error { mrs = append(mrs, mr) } if err := s.AddRows(mrs, defaultPrecisionBits); err != nil { - return fmt.Errorf("unexpected error when adding mrs: %s", err) + return fmt.Errorf("unexpected error when adding mrs: %w", err) } } s.debugFlush() @@ -562,7 +562,7 @@ func testStorageDeleteMetrics(s *Storage, workerNum int) error { // Verify tag values exist tvs, err := s.SearchTagValues(workerTag, 1e5) if err != nil { - return fmt.Errorf("error in SearchTagValues before metrics removal: %s", err) + return fmt.Errorf("error in SearchTagValues before metrics removal: %w", err) } if len(tvs) == 0 { return fmt.Errorf("unexpected empty number of tag values for workerTag") @@ -571,10 +571,10 @@ func testStorageDeleteMetrics(s *Storage, workerNum int) error { // Verify tag keys exist tks, err := s.SearchTagKeys(1e5) if err != nil { - return fmt.Errorf("error in SearchTagKeys before metrics removal: %s", err) + return fmt.Errorf("error in SearchTagKeys before metrics removal: %w", err) } if err := checkTagKeys(tks, tksAll); err != nil { - return fmt.Errorf("unexpected tag keys before metrics removal: %s", err) + return fmt.Errorf("unexpected tag keys before metrics removal: %w", err) } var sr Search @@ -595,18 +595,18 @@ func testStorageDeleteMetrics(s *Storage, workerNum int) error { for i := 0; i < metricsCount; i++ { tfs := NewTagFilters() if err := tfs.Add(nil, []byte("metric_.+"), false, true); err != nil { - return fmt.Errorf("cannot add regexp tag filter: %s", err) + return fmt.Errorf("cannot add regexp tag filter: %w", err) } job := fmt.Sprintf("job_%d_%d", i, workerNum) if err := tfs.Add([]byte("job"), []byte(job), false, false); err != nil { - return fmt.Errorf("cannot add job tag filter: %s", err) + return fmt.Errorf("cannot add job tag filter: %w", err) } if n := metricBlocksCount(tfs); n == 0 { return 
fmt.Errorf("expecting non-zero number of metric blocks for tfs=%s", tfs) } deletedCount, err := s.DeleteMetrics([]*TagFilters{tfs}) if err != nil { - return fmt.Errorf("cannot delete metrics: %s", err) + return fmt.Errorf("cannot delete metrics: %w", err) } if deletedCount == 0 { return fmt.Errorf("expecting non-zero number of deleted metrics on iteration %d", i) @@ -618,7 +618,7 @@ func testStorageDeleteMetrics(s *Storage, workerNum int) error { // Try deleting empty tfss deletedCount, err = s.DeleteMetrics(nil) if err != nil { - return fmt.Errorf("cannot delete empty tfss: %s", err) + return fmt.Errorf("cannot delete empty tfss: %w", err) } if deletedCount != 0 { return fmt.Errorf("expecting zero deleted metrics for empty tfss; got %d", deletedCount) @@ -628,14 +628,14 @@ func testStorageDeleteMetrics(s *Storage, workerNum int) error { // Make sure no more metrics left for the given workerNum tfs := NewTagFilters() if err := tfs.Add(nil, []byte(fmt.Sprintf("metric_.+_%d", workerNum)), false, true); err != nil { - return fmt.Errorf("cannot add regexp tag filter for worker metrics: %s", err) + return fmt.Errorf("cannot add regexp tag filter for worker metrics: %w", err) } if n := metricBlocksCount(tfs); n != 0 { return fmt.Errorf("expecting zero metric blocks after deleting all the metrics; got %d blocks", n) } tvs, err = s.SearchTagValues(workerTag, 1e5) if err != nil { - return fmt.Errorf("error in SearchTagValues after all the metrics are removed: %s", err) + return fmt.Errorf("error in SearchTagValues after all the metrics are removed: %w", err) } if len(tvs) != 0 { return fmt.Errorf("found non-empty tag values for %q after metrics removal: %q", workerTag, tvs) @@ -732,7 +732,7 @@ func testStorageAddRows(s *Storage) error { mrs = append(mrs, mr) } if err := s.AddRows(mrs, defaultPrecisionBits); err != nil { - return fmt.Errorf("unexpected error when adding mrs: %s", err) + return fmt.Errorf("unexpected error when adding mrs: %w", err) } } @@ -747,13 +747,13 @@ func testStorageAddRows(s *Storage) error { // Try creating a snapshot from the storage. snapshotName, err := s.CreateSnapshot() if err != nil { - return fmt.Errorf("cannot create snapshot from the storage: %s", err) + return fmt.Errorf("cannot create snapshot from the storage: %w", err) } // Verify the snapshot is visible snapshots, err := s.ListSnapshots() if err != nil { - return fmt.Errorf("cannot list snapshots: %s", err) + return fmt.Errorf("cannot list snapshots: %w", err) } if !containsString(snapshots, snapshotName) { return fmt.Errorf("cannot find snapshot %q in %q", snapshotName, snapshots) @@ -763,7 +763,7 @@ func testStorageAddRows(s *Storage) error { snapshotPath := s.path + "/snapshots/" + snapshotName s1, err := OpenStorage(snapshotPath, 0) if err != nil { - return fmt.Errorf("cannot open storage from snapshot: %s", err) + return fmt.Errorf("cannot open storage from snapshot: %w", err) } // Verify the snapshot contains rows @@ -777,11 +777,11 @@ func testStorageAddRows(s *Storage) error { // Delete the snapshot and make sure it is no longer visible. 
if err := s.DeleteSnapshot(snapshotName); err != nil { - return fmt.Errorf("cannot delete snapshot %q: %s", snapshotName, err) + return fmt.Errorf("cannot delete snapshot %q: %w", snapshotName, err) } snapshots, err = s.ListSnapshots() if err != nil { - return fmt.Errorf("cannot list snapshots: %s", err) + return fmt.Errorf("cannot list snapshots: %w", err) } if containsString(snapshots, snapshotName) { return fmt.Errorf("snapshot %q must be deleted, but is still visible in %q", snapshotName, snapshots) @@ -860,7 +860,7 @@ func testStorageAddMetrics(s *Storage, workerNum int) error { Value: value, } if err := s.AddRows([]MetricRow{mr}, defaultPrecisionBits); err != nil { - return fmt.Errorf("unexpected error when adding mrs: %s", err) + return fmt.Errorf("unexpected error when adding mrs: %w", err) } } diff --git a/lib/storage/storage_timing_test.go b/lib/storage/storage_timing_test.go index abdd5f46b..d5d7507b7 100644 --- a/lib/storage/storage_timing_test.go +++ b/lib/storage/storage_timing_test.go @@ -50,7 +50,7 @@ func benchmarkStorageAddRows(b *testing.B, rowsPerBatch int) { mr.Value = float64(offset + i) } if err := s.AddRows(mrs, defaultPrecisionBits); err != nil { - panic(fmt.Errorf("cannot add rows to storage: %s", err)) + panic(fmt.Errorf("cannot add rows to storage: %w", err)) } } }) diff --git a/lib/storage/table.go b/lib/storage/table.go index 7cdd54ae2..f3d6dfb05 100644 --- a/lib/storage/table.go +++ b/lib/storage/table.go @@ -86,7 +86,7 @@ func openTable(path string, retentionMonths int, getDeletedMetricIDs func() *uin // Create a directory for the table if it doesn't exist yet. if err := fs.MkdirAllIfNotExist(path); err != nil { - return nil, fmt.Errorf("cannot create directory for table %q: %s", path, err) + return nil, fmt.Errorf("cannot create directory for table %q: %w", path, err) } // Protect from concurrent opens. @@ -98,25 +98,25 @@ func openTable(path string, retentionMonths int, getDeletedMetricIDs func() *uin // Create directories for small and big partitions if they don't exist yet. smallPartitionsPath := path + "/small" if err := fs.MkdirAllIfNotExist(smallPartitionsPath); err != nil { - return nil, fmt.Errorf("cannot create directory for small partitions %q: %s", smallPartitionsPath, err) + return nil, fmt.Errorf("cannot create directory for small partitions %q: %w", smallPartitionsPath, err) } smallSnapshotsPath := smallPartitionsPath + "/snapshots" if err := fs.MkdirAllIfNotExist(smallSnapshotsPath); err != nil { - return nil, fmt.Errorf("cannot create %q: %s", smallSnapshotsPath, err) + return nil, fmt.Errorf("cannot create %q: %w", smallSnapshotsPath, err) } bigPartitionsPath := path + "/big" if err := fs.MkdirAllIfNotExist(bigPartitionsPath); err != nil { - return nil, fmt.Errorf("cannot create directory for big partitions %q: %s", bigPartitionsPath, err) + return nil, fmt.Errorf("cannot create directory for big partitions %q: %w", bigPartitionsPath, err) } bigSnapshotsPath := bigPartitionsPath + "/snapshots" if err := fs.MkdirAllIfNotExist(bigSnapshotsPath); err != nil { - return nil, fmt.Errorf("cannot create %q: %s", bigSnapshotsPath, err) + return nil, fmt.Errorf("cannot create %q: %w", bigSnapshotsPath, err) } // Open partitions. 
pts, err := openPartitions(smallPartitionsPath, bigPartitionsPath, getDeletedMetricIDs) if err != nil { - return nil, fmt.Errorf("cannot open partitions in the table %q: %s", path, err) + return nil, fmt.Errorf("cannot open partitions in the table %q: %w", path, err) } tb := &table{ @@ -151,18 +151,18 @@ func (tb *table) CreateSnapshot(snapshotName string) (string, string, error) { dstSmallDir := fmt.Sprintf("%s/small/snapshots/%s", tb.path, snapshotName) if err := fs.MkdirAllFailIfExist(dstSmallDir); err != nil { - return "", "", fmt.Errorf("cannot create dir %q: %s", dstSmallDir, err) + return "", "", fmt.Errorf("cannot create dir %q: %w", dstSmallDir, err) } dstBigDir := fmt.Sprintf("%s/big/snapshots/%s", tb.path, snapshotName) if err := fs.MkdirAllFailIfExist(dstBigDir); err != nil { - return "", "", fmt.Errorf("cannot create dir %q: %s", dstBigDir, err) + return "", "", fmt.Errorf("cannot create dir %q: %w", dstBigDir, err) } for _, ptw := range ptws { smallPath := dstSmallDir + "/" + ptw.pt.name bigPath := dstBigDir + "/" + ptw.pt.name if err := ptw.pt.CreateSnapshotAt(smallPath, bigPath); err != nil { - return "", "", fmt.Errorf("cannot create snapshot for partition %q in %q: %s", ptw.pt.name, tb.path, err) + return "", "", fmt.Errorf("cannot create snapshot for partition %q in %q: %w", ptw.pt.name, tb.path, err) } } @@ -348,7 +348,7 @@ func (tb *table) AddRows(rows []rawRow) error { if len(errors) > 0 { // Return only the first error, since it has no sense in returning all errors. - return fmt.Errorf("errors while adding rows to table %q: %s", tb.path, errors[0]) + return fmt.Errorf("errors while adding rows to table %q: %w", tb.path, errors[0]) } return nil } @@ -453,7 +453,7 @@ func openPartitions(smallPartitionsPath, bigPartitionsPath string, getDeletedMet pt, err := openPartition(smallPartsPath, bigPartsPath, getDeletedMetricIDs) if err != nil { mustClosePartitions(pts) - return nil, fmt.Errorf("cannot open partition %q: %s", ptName, err) + return nil, fmt.Errorf("cannot open partition %q: %w", ptName, err) } pts = append(pts, pt) } @@ -463,13 +463,13 @@ func openPartitions(smallPartitionsPath, bigPartitionsPath string, getDeletedMet func populatePartitionNames(partitionsPath string, ptNames map[string]bool) error { d, err := os.Open(partitionsPath) if err != nil { - return fmt.Errorf("cannot open directory with partitions %q: %s", partitionsPath, err) + return fmt.Errorf("cannot open directory with partitions %q: %w", partitionsPath, err) } defer fs.MustClose(d) fis, err := d.Readdir(-1) if err != nil { - return fmt.Errorf("cannot read directory with partitions %q: %s", partitionsPath, err) + return fmt.Errorf("cannot read directory with partitions %q: %w", partitionsPath, err) } for _, fi := range fis { if !fs.IsDirOrSymlink(fi) { diff --git a/lib/storage/table_search.go b/lib/storage/table_search.go index 113c8053c..50b5ea35b 100644 --- a/lib/storage/table_search.go +++ b/lib/storage/table_search.go @@ -107,7 +107,7 @@ func (ts *tableSearch) Init(tb *table, tsids []TSID, tr TimeRange) { } if len(errors) > 0 { // Return only the first error, since it has no sense in returning all errors. 
- ts.err = fmt.Errorf("cannot initialize table search: %s", errors[0]) + ts.err = fmt.Errorf("cannot initialize table search: %w", errors[0]) return } if len(ts.ptsHeap) == 0 { @@ -135,7 +135,7 @@ func (ts *tableSearch) NextBlock() bool { ts.err = ts.nextBlock() if ts.err != nil { if ts.err != io.EOF { - ts.err = fmt.Errorf("cannot obtain the next block to search in the table: %s", ts.err) + ts.err = fmt.Errorf("cannot obtain the next block to search in the table: %w", ts.err) } return false } diff --git a/lib/storage/table_search_test.go b/lib/storage/table_search_test.go index c821b5e93..d2f81e813 100644 --- a/lib/storage/table_search_test.go +++ b/lib/storage/table_search_test.go @@ -258,12 +258,12 @@ func testTableSearchSerial(tb *table, tsids []TSID, tr TimeRange, rbsExpected [] bs = append(bs, b) } if err := ts.Error(); err != nil { - return fmt.Errorf("unexpected error: %s", err) + return fmt.Errorf("unexpected error: %w", err) } ts.MustClose() rbs := newTestRawBlocks(bs, tr) if err := testEqualRawBlocks(rbs, rbsExpected); err != nil { - return fmt.Errorf("unequal blocks: %s", err) + return fmt.Errorf("unequal blocks: %w", err) } if rowsCountExpected >= 0 { @@ -281,7 +281,7 @@ func testTableSearchSerial(tb *table, tsids []TSID, tr TimeRange, rbsExpected [] return fmt.Errorf("unexpected block got for an empty tsids list: %+v", ts.BlockRef) } if err := ts.Error(); err != nil { - return fmt.Errorf("unexpected error on empty tsids list: %s", err) + return fmt.Errorf("unexpected error on empty tsids list: %w", err) } ts.MustClose() diff --git a/lib/storage/table_search_timing_test.go b/lib/storage/table_search_timing_test.go index fe4d8c8df..42b450681 100644 --- a/lib/storage/table_search_timing_test.go +++ b/lib/storage/table_search_timing_test.go @@ -14,7 +14,7 @@ import ( func TestMain(m *testing.M) { n := m.Run() if err := os.RemoveAll("./benchmarkTableSearch"); err != nil { - panic(fmt.Errorf("cannot remove benchmark tables: %s", err)) + panic(fmt.Errorf("cannot remove benchmark tables: %w", err)) } os.Exit(n) } @@ -96,7 +96,7 @@ func createBenchTable(b *testing.B, path string, startTimestamp int64, rowsPerIn r.Value = value } if err := tb.AddRows(rows); err != nil { - panic(fmt.Errorf("cannot add %d rows: %s", rowsPerInsert, err)) + panic(fmt.Errorf("cannot add %d rows: %w", rowsPerInsert, err)) } } wg.Done() diff --git a/lib/storage/table_timing_test.go b/lib/storage/table_timing_test.go index 840b33351..2a20add41 100644 --- a/lib/storage/table_timing_test.go +++ b/lib/storage/table_timing_test.go @@ -78,7 +78,7 @@ func benchmarkTableAddRows(b *testing.B, rowsPerInsert, tsidsCount int) { } // Add updated rowsCopy. if err := tb.AddRows(rowsCopy); err != nil { - panic(fmt.Errorf("cannot add rows to table %q: %s", tablePath, err)) + panic(fmt.Errorf("cannot add rows to table %q: %w", tablePath, err)) } } diff --git a/lib/storage/tag_filters.go b/lib/storage/tag_filters.go index 9f238ae98..75d337803 100644 --- a/lib/storage/tag_filters.go +++ b/lib/storage/tag_filters.go @@ -58,7 +58,7 @@ func (tfs *TagFilters) Add(key, value []byte, isNegative, isRegexp bool) error { tf := tfs.addTagFilter() if err := tf.Init(tfs.commonPrefix, key, value, isNegative, isRegexp); err != nil { - return fmt.Errorf("cannot initialize tagFilter: %s", err) + return fmt.Errorf("cannot initialize tagFilter: %w", err) } if tf.isNegative && tf.isEmptyMatch { // We have {key!~"|foo"} tag filter, which matches non=empty key values. 
@@ -66,14 +66,14 @@ func (tfs *TagFilters) Add(key, value []byte, isNegative, isRegexp bool) error { // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/546 for details. tfNew := tfs.addTagFilter() if err := tfNew.Init(tfs.commonPrefix, key, []byte(".+"), false, true); err != nil { - return fmt.Errorf(`cannot initialize {%s=".+"} tag filter: %s`, key, err) + return fmt.Errorf(`cannot initialize {%s=".+"} tag filter: %w`, key, err) } } if len(tf.graphiteReverseSuffix) > 0 { re := regexp.QuoteMeta(string(tf.graphiteReverseSuffix)) + ".*" tfNew := tfs.addTagFilter() if err := tfNew.Init(tfs.commonPrefix, graphiteReverseTagKey, []byte(re), false, true); err != nil { - return fmt.Errorf("cannot initialize reverse tag filter for Graphite wildcard: %s", err) + return fmt.Errorf("cannot initialize reverse tag filter for Graphite wildcard: %w", err) } } return nil @@ -333,7 +333,7 @@ func getRegexpFromCache(expr []byte) (regexpCacheValue, error) { exprStr := fmt.Sprintf("^(%s)$", expr) re, err := regexp.Compile(exprStr) if err != nil { - return rcv, fmt.Errorf("invalid regexp %q: %s", exprStr, err) + return rcv, fmt.Errorf("invalid regexp %q: %w", exprStr, err) } sExpr := string(expr) diff --git a/lib/storage/time.go b/lib/storage/time.go index 269dd3ab6..a97c9ae90 100644 --- a/lib/storage/time.go +++ b/lib/storage/time.go @@ -41,7 +41,7 @@ func timestampToPartitionName(timestamp int64) string { func (tr *TimeRange) fromPartitionName(name string) error { t, err := time.Parse("2006_01", name) if err != nil { - return fmt.Errorf("cannot parse partition name %q: %s", name, err) + return fmt.Errorf("cannot parse partition name %q: %w", name, err) } tr.fromPartitionTime(t) return nil diff --git a/lib/uint64set/uint64set_test.go b/lib/uint64set/uint64set_test.go index 46021d514..7fcb0523e 100644 --- a/lib/uint64set/uint64set_test.go +++ b/lib/uint64set/uint64set_test.go @@ -211,7 +211,7 @@ func expectEqual(s *Set, m map[uint64]bool) error { s.ForEach(func(part []uint64) bool { for _, v := range part { if !m[v] { - err = fmt.Errorf("miising value %d in m inside s.ForEach; s=%v\nm=%v", v, s.AppendTo(nil), m) + err = fmt.Errorf("missing value %d in m inside s.ForEach; s=%v\nm=%v", v, s.AppendTo(nil), m) return false } }
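One caveat the conversion surfaces: direct sentinel comparisons keep working only while the sentinel is still unwrapped. The ts.err != io.EOF check in NextBlock and the err != io.EOF check in prefetchMetricNames above are fine because they compare the sentinel before it gets wrapped, but any caller that receives the already-wrapped error has to switch to errors.Is. A hypothetical illustration (searchNext is invented for the sketch):

package main

import (
	"errors"
	"fmt"
	"io"
)

// searchNext stands in for a callee that wraps its errors with %w.
func searchNext() error {
	return fmt.Errorf("cannot obtain the next block to search in the table: %w", io.EOF)
}

func main() {
	err := searchNext()
	fmt.Println(err == io.EOF)          // false: the wrapper is a distinct value
	fmt.Println(errors.Is(err, io.EOF)) // true: errors.Is unwraps the chain
}

The same goes for typed errors: with %w in the chain, errors.As can still recover, say, the *time.ParseError that time.Parse returns inside fromPartitionName.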