all: consistently use %w instead of %s when an error is passed to fmt.Errorf()

This makes it possible to consistently use errors.Is() for checking whether a given error wraps another known error.
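
For example, %w keeps the wrapped error reachable via errors.Is(), while %s flattens it into text. A minimal standalone sketch (not part of this commit; the file path is arbitrary):

package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	_, err := os.Open("/no/such/file") // fails with an error matching fs.ErrNotExist

	// %w wraps err, so the error chain stays inspectable.
	wrapped := fmt.Errorf("cannot read config: %w", err)
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true

	// %s only embeds err's text; the original error is lost to errors.Is().
	flattened := fmt.Errorf("cannot read config: %s", err)
	fmt.Println(errors.Is(flattened, os.ErrNotExist)) // false
}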
Aliaksandr Valialkin 2023-10-25 21:24:01 +02:00
parent 305c96e384
commit 42dd71bb63
42 changed files with 83 additions and 83 deletions
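
Note that two hunks near the end of this diff go in the opposite direction, replacing %w with %s in logger.Panicf calls: the %w verb is interpreted only by fmt.Errorf, while Printf-style formatters render it as a bad verb. A minimal sketch of that behavior (illustrative only, not part of this commit):

package main

import (
	"fmt"
	"os"
)

func main() {
	_, err := os.Open("/no/such/file")
	// Outside fmt.Errorf, %w is not a recognized verb:
	fmt.Printf("failed: %w\n", err) // failed: %!w(*fs.PathError=...)
	fmt.Printf("failed: %s\n", err) // failed: open /no/such/file: no such file or directory
}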

View file

@@ -120,10 +120,10 @@ func compressData(s string) string {
var bb bytes.Buffer
zw := gzip.NewWriter(&bb)
if _, err := zw.Write([]byte(s)); err != nil {
- panic(fmt.Errorf("unexpected error when compressing data: %s", err))
+ panic(fmt.Errorf("unexpected error when compressing data: %w", err))
}
if err := zw.Close(); err != nil {
- panic(fmt.Errorf("unexpected error when closing gzip writer: %s", err))
+ panic(fmt.Errorf("unexpected error when closing gzip writer: %w", err))
}
return bb.String()
}

View file

@@ -43,7 +43,7 @@ func benchmarkReadBulkRequest(b *testing.B, isGzip bool) {
r.Reset(dataBytes)
_, err := readBulkRequest(r, isGzip, timeField, msgField, processLogMessage)
if err != nil {
- panic(fmt.Errorf("unexpected error: %s", err))
+ panic(fmt.Errorf("unexpected error: %w", err))
}
}
})

View file

@@ -29,7 +29,7 @@ func benchmarkParseJSONRequest(b *testing.B, streams, rows, labels int) {
for pb.Next() {
_, err := parseJSONRequest(data, func(timestamp int64, fields []logstorage.Field) {})
if err != nil {
- panic(fmt.Errorf("unexpected error: %s", err))
+ panic(fmt.Errorf("unexpected error: %w", err))
}
}
})

View file

@@ -84,7 +84,7 @@ func parseProtobufRequest(data []byte, processLogMessage func(timestamp int64, f
err = req.Unmarshal(bb.B)
if err != nil {
- return 0, fmt.Errorf("cannot parse request body: %s", err)
+ return 0, fmt.Errorf("cannot parse request body: %w", err)
}
var commonFields []logstorage.Field
@@ -97,7 +97,7 @@ func parseProtobufRequest(data []byte, processLogMessage func(timestamp int64, f
// Labels are same for all entries in the stream.
commonFields, err = parsePromLabels(commonFields[:0], stream.Labels)
if err != nil {
- return rowsIngested, fmt.Errorf("cannot parse stream labels %q: %s", stream.Labels, err)
+ return rowsIngested, fmt.Errorf("cannot parse stream labels %q: %w", stream.Labels, err)
}
fields := commonFields

View file

@@ -31,7 +31,7 @@ func benchmarkParseProtobufRequest(b *testing.B, streams, rows, labels int) {
for pb.Next() {
_, err := parseProtobufRequest(body, func(timestamp int64, fields []logstorage.Field) {})
if err != nil {
- panic(fmt.Errorf("unexpected error: %s", err))
+ panic(fmt.Errorf("unexpected error: %w", err))
}
}
})

View file

@@ -236,7 +236,7 @@ func ParseSilent(pathPatterns []string, validateTplFn ValidateTplFn, validateExp
files, err := readFromFS(pathPatterns)
if err != nil {
- return nil, fmt.Errorf("failed to read from the config: %s", err)
+ return nil, fmt.Errorf("failed to read from the config: %w", err)
}
return parse(files, validateTplFn, validateExpressions)
}
@@ -245,11 +245,11 @@ func ParseSilent(pathPatterns []string, validateTplFn ValidateTplFn, validateExp
func Parse(pathPatterns []string, validateTplFn ValidateTplFn, validateExpressions bool) ([]Group, error) {
files, err := readFromFS(pathPatterns)
if err != nil {
- return nil, fmt.Errorf("failed to read from the config: %s", err)
+ return nil, fmt.Errorf("failed to read from the config: %w", err)
}
groups, err := parse(files, validateTplFn, validateExpressions)
if err != nil {
- return nil, fmt.Errorf("failed to parse %s: %s", pathPatterns, err)
+ return nil, fmt.Errorf("failed to parse %s: %w", pathPatterns, err)
}
if len(groups) < 1 {
cLogger.Warnf("no groups found in %s", strings.Join(pathPatterns, ";"))

View file

@@ -49,7 +49,7 @@ func (fs *FS) Read(files []string) (map[string][]byte, error) {
path, resp.StatusCode, http.StatusOK, data)
}
if err != nil {
- return nil, fmt.Errorf("cannot read %q: %s", path, err)
+ return nil, fmt.Errorf("cannot read %q: %w", path, err)
}
result[path] = data
}

View file

@@ -117,7 +117,7 @@ func Init(extraParams url.Values) (QuerierBuilder, error) {
}
_, err = authCfg.GetAuthHeader()
if err != nil {
- return nil, fmt.Errorf("failed to set request auth header to datasource %q: %s", *addr, err)
+ return nil, fmt.Errorf("failed to set request auth header to datasource %q: %w", *addr, err)
}
return &VMStorage{

View file

@@ -219,7 +219,7 @@ func (s *VMStorage) do(ctx context.Context, req *http.Request) (*http.Response,
func (s *VMStorage) newQueryRangeRequest(query string, start, end time.Time) (*http.Request, error) {
req, err := s.newRequest()
if err != nil {
- return nil, fmt.Errorf("cannot create query_range request to datasource %q: %s", s.datasourceURL, err)
+ return nil, fmt.Errorf("cannot create query_range request to datasource %q: %w", s.datasourceURL, err)
}
s.setPrometheusRangeReqParams(req, query, start, end)
return req, nil
@@ -228,7 +228,7 @@ func (s *VMStorage) newQueryRangeRequest(query string, start, end time.Time) (*h
func (s *VMStorage) newQueryRequest(query string, ts time.Time) (*http.Request, error) {
req, err := s.newRequest()
if err != nil {
- return nil, fmt.Errorf("cannot create query request to datasource %q: %s", s.datasourceURL, err)
+ return nil, fmt.Errorf("cannot create query request to datasource %q: %w", s.datasourceURL, err)
}
switch s.dataSourceType {
case "", datasourcePrometheus:

View file

@@ -119,7 +119,7 @@ func parsePrometheusResponse(req *http.Request, resp *http.Response) (res Result
case rtVector:
var pi promInstant
if err := json.Unmarshal(r.Data.Result, &pi.Result); err != nil {
- return res, fmt.Errorf("umarshal err %s; \n %#v", err, string(r.Data.Result))
+ return res, fmt.Errorf("unmarshal err %w; \n %#v", err, string(r.Data.Result))
}
parseFn = pi.metrics
case rtMatrix:

View file

@@ -87,7 +87,7 @@ func (cw *configWatcher) reload(path string) error {
func (cw *configWatcher) add(typeK TargetType, interval time.Duration, labelsFn getLabels) error {
targets, errors := targetsFromLabels(labelsFn, cw.cfg, cw.genFn)
for _, err := range errors {
- return fmt.Errorf("failed to init notifier for %q: %s", typeK, err)
+ return fmt.Errorf("failed to init notifier for %q: %w", typeK, err)
}
cw.setTargets(typeK, targets)
@@ -107,7 +107,7 @@ func (cw *configWatcher) add(typeK TargetType, interval time.Duration, labelsFn
}
updateTargets, errors := targetsFromLabels(labelsFn, cw.cfg, cw.genFn)
for _, err := range errors {
- logger.Errorf("failed to init notifier for %q: %s", typeK, err)
+ logger.Errorf("failed to init notifier for %q: %w", typeK, err)
}
cw.setTargets(typeK, updateTargets)
}
@@ -118,7 +118,7 @@ func (cw *configWatcher) add(typeK TargetType, interval time.Duration, labelsFn
func targetsFromLabels(labelsFn getLabels, cfg *Config, genFn AlertURLGenerator) ([]Target, []error) {
metaLabels, err := labelsFn()
if err != nil {
- return nil, []error{fmt.Errorf("failed to get labels: %s", err)}
+ return nil, []error{fmt.Errorf("failed to get labels: %w", err)}
}
var targets []Target
var errors []error
@@ -167,11 +167,11 @@ func (cw *configWatcher) start() error {
for _, target := range cfg.Targets {
address, labels, err := parseLabels(target, nil, cw.cfg)
if err != nil {
- return fmt.Errorf("failed to parse labels for target %q: %s", target, err)
+ return fmt.Errorf("failed to parse labels for target %q: %w", target, err)
}
notifier, err := NewAlertManager(address, cw.genFn, httpCfg, cw.cfg.parsedAlertRelabelConfigs, cw.cfg.Timeout.Duration())
if err != nil {
- return fmt.Errorf("failed to init alertmanager for addr %q: %s", address, err)
+ return fmt.Errorf("failed to init alertmanager for addr %q: %w", address, err)
}
targets = append(targets, Target{
Notifier: notifier,
@@ -189,14 +189,14 @@ func (cw *configWatcher) start() error {
sdc := &cw.cfg.ConsulSDConfigs[i]
targetLabels, err := sdc.GetLabels(cw.cfg.baseDir)
if err != nil {
- return nil, fmt.Errorf("got labels err: %s", err)
+ return nil, fmt.Errorf("got labels err: %w", err)
}
labels = append(labels, targetLabels...)
}
return labels, nil
})
if err != nil {
- return fmt.Errorf("failed to start consulSD discovery: %s", err)
+ return fmt.Errorf("failed to start consulSD discovery: %w", err)
}
}
@@ -207,14 +207,14 @@ func (cw *configWatcher) start() error {
sdc := &cw.cfg.DNSSDConfigs[i]
targetLabels, err := sdc.GetLabels(cw.cfg.baseDir)
if err != nil {
- return nil, fmt.Errorf("got labels err: %s", err)
+ return nil, fmt.Errorf("got labels err: %w", err)
}
labels = append(labels, targetLabels...)
}
return labels, nil
})
if err != nil {
- return fmt.Errorf("failed to start DNSSD discovery: %s", err)
+ return fmt.Errorf("failed to start DNSSD discovery: %w", err)
}
}
return nil

View file

@@ -90,7 +90,7 @@ func Init(gen AlertURLGenerator, extLabels map[string]string, extURL string) (fu
externalLabels = extLabels
eu, err := url.Parse(externalURL)
if err != nil {
- return nil, fmt.Errorf("failed to parse external URL: %s", err)
+ return nil, fmt.Errorf("failed to parse external URL: %w", err)
}
templates.UpdateWithFuncs(templates.FuncsWithExternalURL(eu))
@@ -116,7 +116,7 @@ func Init(gen AlertURLGenerator, extLabels map[string]string, extURL string) (fu
if len(*addrs) > 0 {
notifiers, err := notifiersFromFlags(gen)
if err != nil {
- return nil, fmt.Errorf("failed to create notifier from flag values: %s", err)
+ return nil, fmt.Errorf("failed to create notifier from flag values: %w", err)
}
staticNotifiersFn = func() []Notifier {
return notifiers
@@ -126,7 +126,7 @@ func Init(gen AlertURLGenerator, extLabels map[string]string, extURL string) (fu
cw, err = newWatcher(*configPath, gen)
if err != nil {
- return nil, fmt.Errorf("failed to init config watcher: %s", err)
+ return nil, fmt.Errorf("failed to init config watcher: %w", err)
}
return cw.notifiers, nil
}

View file

@@ -36,11 +36,11 @@ func replay(groupsCfg []config.Group, qb datasource.QuerierBuilder, rw remotewri
}
tFrom, err := time.Parse(time.RFC3339, *replayFrom)
if err != nil {
- return fmt.Errorf("failed to parse %q: %s", *replayFrom, err)
+ return fmt.Errorf("failed to parse %q: %w", *replayFrom, err)
}
tTo, err := time.Parse(time.RFC3339, *replayTo)
if err != nil {
- return fmt.Errorf("failed to parse %q: %s", *replayTo, err)
+ return fmt.Errorf("failed to parse %q: %w", *replayTo, err)
}
if !tTo.After(tFrom) {
return fmt.Errorf("replay.timeTo must be bigger than replay.timeFrom")

View file

@@ -269,7 +269,7 @@ func (ar *AlertingRule) toLabels(m datasource.Metric, qFn templates.QueryFn) (*l
Expr: ar.Expr,
})
if err != nil {
- return nil, fmt.Errorf("failed to expand labels: %s", err)
+ return nil, fmt.Errorf("failed to expand labels: %w", err)
}
for k, v := range extraLabels {
ls.processed[k] = v
@@ -310,7 +310,7 @@ func (ar *AlertingRule) execRange(ctx context.Context, start, end time.Time) ([]
for _, s := range res.Data {
a, err := ar.newAlert(s, nil, time.Time{}, qFn) // initial alert
if err != nil {
- return nil, fmt.Errorf("failed to create alert: %s", err)
+ return nil, fmt.Errorf("failed to create alert: %w", err)
}
if ar.For == 0 { // if alert is instant
a.State = notifier.StateFiring
@@ -388,7 +388,7 @@ func (ar *AlertingRule) exec(ctx context.Context, ts time.Time, limit int) ([]pr
for _, m := range res.Data {
ls, err := ar.toLabels(m, qFn)
if err != nil {
- curState.Err = fmt.Errorf("failed to expand labels: %s", err)
+ curState.Err = fmt.Errorf("failed to expand labels: %w", err)
return nil, curState.Err
}
h := hash(ls.processed)
@@ -513,7 +513,7 @@ func (ar *AlertingRule) newAlert(m datasource.Metric, ls *labelSet, start time.T
if ls == nil {
ls, err = ar.toLabels(m, qFn)
if err != nil {
- return nil, fmt.Errorf("failed to expand labels: %s", err)
+ return nil, fmt.Errorf("failed to expand labels: %w", err)
}
}
a := &notifier.Alert{

View file

@@ -166,7 +166,7 @@ func replayRule(r Rule, start, end time.Time, rw remotewrite.RWClient, replayRul
var n int
for _, ts := range tss {
if err := rw.Push(ts); err != nil {
- return n, fmt.Errorf("remote write failure: %s", err)
+ return n, fmt.Errorf("remote write failure: %w", err)
}
n += len(ts.Samples)
}

View file

@@ -147,11 +147,11 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
func (rh *requestHandler) getRule(r *http.Request) (apiRule, error) {
groupID, err := strconv.ParseUint(r.FormValue(paramGroupID), 10, 64)
if err != nil {
- return apiRule{}, fmt.Errorf("failed to read %q param: %s", paramGroupID, err)
+ return apiRule{}, fmt.Errorf("failed to read %q param: %w", paramGroupID, err)
}
ruleID, err := strconv.ParseUint(r.FormValue(paramRuleID), 10, 64)
if err != nil {
- return apiRule{}, fmt.Errorf("failed to read %q param: %s", paramRuleID, err)
+ return apiRule{}, fmt.Errorf("failed to read %q param: %w", paramRuleID, err)
}
obj, err := rh.m.ruleAPI(groupID, ruleID)
if err != nil {
@@ -163,11 +163,11 @@ func (rh *requestHandler) getRule(r *http.Request) (apiRule, error) {
func (rh *requestHandler) getAlert(r *http.Request) (*apiAlert, error) {
groupID, err := strconv.ParseUint(r.FormValue(paramGroupID), 10, 64)
if err != nil {
- return nil, fmt.Errorf("failed to read %q param: %s", paramGroupID, err)
+ return nil, fmt.Errorf("failed to read %q param: %w", paramGroupID, err)
}
alertID, err := strconv.ParseUint(r.FormValue(paramAlertID), 10, 64)
if err != nil {
- return nil, fmt.Errorf("failed to read %q param: %s", paramAlertID, err)
+ return nil, fmt.Errorf("failed to read %q param: %w", paramAlertID, err)
}
a, err := rh.m.alertAPI(groupID, alertID)
if err != nil {

View file

@@ -168,7 +168,7 @@ See the docs at https://docs.victoriametrics.com/vmbackup.html .
func newSrcFS() (*fslocal.FS, error) {
if err := snapshot.Validate(*snapshotName); err != nil {
- return nil, fmt.Errorf("invalid -snapshotName=%q: %s", *snapshotName, err)
+ return nil, fmt.Errorf("invalid -snapshotName=%q: %w", *snapshotName, err)
}
snapshotPath := filepath.Join(*storageDataPath, "snapshots", *snapshotName)

View file

@@ -81,7 +81,7 @@ var funcs = func() map[string]*funcInfo {
var m map[string]*funcInfo
if err := json.Unmarshal(funcsJSON, &m); err != nil {
// Do not use logger.Panicf, since it isn't ready yet.
- panic(fmt.Errorf("cannot parse funcsJSON: %s", err))
+ panic(fmt.Errorf("cannot parse funcsJSON: %w", err))
}
return m
}()

View file

@@ -197,7 +197,7 @@ func ExportCSVHandler(startTime time.Time, w http.ResponseWriter, r *http.Reques
return err
}
if err := b.UnmarshalData(); err != nil {
- return fmt.Errorf("cannot unmarshal block during export: %s", err)
+ return fmt.Errorf("cannot unmarshal block during export: %w", err)
}
xb := exportBlockPool.Get().(*exportBlock)
xb.mn = mn
@@ -407,7 +407,7 @@ func exportHandler(qt *querytracer.Tracer, w http.ResponseWriter, cp *commonPara
return err
}
if err := b.UnmarshalData(); err != nil {
- return fmt.Errorf("cannot unmarshal block during export: %s", err)
+ return fmt.Errorf("cannot unmarshal block during export: %w", err)
}
xb := exportBlockPool.Get().(*exportBlock)
xb.mn = mn

View file

@@ -418,7 +418,7 @@ func ReadFileOrHTTP(path string) ([]byte, error) {
return nil, fmt.Errorf("unexpected status code when fetching %q: %d, expecting %d; response: %q", path, resp.StatusCode, http.StatusOK, data)
}
if err != nil {
- return nil, fmt.Errorf("cannot read %q: %s", path, err)
+ return nil, fmt.Errorf("cannot read %q: %w", path, err)
}
return data, nil
}

View file

@@ -204,7 +204,7 @@ func gzipHandler(s *server, rh RequestHandler) http.HandlerFunc {
var gzipHandlerWrapper = func() func(http.Handler) http.HandlerFunc {
hw, err := gzhttp.NewWrapper(gzhttp.CompressionLevel(1))
if err != nil {
- panic(fmt.Errorf("BUG: cannot initialize gzip http wrapper: %s", err))
+ panic(fmt.Errorf("BUG: cannot initialize gzip http wrapper: %w", err))
}
return hw
}()

View file

@@ -899,7 +899,7 @@ func unmarshalCommonPrefix(dstTenantID *TenantID, src []byte) ([]byte, byte, err
src = src[1:]
tail, err := dstTenantID.unmarshal(src)
if err != nil {
- return nil, 0, fmt.Errorf("cannot unmarshal tenantID: %s", err)
+ return nil, 0, fmt.Errorf("cannot unmarshal tenantID: %w", err)
}
return tail, prefix, nil
}

View file

@@ -310,7 +310,7 @@ func TestStorageRunQuery(t *testing.T) {
func mustParseQuery(query string) *Query {
q, err := ParseQuery(query)
if err != nil {
- panic(fmt.Errorf("BUG: cannot parse %s: %s", query, err))
+ panic(fmt.Errorf("BUG: cannot parse %s: %w", query, err))
}
return q
}
@@ -657,7 +657,7 @@ func TestStorageSearch(t *testing.T) {
func mustNewStreamFilter(s string) *StreamFilter {
sf, err := newStreamFilter(s)
if err != nil {
- panic(fmt.Errorf("unexpected error in newStreamFilter(%q): %s", s, err))
+ panic(fmt.Errorf("unexpected error in newStreamFilter(%q): %w", s, err))
}
return sf
}

View file

@@ -19,7 +19,7 @@ func newProxyProtocolConn(c net.Conn) (net.Conn, error) {
// Limit the time needed for reading the proxy protocol header.
d := time.Now().Add(5 * time.Second)
if err := c.SetReadDeadline(d); err != nil {
- return nil, fmt.Errorf("cannot set deadline for reading proxy protocol header: %s", err)
+ return nil, fmt.Errorf("cannot set deadline for reading proxy protocol header: %w", err)
}
remoteAddr, err := readProxyProto(c)
@@ -32,7 +32,7 @@ func newProxyProtocolConn(c net.Conn) (net.Conn, error) {
// Reset the read deadline.
if err := c.SetReadDeadline(time.Time{}); err != nil {
- return nil, fmt.Errorf("cannot reset deadline after reading proxy protocol header: %s", err)
+ return nil, fmt.Errorf("cannot reset deadline after reading proxy protocol header: %w", err)
}
return &proxyProtocolConn{

View file

@@ -599,7 +599,7 @@ func (actx *authContext) initFromAuthorization(baseDir string, az *Authorization
actx.getAuthHeader = func() (string, error) {
token, err := readPasswordFromFile(filePath)
if err != nil {
- return "", fmt.Errorf("cannot read credentials from `credentials_file`=%q: %s", az.CredentialsFile, err)
+ return "", fmt.Errorf("cannot read credentials from `credentials_file`=%q: %w", az.CredentialsFile, err)
}
return azType + " " + token, nil
}
@@ -628,7 +628,7 @@ func (actx *authContext) initFromBasicAuthConfig(baseDir string, ba *BasicAuthCo
actx.getAuthHeader = func() (string, error) {
password, err := readPasswordFromFile(filePath)
if err != nil {
- return "", fmt.Errorf("cannot read password from `password_file`=%q set in `basic_auth` section: %s", ba.PasswordFile, err)
+ return "", fmt.Errorf("cannot read password from `password_file`=%q set in `basic_auth` section: %w", ba.PasswordFile, err)
}
// See https://en.wikipedia.org/wiki/Basic_access_authentication
token := ba.Username + ":" + password
@@ -644,7 +644,7 @@ func (actx *authContext) initFromBearerTokenFile(baseDir string, bearerTokenFile
actx.getAuthHeader = func() (string, error) {
token, err := readPasswordFromFile(filePath)
if err != nil {
- return "", fmt.Errorf("cannot read bearer token from `bearer_token_file`=%q: %s", bearerTokenFile, err)
+ return "", fmt.Errorf("cannot read bearer token from `bearer_token_file`=%q: %w", bearerTokenFile, err)
}
return "Bearer " + token, nil
}
@@ -672,11 +672,11 @@ func (actx *authContext) initFromOAuth2Config(baseDir string, o *OAuth2Config) e
}
ts, err := oi.getTokenSource()
if err != nil {
- return "", fmt.Errorf("cannot get OAuth2 tokenSource: %s", err)
+ return "", fmt.Errorf("cannot get OAuth2 tokenSource: %w", err)
}
t, err := ts.Token()
if err != nil {
- return "", fmt.Errorf("cannot get OAuth2 token: %s", err)
+ return "", fmt.Errorf("cannot get OAuth2 token: %w", err)
}
return t.Type() + " " + t.AccessToken, nil
}

View file

@@ -204,7 +204,7 @@ var (
defaultPromRegex = func() *regexutil.PromRegex {
pr, err := regexutil.NewPromRegex(".*")
if err != nil {
- panic(fmt.Errorf("BUG: unexpected error: %s", err))
+ panic(fmt.Errorf("BUG: unexpected error: %w", err))
}
return pr
}()

View file

@@ -28,13 +28,13 @@ func writeRelabelDebug(w io.Writer, isTargetRelabel bool, targetID, metric, rela
}
labels, err := promutils.NewLabelsFromString(metric)
if err != nil {
- err = fmt.Errorf("cannot parse metric: %s", err)
+ err = fmt.Errorf("cannot parse metric: %w", err)
WriteRelabelDebugSteps(w, targetURL, targetID, format, nil, metric, relabelConfigs, err)
return
}
pcs, err := ParseRelabelConfigsData([]byte(relabelConfigs))
if err != nil {
- err = fmt.Errorf("cannot parse relabel configs: %s", err)
+ err = fmt.Errorf("cannot parse relabel configs: %w", err)
WriteRelabelDebugSteps(w, targetURL, targetID, format, nil, metric, relabelConfigs, err)
return
}

View file

@@ -927,7 +927,7 @@ func newTestRegexRelabelConfig(pattern string) *parsedRelabelConfig {
}
prc, err := parseRelabelConfig(rc)
if err != nil {
- panic(fmt.Errorf("unexpected error in parseRelabelConfig: %s", err))
+ panic(fmt.Errorf("unexpected error in parseRelabelConfig: %w", err))
}
return prc
}

View file

@@ -119,7 +119,7 @@ func newClient(ctx context.Context, sw *ScrapeWork) (*client, error) {
dialAddr = addMissingPort(dialAddr, isTLS)
dialFunc, err := newStatDialFunc(proxyURL, sw.ProxyAuthConfig)
if err != nil {
- return nil, fmt.Errorf("cannot create dial func: %s", err)
+ return nil, fmt.Errorf("cannot create dial func: %w", err)
}
hc := &fasthttp.HostClient{
Addr: dialAddr,
@@ -199,12 +199,12 @@ func (c *client) GetStreamReader() (*streamReader, error) {
err = c.setHeaders(req)
if err != nil {
cancel()
- return nil, fmt.Errorf("failed to create request to %q: %s", c.scrapeURL, err)
+ return nil, fmt.Errorf("failed to create request to %q: %w", c.scrapeURL, err)
}
err = c.setProxyHeaders(req)
if err != nil {
cancel()
- return nil, fmt.Errorf("failed to create request to %q: %s", c.scrapeURL, err)
+ return nil, fmt.Errorf("failed to create request to %q: %w", c.scrapeURL, err)
}
scrapeRequests.Inc()
resp, err := c.sc.Do(req)
@@ -254,11 +254,11 @@ func (c *client) ReadData(dst []byte) ([]byte, error) {
req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", c.scrapeTimeoutSecondsStr)
err := c.setFasthttpHeaders(req)
if err != nil {
- return nil, fmt.Errorf("failed to create request to %q: %s", c.scrapeURL, err)
+ return nil, fmt.Errorf("failed to create request to %q: %w", c.scrapeURL, err)
}
err = c.setFasthttpProxyHeaders(req)
if err != nil {
- return nil, fmt.Errorf("failed to create request to %q: %s", c.scrapeURL, err)
+ return nil, fmt.Errorf("failed to create request to %q: %w", c.scrapeURL, err)
}
if !*disableCompression && !c.disableCompression {
req.Header.Set("Accept-Encoding", "gzip")

View file

@@ -110,7 +110,7 @@ func (r *listDropletResponse) nextURLPath() (string, error) {
}
u, err := url.Parse(r.Links.Pages.Next)
if err != nil {
- return "", fmt.Errorf("cannot parse digital ocean next url: %s, err: %s", r.Links.Pages.Next, err)
+ return "", fmt.Errorf("cannot parse digital ocean next url: %s: %w", r.Links.Pages.Next, err)
}
return u.RequestURI(), nil
}

View file

@@ -85,7 +85,7 @@ func getHTTPTargets(cfg *apiConfig) ([]httpGroupTarget, error) {
func parseAPIResponse(data []byte, path string) ([]httpGroupTarget, error) {
var r []httpGroupTarget
if err := json.Unmarshal(data, &r); err != nil {
- return nil, fmt.Errorf("cannot parse http_sd api response path: %s, err: %w", path, err)
+ return nil, fmt.Errorf("cannot parse http_sd api response path=%q: %w", path, err)
}
return r, nil
}

View file

@@ -10,7 +10,7 @@ func BenchmarkPodGetTargetLabels(b *testing.B) {
r := bytes.NewBufferString(testPodsList)
objectsByKey, _, err := parsePodList(r)
if err != nil {
- panic(fmt.Errorf("BUG: unexpected error: %s", err))
+ panic(fmt.Errorf("BUG: unexpected error: %w", err))
}
var o object
for _, srcObject := range objectsByKey {

View file

@@ -579,7 +579,7 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
samplesDropped := 0
sr, err := sw.GetStreamReader()
if err != nil {
- err = fmt.Errorf("cannot read data: %s", err)
+ err = fmt.Errorf("cannot read data: %w", err)
} else {
var mu sync.Mutex
err = sbr.Init(sr)
@@ -827,7 +827,7 @@ func (sw *scrapeWork) sendStaleSeries(lastScrape, currScrape string, timestamp i
return nil
}, sw.logError)
if err != nil {
- sw.logError(fmt.Errorf("cannot send stale markers: %s", err).Error())
+ sw.logError(fmt.Errorf("cannot send stale markers: %w", err).Error())
}
}
if addAutoSeries {

View file

@@ -64,7 +64,7 @@ func Parse(r io.Reader, contentEncoding string, callback func(series []datadog.S
defer putRequest(req)
if err := req.Unmarshal(ctx.reqBuf.B); err != nil {
unmarshalErrors.Inc()
- return fmt.Errorf("cannot unmarshal DataDog POST request with size %d bytes: %s", len(ctx.reqBuf.B), err)
+ return fmt.Errorf("cannot unmarshal DataDog POST request with size %d bytes: %w", len(ctx.reqBuf.B), err)
}
rows := 0
series := req.Series

View file

@@ -58,7 +58,7 @@ func BenchmarkRowsUnmarshal(b *testing.B) {
var r Rows
for pb.Next() {
if err := r.Unmarshal(reqBody); err != nil {
- panic(fmt.Errorf("unmarshal error: %s", err))
+ panic(fmt.Errorf("unmarshal error: %w", err))
}
if len(r.Rows) != 1 {
panic(fmt.Errorf("unexpected number of items unmarshaled; got %d; want %d", len(r.Rows), 1))

View file

@@ -132,10 +132,10 @@ func checkParseStream(data []byte, checkSeries func(tss []prompbmarshal.TimeSeri
var bb bytes.Buffer
zw := gzip.NewWriter(&bb)
if _, err := zw.Write(data); err != nil {
- return fmt.Errorf("cannot compress data: %s", err)
+ return fmt.Errorf("cannot compress data: %w", err)
}
if err := zw.Close(); err != nil {
- return fmt.Errorf("cannot close gzip writer: %s", err)
+ return fmt.Errorf("cannot close gzip writer: %w", err)
}
if err := ParseStream(&bb, true, checkSeries); err != nil {
return fmt.Errorf("error when parsing compressed data: %w", err)

View file

@@ -173,7 +173,7 @@ func (u *URL) NewDialFunc(ac *promauth.Config) (fasthttp.DialFunc, error) {
}
authHeader, err := u.getAuthHeader(ac)
if err != nil {
- return nil, fmt.Errorf("cannot get auth header: %s", err)
+ return nil, fmt.Errorf("cannot get auth header: %w", err)
}
if authHeader != "" {
authHeader = "Proxy-Authorization: " + authHeader + "\r\n"

View file

@@ -141,7 +141,7 @@ func (t *Tracer) AddJSON(jsonTrace []byte) error {
}
var s *span
if err := json.Unmarshal(jsonTrace, &s); err != nil {
- return fmt.Errorf("cannot unmarshal json trace: %s", err)
+ return fmt.Errorf("cannot unmarshal json trace: %w", err)
}
child := &Tracer{
span: s,

View file

@@ -87,7 +87,7 @@ func BenchmarkPromRegexMatchString(b *testing.B) {
func benchmarkPromRegexMatchString(b *testing.B, expr, s string, resultExpected bool) {
pr, err := NewPromRegex(expr)
if err != nil {
- panic(fmt.Errorf("unexpected error: %s", err))
+ panic(fmt.Errorf("unexpected error: %w", err))
}
re := regexp.MustCompile("^(?:" + expr + ")$")
f := func(b *testing.B, matchString func(s string) bool) {

View file

@@ -112,7 +112,7 @@ func getTimestampsForPrecisionBits(timestamps []int64, precisionBits uint8) []in
data, marshalType, firstTimestamp := encoding.MarshalTimestamps(nil, timestamps, precisionBits)
timestampsAdjusted, err := encoding.UnmarshalTimestamps(nil, data, marshalType, firstTimestamp, len(timestamps))
if err != nil {
- panic(fmt.Errorf("BUG: cannot unmarshal timestamps with precisionBits %d: %s", precisionBits, err))
+ panic(fmt.Errorf("BUG: cannot unmarshal timestamps with precisionBits %d: %w", precisionBits, err))
}
minTimestamp := timestamps[0]
maxTimestamp := timestamps[len(timestamps)-1]
@@ -124,7 +124,7 @@ func getValuesForPrecisionBits(values []int64, precisionBits uint8) []int64 {
data, marshalType, firstValue := encoding.MarshalValues(nil, values, precisionBits)
valuesAdjusted, err := encoding.UnmarshalValues(nil, data, marshalType, firstValue, len(values))
if err != nil {
- panic(fmt.Errorf("BUG: cannot unmarshal values with precisionBits %d: %s", precisionBits, err))
+ panic(fmt.Errorf("BUG: cannot unmarshal values with precisionBits %d: %w", precisionBits, err))
}
return valuesAdjusted
}

View file

@@ -747,7 +747,7 @@ func (is *indexSearch) getLabelNamesForMetricIDs(qt *querytracer.Tracer, metricI
continue
}
if err := mn.Unmarshal(buf); err != nil {
- logger.Panicf("FATAL: cannot unmarshal metricName %q: %w", buf, err)
+ logger.Panicf("FATAL: cannot unmarshal metricName %q: %s", buf, err)
}
for _, tag := range mn.Tags {
if _, ok := lns[string(tag.Key)]; !ok {
@@ -1610,7 +1610,7 @@ func (db *indexDB) searchMetricIDs(qt *querytracer.Tracer, tfss []*TagFilters, t
localMetricIDs, err := is.searchMetricIDs(qtChild, tfss, tr, maxMetrics)
db.putIndexSearch(is)
if err != nil {
- return nil, fmt.Errorf("error when searching for metricIDs in the current indexdb: %s", err)
+ return nil, fmt.Errorf("error when searching for metricIDs in the current indexdb: %w", err)
}
qtChild.Done()
@@ -1635,7 +1635,7 @@ func (db *indexDB) searchMetricIDs(qt *querytracer.Tracer, tfss []*TagFilters, t
extDB.putMetricIDsToTagFiltersCache(qtChild, extMetricIDs, tfKeyExtBuf.B)
})
if err != nil {
- return nil, fmt.Errorf("error when searching for metricIDs in the previous indexdb: %s", err)
+ return nil, fmt.Errorf("error when searching for metricIDs in the previous indexdb: %w", err)
}
// Merge localMetricIDs with extMetricIDs.
@@ -1834,7 +1834,7 @@ func (is *indexSearch) searchMetricName(dst []byte, metricID uint64) ([]byte, bo
if err == io.EOF {
return dst, false
}
- logger.Panicf("FATAL: error when searching metricName by metricID; searchPrefix %q: %w", kb.B, err)
+ logger.Panicf("FATAL: error when searching metricName by metricID; searchPrefix %q: %s", kb.B, err)
}
v := ts.Item[len(kb.B):]
dst = append(dst, v...)

View file

@@ -50,7 +50,7 @@ func benchmarkPartSearch(b *testing.B, p *part, tsids []TSID, tr TimeRange, spar
blocksRead++
}
if err := ps.Error(); err != nil {
- panic(fmt.Errorf("BUG: unexpected error: %s", err))
+ panic(fmt.Errorf("BUG: unexpected error: %w", err))
}
blocksWant := len(tsids) / sparseness
if blocksRead != blocksWant {