all: use %w instead of %s for wrapping errors in fmt.Errorf

This will simplify examining the returned errors, such as httpserver.ErrorWithStatusCode.
See https://blog.golang.org/go1.13-errors for details.
This commit is contained in:
Aliaksandr Valialkin 2020-06-30 22:58:18 +03:00
parent 5a43842bd3
commit d962568e93
156 changed files with 994 additions and 994 deletions

View file

@ -162,7 +162,7 @@ func getTLSConfig(argIdx int) (*tls.Config, error) {
}
cfg, err := promauth.NewConfig(".", nil, "", "", tlsConfig)
if err != nil {
return nil, fmt.Errorf("cannot populate TLS config: %s", err)
return nil, fmt.Errorf("cannot populate TLS config: %w", err)
}
tlsCfg := cfg.NewTLSConfig()
return tlsCfg, nil

View file

@ -33,7 +33,7 @@ func loadRelabelConfigs() (*relabelConfigs, error) {
if *relabelConfigPathGlobal != "" {
global, err := promrelabel.LoadRelabelConfigs(*relabelConfigPathGlobal)
if err != nil {
return nil, fmt.Errorf("cannot load -remoteWrite.relabelConfig=%q: %s", *relabelConfigPathGlobal, err)
return nil, fmt.Errorf("cannot load -remoteWrite.relabelConfig=%q: %w", *relabelConfigPathGlobal, err)
}
rcs.global = global
}
@ -45,7 +45,7 @@ func loadRelabelConfigs() (*relabelConfigs, error) {
for i, path := range *relabelConfigPaths {
prc, err := promrelabel.LoadRelabelConfigs(path)
if err != nil {
return nil, fmt.Errorf("cannot load relabel configs from -remoteWrite.urlRelabelConfig=%q: %s", path, err)
return nil, fmt.Errorf("cannot load relabel configs from -remoteWrite.urlRelabelConfig=%q: %w", path, err)
}
rcs.perURL[i] = prc
}

View file

@ -72,7 +72,7 @@ func (ar *AlertingRule) Exec(ctx context.Context, q datasource.Querier, series b
ar.lastExecError = err
ar.lastExecTime = time.Now()
if err != nil {
return nil, fmt.Errorf("failed to execute query %q: %s", ar.Expr, err)
return nil, fmt.Errorf("failed to execute query %q: %w", ar.Expr, err)
}
for h, a := range ar.alerts {
@ -103,7 +103,7 @@ func (ar *AlertingRule) Exec(ctx context.Context, q datasource.Querier, series b
a, err := ar.newAlert(m, ar.lastExecTime)
if err != nil {
ar.lastExecError = err
return nil, fmt.Errorf("failed to create alert: %s", err)
return nil, fmt.Errorf("failed to create alert: %w", err)
}
a.ID = h
a.State = notifier.StatePending
@ -363,7 +363,7 @@ func (ar *AlertingRule) Restore(ctx context.Context, q datasource.Querier, lookb
a, err := ar.newAlert(m, time.Unix(int64(m.Value), 0))
if err != nil {
return fmt.Errorf("failed to create alert: %s", err)
return fmt.Errorf("failed to create alert: %w", err)
}
a.ID = hash(m)
a.State = notifier.StatePending

View file

@ -46,19 +46,19 @@ func (g *Group) Validate(validateAnnotations, validateExpressions bool) error {
}
uniqueRules[r.ID] = struct{}{}
if err := r.Validate(); err != nil {
return fmt.Errorf("invalid rule %q.%q: %s", g.Name, ruleName, err)
return fmt.Errorf("invalid rule %q.%q: %w", g.Name, ruleName, err)
}
if validateExpressions {
if _, err := metricsql.Parse(r.Expr); err != nil {
return fmt.Errorf("invalid expression for rule %q.%q: %s", g.Name, ruleName, err)
return fmt.Errorf("invalid expression for rule %q.%q: %w", g.Name, ruleName, err)
}
}
if validateAnnotations {
if err := notifier.ValidateTemplates(r.Annotations); err != nil {
return fmt.Errorf("invalid annotations for rule %q.%q: %s", g.Name, ruleName, err)
return fmt.Errorf("invalid annotations for rule %q.%q: %w", g.Name, ruleName, err)
}
if err := notifier.ValidateTemplates(r.Labels); err != nil {
return fmt.Errorf("invalid labels for rule %q.%q: %s", g.Name, ruleName, err)
return fmt.Errorf("invalid labels for rule %q.%q: %w", g.Name, ruleName, err)
}
}
}
@ -137,7 +137,7 @@ func Parse(pathPatterns []string, validateAnnotations, validateExpressions bool)
for _, pattern := range pathPatterns {
matches, err := filepath.Glob(pattern)
if err != nil {
return nil, fmt.Errorf("error reading file pattern %s: %v", pattern, err)
return nil, fmt.Errorf("error reading file pattern %s: %w", pattern, err)
}
fp = append(fp, matches...)
}
@ -150,7 +150,7 @@ func Parse(pathPatterns []string, validateAnnotations, validateExpressions bool)
}
for _, g := range gr {
if err := g.Validate(validateAnnotations, validateExpressions); err != nil {
return nil, fmt.Errorf("invalid group %q in file %q: %s", g.Name, file, err)
return nil, fmt.Errorf("invalid group %q in file %q: %w", g.Name, file, err)
}
if _, ok := uniqueGroups[g.Name]; ok {
return nil, fmt.Errorf("group name %q duplicate in file %q", g.Name, file)

View file

@ -31,7 +31,7 @@ func Init() (Querier, error) {
}
tr, err := utils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
if err != nil {
return nil, fmt.Errorf("failed to create transport: %s", err)
return nil, fmt.Errorf("failed to create transport: %w", err)
}
c := &http.Client{Transport: tr}
return NewVMStorage(*addr, *basicAuthUsername, *basicAuthPassword, c), nil

View file

@ -32,7 +32,7 @@ func (r response) metrics() ([]Metric, error) {
for i, res := range r.Data.Result {
f, err = strconv.ParseFloat(res.TV[1].(string), 64)
if err != nil {
return nil, fmt.Errorf("metric %v, unable to parse float64 from %s: %s", res, res.TV[1], err)
return nil, fmt.Errorf("metric %v, unable to parse float64 from %s: %w", res, res.TV[1], err)
}
m.Labels = nil
for k, v := range r.Data.Result[i].Labels {
@ -80,25 +80,25 @@ func (s *VMStorage) Query(ctx context.Context, query string) ([]Metric, error) {
}
resp, err := s.c.Do(req.WithContext(ctx))
if err != nil {
return nil, fmt.Errorf("error getting response from %s:%s", req.URL, err)
return nil, fmt.Errorf("error getting response from %s: %w", req.URL, err)
}
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode != http.StatusOK {
body, _ := ioutil.ReadAll(resp.Body)
return nil, fmt.Errorf("datasource returns unxeprected response code %d for %s with err %s. Reponse body %s", resp.StatusCode, req.URL, err, body)
return nil, fmt.Errorf("datasource returns unxeprected response code %d for %s with err %w; reponse body: %s", resp.StatusCode, req.URL, err, body)
}
r := &response{}
if err := json.NewDecoder(resp.Body).Decode(r); err != nil {
return nil, fmt.Errorf("error parsing metrics for %s:%s", req.URL, err)
return nil, fmt.Errorf("error parsing metrics for %s: %w", req.URL, err)
}
if r.Status == statusError {
return nil, fmt.Errorf("response error, query: %s, errorType: %s, error: %s", req.URL, r.ErrorType, r.Error)
}
if r.Status != statusSuccess {
return nil, fmt.Errorf("unkown status:%s, Expected success or error ", r.Status)
return nil, fmt.Errorf("unknown status: %s, Expected success or error ", r.Status)
}
if r.Data.ResultType != rtVector {
return nil, fmt.Errorf("unkown restul type:%s. Expected vector", r.Data.ResultType)
return nil, fmt.Errorf("unknown restul type:%s. Expected vector", r.Data.ResultType)
}
return r.metrics()
}

View file

@ -84,7 +84,7 @@ func (g *Group) Restore(ctx context.Context, q datasource.Querier, lookback time
continue
}
if err := rr.Restore(ctx, q, lookback); err != nil {
return fmt.Errorf("error while restoring rule %q: %s", rule, err)
return fmt.Errorf("error while restoring rule %q: %w", rule, err)
}
}
return nil
@ -251,7 +251,7 @@ func (e *executor) exec(ctx context.Context, rule Rule, returnSeries bool, inter
tss, err := rule.Exec(ctx, e.querier, returnSeries)
if err != nil {
execErrors.Inc()
return fmt.Errorf("rule %q: failed to execute: %s", rule, err)
return fmt.Errorf("rule %q: failed to execute: %w", rule, err)
}
if len(tss) > 0 && e.rw != nil {
@ -259,7 +259,7 @@ func (e *executor) exec(ctx context.Context, rule Rule, returnSeries bool, inter
for _, ts := range tss {
if err := e.rw.Push(ts); err != nil {
remoteWriteErrors.Inc()
return fmt.Errorf("rule %q: remote write failure: %s", rule, err)
return fmt.Errorf("rule %q: remote write failure: %w", rule, err)
}
}
}
@ -293,7 +293,7 @@ func (e *executor) exec(ctx context.Context, rule Rule, returnSeries bool, inter
for _, nt := range e.notifiers {
if err := nt.Send(ctx, alerts); err != nil {
alertsSendErrors.Inc()
errGr.Add(fmt.Errorf("rule %q: failed to send alerts: %s", rule, err))
errGr.Add(fmt.Errorf("rule %q: failed to send alerts: %w", rule, err))
}
}
return errGr.Err()

View file

@ -105,20 +105,20 @@ var (
func newManager(ctx context.Context) (*manager, error) {
q, err := datasource.Init()
if err != nil {
return nil, fmt.Errorf("failed to init datasource: %s", err)
return nil, fmt.Errorf("failed to init datasource: %w", err)
}
eu, err := getExternalURL(*externalURL, *httpListenAddr, false)
if err != nil {
return nil, fmt.Errorf("failed to init `external.url`: %s", err)
return nil, fmt.Errorf("failed to init `external.url`: %w", err)
}
notifier.InitTemplateFunc(eu)
aug, err := getAlertURLGenerator(eu, *externalAlertSource, *validateTemplates)
if err != nil {
return nil, fmt.Errorf("failed to init `external.alert.source`: %s", err)
return nil, fmt.Errorf("failed to init `external.alert.source`: %w", err)
}
nts, err := notifier.Init(aug)
if err != nil {
return nil, fmt.Errorf("failed to init notifier: %s", err)
return nil, fmt.Errorf("failed to init notifier: %w", err)
}
manager := &manager{
@ -128,13 +128,13 @@ func newManager(ctx context.Context) (*manager, error) {
}
rw, err := remotewrite.Init(ctx)
if err != nil {
return nil, fmt.Errorf("failed to init remoteWrite: %s", err)
return nil, fmt.Errorf("failed to init remoteWrite: %w", err)
}
manager.rw = rw
rr, err := remoteread.Init()
if err != nil {
return nil, fmt.Errorf("failed to init remoteRead: %s", err)
return nil, fmt.Errorf("failed to init remoteRead: %w", err)
}
manager.rr = rr
return manager, nil
@ -169,7 +169,7 @@ func getAlertURLGenerator(externalURL *url.URL, externalAlertSource string, vali
if err := notifier.ValidateTemplates(map[string]string{
"tpl": externalAlertSource,
}); err != nil {
return nil, fmt.Errorf("error validating source template %s:%w", externalAlertSource, err)
return nil, fmt.Errorf("error validating source template %s: %w", externalAlertSource, err)
}
}
m := map[string]string{

View file

@ -83,7 +83,7 @@ func (m *manager) update(ctx context.Context, path []string, validateTpl, valida
logger.Infof("reading rules configuration file from %q", strings.Join(path, ";"))
groupsCfg, err := config.Parse(path, validateTpl, validateExpr)
if err != nil {
return fmt.Errorf("cannot parse configuration file: %s", err)
return fmt.Errorf("cannot parse configuration file: %w", err)
}
groupsRegistry := make(map[uint64]*Group)

View file

@ -89,7 +89,7 @@ func templateAnnotations(annotations map[string]string, header string, data aler
builder.WriteString(header)
builder.WriteString(text)
if err := templateAnnotation(&buf, builder.String(), data); err != nil {
eg.Add(fmt.Errorf("key %q, template %q: %s", key, text, err))
eg.Add(fmt.Errorf("key %q, template %q: %w", key, text, err))
continue
}
r[key] = buf.String()

View file

@ -43,7 +43,7 @@ func (am *AlertManager) Send(ctx context.Context, alerts []Alert) error {
if resp.StatusCode != http.StatusOK {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("failed to read response from %q: %s", am.alertURL, err)
return fmt.Errorf("failed to read response from %q: %w", am.alertURL, err)
}
return fmt.Errorf("invalid SC %d from %q; response body: %s", resp.StatusCode, am.alertURL, string(body))
}

View file

@ -36,7 +36,7 @@ func Init(gen AlertURLGenerator) ([]Notifier, error) {
ca, serverName := tlsCAFile.GetOptionalArg(i), tlsServerName.GetOptionalArg(i)
tr, err := utils.Transport(addr, cert, key, ca, serverName, *tlsInsecureSkipVerify)
if err != nil {
return nil, fmt.Errorf("failed to create transport: %s", err)
return nil, fmt.Errorf("failed to create transport: %w", err)
}
user, pass := basicAuthUsername.GetOptionalArg(i), basicAuthPassword.GetOptionalArg(i)
am := NewAlertManager(addr, user, pass, gen, &http.Client{Transport: tr})

View file

@ -71,7 +71,7 @@ func (rr *RecordingRule) Exec(ctx context.Context, q datasource.Querier, series
rr.lastExecTime = time.Now()
rr.lastExecError = err
if err != nil {
return nil, fmt.Errorf("failed to execute query %q: %s", rr.Expr, err)
return nil, fmt.Errorf("failed to execute query %q: %w", rr.Expr, err)
}
duplicates := make(map[uint64]prompbmarshal.TimeSeries, len(qMetrics))

View file

@ -32,7 +32,7 @@ func Init() (datasource.Querier, error) {
}
tr, err := utils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
if err != nil {
return nil, fmt.Errorf("failed to create transport: %s", err)
return nil, fmt.Errorf("failed to create transport: %w", err)
}
c := &http.Client{Transport: tr}
return datasource.NewVMStorage(*addr, *basicAuthUsername, *basicAuthPassword, c), nil

View file

@ -38,7 +38,7 @@ func Init(ctx context.Context) (*Client, error) {
t, err := utils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
if err != nil {
return nil, fmt.Errorf("failed to create transport: %s", err)
return nil, fmt.Errorf("failed to create transport: %w", err)
}
return NewClient(ctx, Config{

View file

@ -30,7 +30,7 @@ func TLSConfig(certFile, keyFile, CAFile, serverName string, insecureSkipVerify
if certFile != "" {
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return nil, fmt.Errorf("cannot load TLS certificate from `cert_file`=%q, `key_file`=%q: %s", certFile, keyFile, err)
return nil, fmt.Errorf("cannot load TLS certificate from `cert_file`=%q, `key_file`=%q: %w", certFile, keyFile, err)
}
certs = []tls.Certificate{cert}
@ -40,7 +40,7 @@ func TLSConfig(certFile, keyFile, CAFile, serverName string, insecureSkipVerify
if CAFile != "" {
pem, err := ioutil.ReadFile(CAFile)
if err != nil {
return nil, fmt.Errorf("cannot read `ca_file` %q: %s", CAFile, err)
return nil, fmt.Errorf("cannot read `ca_file` %q: %w", CAFile, err)
}
rootCAs = x509.NewCertPool()

View file

@ -80,7 +80,7 @@ func (rh *requestHandler) listGroups() ([]byte, error) {
b, err := json.Marshal(lr)
if err != nil {
return nil, &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf(`error encoding list of active alerts: %s`, err),
Err: fmt.Errorf(`error encoding list of active alerts: %w`, err),
StatusCode: http.StatusInternalServerError,
}
}
@ -117,7 +117,7 @@ func (rh *requestHandler) listAlerts() ([]byte, error) {
b, err := json.Marshal(lr)
if err != nil {
return nil, &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf(`error encoding list of active alerts: %s`, err),
Err: fmt.Errorf(`error encoding list of active alerts: %w`, err),
StatusCode: http.StatusInternalServerError,
}
}
@ -138,11 +138,11 @@ func (rh *requestHandler) alert(path string) ([]byte, error) {
groupID, err := uint64FromPath(parts[0])
if err != nil {
return nil, badRequest(fmt.Errorf(`cannot parse groupID: %s`, err))
return nil, badRequest(fmt.Errorf(`cannot parse groupID: %w`, err))
}
alertID, err := uint64FromPath(parts[1])
if err != nil {
return nil, badRequest(fmt.Errorf(`cannot parse alertID: %s`, err))
return nil, badRequest(fmt.Errorf(`cannot parse alertID: %w`, err))
}
resp, err := rh.m.AlertAPI(groupID, alertID)
if err != nil {

View file

@ -82,11 +82,11 @@ var stopCh chan struct{}
func readAuthConfig(path string) (map[string]*UserInfo, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("cannot read %q: %s", path, err)
return nil, fmt.Errorf("cannot read %q: %w", path, err)
}
m, err := parseAuthConfig(data)
if err != nil {
return nil, fmt.Errorf("cannot parse %q: %s", path, err)
return nil, fmt.Errorf("cannot parse %q: %w", path, err)
}
logger.Infof("Loaded information about %d users from %q", len(m), path)
return m, nil
@ -95,7 +95,7 @@ func readAuthConfig(path string) (map[string]*UserInfo, error) {
func parseAuthConfig(data []byte) (map[string]*UserInfo, error) {
var ac AuthConfig
if err := yaml.UnmarshalStrict(data, &ac); err != nil {
return nil, fmt.Errorf("cannot unmarshal AuthConfig data: %s", err)
return nil, fmt.Errorf("cannot unmarshal AuthConfig data: %w", err)
}
uis := ac.Users
if len(uis) == 0 {
@ -115,7 +115,7 @@ func parseAuthConfig(data []byte) (map[string]*UserInfo, error) {
// Validate urlPrefix
target, err := url.Parse(urlPrefix)
if err != nil {
return nil, fmt.Errorf("invalid `url_prefix: %q`: %s", urlPrefix, err)
return nil, fmt.Errorf("invalid `url_prefix: %q`: %w", urlPrefix, err)
}
if target.Scheme != "http" && target.Scheme != "https" {
return nil, fmt.Errorf("unsupported scheme for `url_prefix: %q`: %q; must be `http` or `https`", urlPrefix, target.Scheme)

View file

@ -110,12 +110,12 @@ func newSrcFS() (*fslocal.FS, error) {
// Verify the snapshot exists.
f, err := os.Open(snapshotPath)
if err != nil {
return nil, fmt.Errorf("cannot open snapshot at %q: %s", snapshotPath, err)
return nil, fmt.Errorf("cannot open snapshot at %q: %w", snapshotPath, err)
}
fi, err := f.Stat()
_ = f.Close()
if err != nil {
return nil, fmt.Errorf("cannot stat %q: %s", snapshotPath, err)
return nil, fmt.Errorf("cannot stat %q: %w", snapshotPath, err)
}
if !fi.IsDir() {
return nil, fmt.Errorf("snapshot %q must be a directory", snapshotPath)
@ -126,7 +126,7 @@ func newSrcFS() (*fslocal.FS, error) {
MaxBytesPerSecond: *maxBytesPerSecond,
}
if err := fs.Init(); err != nil {
return nil, fmt.Errorf("cannot initialize fs: %s", err)
return nil, fmt.Errorf("cannot initialize fs: %w", err)
}
return fs, nil
}
@ -134,7 +134,7 @@ func newSrcFS() (*fslocal.FS, error) {
func newDstFS() (common.RemoteFS, error) {
fs, err := actions.NewRemoteFS(*dst)
if err != nil {
return nil, fmt.Errorf("cannot parse `-dst`=%q: %s", *dst, err)
return nil, fmt.Errorf("cannot parse `-dst`=%q: %w", *dst, err)
}
return fs, nil
}
@ -145,7 +145,7 @@ func newOriginFS() (common.RemoteFS, error) {
}
fs, err := actions.NewRemoteFS(*origin)
if err != nil {
return nil, fmt.Errorf("cannot parse `-origin`=%q: %s", *origin, err)
return nil, fmt.Errorf("cannot parse `-origin`=%q: %w", *origin, err)
}
return fs, nil
}

View file

@ -41,7 +41,7 @@ func (br *bufRows) pushTo(sn *storageNode) error {
br.reset()
if err != nil {
return &httpserver.ErrorWithStatusCode{
Err: fmt.Errorf("cannot send %d bytes to storageNode %q: %s", bufLen, sn.dialer.Addr(), err),
Err: fmt.Errorf("cannot send %d bytes to storageNode %q: %w", bufLen, sn.dialer.Addr(), err),
StatusCode: http.StatusServiceUnavailable,
}
}

View file

@ -51,7 +51,7 @@ func (sn *storageNode) push(buf []byte, rows int) error {
if sn.isBroken() {
// The vmstorage node is temporarily broken. Re-route buf to healthy vmstorage nodes.
if err := addToReroutedBufMayBlock(buf, rows); err != nil {
return fmt.Errorf("%d rows dropped because the current vsmtorage is unavailable and %s", rows, err)
return fmt.Errorf("%d rows dropped because the current vsmtorage is unavailable and %w", rows, err)
}
sn.rowsReroutedFromHere.Add(rows)
return nil
@ -71,7 +71,7 @@ func (sn *storageNode) push(buf []byte, rows int) error {
// This means that the current vmstorage is slow or will become broken soon.
// Re-route buf to healthy vmstorage nodes.
if err := addToReroutedBufMayBlock(buf, rows); err != nil {
return fmt.Errorf("%d rows dropped because the current vmstorage buf is full and %s", rows, err)
return fmt.Errorf("%d rows dropped because the current vmstorage buf is full and %w", rows, err)
}
sn.rowsReroutedFromHere.Add(rows)
return nil
@ -247,7 +247,7 @@ func sendToConn(bc *handshake.BufferedConn, buf []byte) error {
timeout := time.Duration(timeoutSeconds) * time.Second
deadline := time.Now().Add(timeout)
if err := bc.SetWriteDeadline(deadline); err != nil {
return fmt.Errorf("cannot set write deadline to %s: %s", deadline, err)
return fmt.Errorf("cannot set write deadline to %s: %w", deadline, err)
}
// sizeBuf guarantees that the rows batch will be either fully
// read or fully discarded on the vmstorage side.
@ -256,23 +256,23 @@ func sendToConn(bc *handshake.BufferedConn, buf []byte) error {
defer sizeBufPool.Put(sizeBuf)
sizeBuf.B = encoding.MarshalUint64(sizeBuf.B[:0], uint64(len(buf)))
if _, err := bc.Write(sizeBuf.B); err != nil {
return fmt.Errorf("cannot write data size %d: %s", len(buf), err)
return fmt.Errorf("cannot write data size %d: %w", len(buf), err)
}
if _, err := bc.Write(buf); err != nil {
return fmt.Errorf("cannot write data with size %d: %s", len(buf), err)
return fmt.Errorf("cannot write data with size %d: %w", len(buf), err)
}
if err := bc.Flush(); err != nil {
return fmt.Errorf("cannot flush data with size %d: %s", len(buf), err)
return fmt.Errorf("cannot flush data with size %d: %w", len(buf), err)
}
// Wait for `ack` from vmstorage.
// This guarantees that the message has been fully received by vmstorage.
deadline = time.Now().Add(timeout)
if err := bc.SetReadDeadline(deadline); err != nil {
return fmt.Errorf("cannot set read deadline for reading `ack` to vmstorage: %s", err)
return fmt.Errorf("cannot set read deadline for reading `ack` to vmstorage: %w", err)
}
if _, err := io.ReadFull(bc, sizeBuf.B[:1]); err != nil {
return fmt.Errorf("cannot read `ack` from vmstorage: %s", err)
return fmt.Errorf("cannot read `ack` from vmstorage: %w", err)
}
if sizeBuf.B[0] != 1 {
return fmt.Errorf("unexpected `ack` received from vmstorage; got %d; want %d", sizeBuf.B[0], 1)
@ -296,7 +296,7 @@ func (sn *storageNode) dial() (*handshake.BufferedConn, error) {
if err != nil {
_ = c.Close()
sn.handshakeErrors.Inc()
return nil, fmt.Errorf("handshake error: %s", err)
return nil, fmt.Errorf("handshake error: %w", err)
}
return bc, nil
}

View file

@ -24,7 +24,7 @@ func InsertHandler(req *http.Request) error {
path := req.URL.Path
p, err := httpserver.ParsePath(path)
if err != nil {
return fmt.Errorf("cannot parse path %q: %s", path, err)
return fmt.Errorf("cannot parse path %q: %w", path, err)
}
if p.Prefix != "insert" {
// This is not our link.
@ -32,7 +32,7 @@ func InsertHandler(req *http.Request) error {
}
at, err := auth.NewToken(p.AuthToken)
if err != nil {
return fmt.Errorf("auth error: %s", err)
return fmt.Errorf("auth error: %w", err)
}
switch p.Suffix {
case "api/put", "opentsdb/api/put":

View file

@ -71,7 +71,7 @@ func newDstFS() (*fslocal.FS, error) {
MaxBytesPerSecond: *maxBytesPerSecond,
}
if err := fs.Init(); err != nil {
return nil, fmt.Errorf("cannot initialize local fs: %s", err)
return nil, fmt.Errorf("cannot initialize local fs: %w", err)
}
return fs, nil
}
@ -79,7 +79,7 @@ func newDstFS() (*fslocal.FS, error) {
func newSrcFS() (common.RemoteFS, error) {
fs, err := actions.NewRemoteFS(*src)
if err != nil {
return nil, fmt.Errorf("cannot parse `-src`=%q: %s", *src, err)
return nil, fmt.Errorf("cannot parse `-src`=%q: %w", *src, err)
}
return fs, nil
}

View file

@ -94,7 +94,7 @@ func timeseriesWorker(workerID uint) {
continue
}
if err := tsw.pts.Unpack(rss.tbf, &rs, rss.tr, rss.fetchData, rss.at); err != nil {
tsw.doneCh <- fmt.Errorf("error during time series unpacking: %s", err)
tsw.doneCh <- fmt.Errorf("error during time series unpacking: %w", err)
continue
}
if len(rs.Timestamps) > 0 || !rss.fetchData {
@ -188,7 +188,7 @@ func unpackWorker() {
sb := getSortBlock()
if err := sb.unpackFrom(upw.tbf, upw.addr, upw.tr, upw.fetchData, upw.at); err != nil {
putSortBlock(sb)
upw.doneCh <- fmt.Errorf("cannot unpack block: %s", err)
upw.doneCh <- fmt.Errorf("cannot unpack block: %w", err)
continue
}
upw.sb = sb
@ -201,7 +201,7 @@ func (pts *packedTimeseries) Unpack(tbf *tmpBlocksFile, dst *Result, tr storage.
dst.reset()
if err := dst.MetricName.Unmarshal(bytesutil.ToUnsafeBytes(pts.metricName)); err != nil {
return fmt.Errorf("cannot unmarshal metricName %q: %s", pts.metricName, err)
return fmt.Errorf("cannot unmarshal metricName %q: %w", pts.metricName, err)
}
// Feed workers with work
@ -332,7 +332,7 @@ func (sb *sortBlock) unpackFrom(tbf *tmpBlocksFile, addr tmpBlockAddr, tr storag
tbf.MustReadBlockAt(&sb.b, addr)
if fetchData {
if err := sb.b.UnmarshalData(); err != nil {
return fmt.Errorf("cannot unmarshal block: %s", err)
return fmt.Errorf("cannot unmarshal block: %w", err)
}
}
timestamps := sb.b.Timestamps()
@ -427,7 +427,7 @@ func DeleteSeries(at *auth.Token, sq *storage.SearchQuery, deadline Deadline) (i
}
if len(errors) > 0 {
// Return only the first error, since it has no sense in returning all errors.
return deletedTotal, fmt.Errorf("error occured during deleting time series: %s", errors[0])
return deletedTotal, fmt.Errorf("error occured during deleting time series: %w", errors[0])
}
return deletedTotal, nil
}
@ -446,7 +446,7 @@ func GetLabels(at *auth.Token, deadline Deadline) ([]string, bool, error) {
labels, err := sn.getLabels(at.AccountID, at.ProjectID, deadline)
if err != nil {
sn.labelsRequestErrors.Inc()
err = fmt.Errorf("cannot get labels from vmstorage %s: %s", sn.connPool.Addr(), err)
err = fmt.Errorf("cannot get labels from vmstorage %s: %w", sn.connPool.Addr(), err)
}
resultsCh <- nodeResult{
labels: labels,
@ -472,7 +472,7 @@ func GetLabels(at *auth.Token, deadline Deadline) ([]string, bool, error) {
if len(errors) > 0 {
if len(errors) == len(storageNodes) {
// Return only the first error, since it has no sense in returning all errors.
return nil, true, fmt.Errorf("error occured during fetching labels: %s", errors[0])
return nil, true, fmt.Errorf("error occured during fetching labels: %w", errors[0])
}
// Just log errors and return partial results.
@ -519,7 +519,7 @@ func GetLabelValues(at *auth.Token, labelName string, deadline Deadline) ([]stri
labelValues, err := sn.getLabelValues(at.AccountID, at.ProjectID, labelName, deadline)
if err != nil {
sn.labelValuesRequestErrors.Inc()
err = fmt.Errorf("cannot get label values from vmstorage %s: %s", sn.connPool.Addr(), err)
err = fmt.Errorf("cannot get label values from vmstorage %s: %w", sn.connPool.Addr(), err)
}
resultsCh <- nodeResult{
labelValues: labelValues,
@ -545,7 +545,7 @@ func GetLabelValues(at *auth.Token, labelName string, deadline Deadline) ([]stri
if len(errors) > 0 {
if len(errors) == len(storageNodes) {
// Return only the first error, since it has no sense in returning all errors.
return nil, true, fmt.Errorf("error occured during fetching label values: %s", errors[0])
return nil, true, fmt.Errorf("error occured during fetching label values: %w", errors[0])
}
// Just log errors and return partial results.
@ -580,7 +580,7 @@ func GetLabelEntries(at *auth.Token, deadline Deadline) ([]storage.TagEntry, boo
labelEntries, err := sn.getLabelEntries(at.AccountID, at.ProjectID, deadline)
if err != nil {
sn.labelEntriesRequestErrors.Inc()
err = fmt.Errorf("cannot get label entries from vmstorage %s: %s", sn.connPool.Addr(), err)
err = fmt.Errorf("cannot get label entries from vmstorage %s: %w", sn.connPool.Addr(), err)
}
resultsCh <- nodeResult{
labelEntries: labelEntries,
@ -606,7 +606,7 @@ func GetLabelEntries(at *auth.Token, deadline Deadline) ([]storage.TagEntry, boo
if len(errors) > 0 {
if len(errors) == len(storageNodes) {
// Return only the first error, since it has no sense in returning all errors.
return nil, true, fmt.Errorf("error occured during fetching label entries: %s", errors[0])
return nil, true, fmt.Errorf("error occured during fetching label entries: %w", errors[0])
}
// Just log errors and return partial results.
@ -685,7 +685,7 @@ func GetTSDBStatusForDate(at *auth.Token, deadline Deadline, date uint64, topN i
status, err := sn.getTSDBStatusForDate(at.AccountID, at.ProjectID, date, topN, deadline)
if err != nil {
sn.tsdbStatusRequestErrors.Inc()
err = fmt.Errorf("cannot obtain tsdb status from vmstorage %s: %s", sn.connPool.Addr(), err)
err = fmt.Errorf("cannot obtain tsdb status from vmstorage %s: %w", sn.connPool.Addr(), err)
}
resultsCh <- nodeResult{
status: status,
@ -711,7 +711,7 @@ func GetTSDBStatusForDate(at *auth.Token, deadline Deadline, date uint64, topN i
if len(errors) > 0 {
if len(errors) == len(storageNodes) {
// Return only the first error, since it has no sense in returning all errors.
return nil, true, fmt.Errorf("error occured during fetching tsdb stats: %s", errors[0])
return nil, true, fmt.Errorf("error occured during fetching tsdb stats: %w", errors[0])
}
// Just log errors and return partial results.
// This allows gracefully degrade vmselect in the case
@ -786,7 +786,7 @@ func GetSeriesCount(at *auth.Token, deadline Deadline) (uint64, bool, error) {
n, err := sn.getSeriesCount(at.AccountID, at.ProjectID, deadline)
if err != nil {
sn.seriesCountRequestErrors.Inc()
err = fmt.Errorf("cannot get series count from vmstorage %s: %s", sn.connPool.Addr(), err)
err = fmt.Errorf("cannot get series count from vmstorage %s: %w", sn.connPool.Addr(), err)
}
resultsCh <- nodeResult{
n: n,
@ -812,7 +812,7 @@ func GetSeriesCount(at *auth.Token, deadline Deadline) (uint64, bool, error) {
if len(errors) > 0 {
if len(errors) == len(storageNodes) {
// Return only the first error, since it has no sense in returning all errors.
return 0, true, fmt.Errorf("error occured during fetching series count: %s", errors[0])
return 0, true, fmt.Errorf("error occured during fetching series count: %w", errors[0])
}
// Just log errors and return partial results.
// This allows gracefully degrade vmselect in the case
@ -871,7 +871,7 @@ func ProcessSearchQuery(at *auth.Token, sq *storage.SearchQuery, fetchData bool,
err := sn.processSearchQuery(tbfw, requestData, tr, fetchData, deadline)
if err != nil {
sn.searchRequestErrors.Inc()
err = fmt.Errorf("cannot perform search on vmstorage %s: %s", sn.connPool.Addr(), err)
err = fmt.Errorf("cannot perform search on vmstorage %s: %w", sn.connPool.Addr(), err)
}
resultsCh <- err
}(sn)
@ -893,7 +893,7 @@ func ProcessSearchQuery(at *auth.Token, sq *storage.SearchQuery, fetchData bool,
if len(errors) == len(storageNodes) {
// Return only the first error, since it has no sense in returning all errors.
putTmpBlocksFile(tbfw.tbf)
return nil, true, fmt.Errorf("error occured during search: %s", errors[0])
return nil, true, fmt.Errorf("error occured during search: %w", errors[0])
}
// Just return partial results.
@ -906,7 +906,7 @@ func ProcessSearchQuery(at *auth.Token, sq *storage.SearchQuery, fetchData bool,
}
if err := tbfw.tbf.Finalize(); err != nil {
putTmpBlocksFile(tbfw.tbf)
return nil, false, fmt.Errorf("cannot finalize temporary blocks file with %d time series: %s", len(tbfw.m), err)
return nil, false, fmt.Errorf("cannot finalize temporary blocks file with %d time series: %w", len(tbfw.m), err)
}
var rss Results
@ -1132,7 +1132,7 @@ func (sn *storageNode) execOnConn(rpcName string, f func(bc *handshake.BufferedC
bc, err := sn.connPool.Get()
if err != nil {
return fmt.Errorf("cannot obtain connection from a pool: %s", err)
return fmt.Errorf("cannot obtain connection from a pool: %w", err)
}
if err := bc.SetDeadline(deadline.Deadline); err != nil {
_ = bc.Close()
@ -1142,7 +1142,7 @@ func (sn *storageNode) execOnConn(rpcName string, f func(bc *handshake.BufferedC
// Close the connection instead of returning it to the pool,
// since it may be broken.
_ = bc.Close()
return fmt.Errorf("cannot send rpcName=%q to the server: %s", rpcName, err)
return fmt.Errorf("cannot send rpcName=%q to the server: %w", rpcName, err)
}
if err := f(bc); err != nil {
@ -1156,7 +1156,7 @@ func (sn *storageNode) execOnConn(rpcName string, f func(bc *handshake.BufferedC
// since it may be broken.
_ = bc.Close()
}
return fmt.Errorf("cannot execute rpcName=%q on vmstorage %q with timeout %s: %s", rpcName, remoteAddr, deadline.String(), err)
return fmt.Errorf("cannot execute rpcName=%q on vmstorage %q with timeout %s: %w", rpcName, remoteAddr, deadline.String(), err)
}
// Return the connection back to the pool, assuming it is healthy.
sn.connPool.Put(bc)
@ -1174,16 +1174,16 @@ func (er *errRemote) Error() string {
func (sn *storageNode) deleteMetricsOnConn(bc *handshake.BufferedConn, requestData []byte) (int, error) {
// Send the request to sn
if err := writeBytes(bc, requestData); err != nil {
return 0, fmt.Errorf("cannot send deleteMetrics request to conn: %s", err)
return 0, fmt.Errorf("cannot send deleteMetrics request to conn: %w", err)
}
if err := bc.Flush(); err != nil {
return 0, fmt.Errorf("cannot flush deleteMetrics request to conn: %s", err)
return 0, fmt.Errorf("cannot flush deleteMetrics request to conn: %w", err)
}
// Read response error.
buf, err := readBytes(nil, bc, maxErrorMessageSize)
if err != nil {
return 0, fmt.Errorf("cannot read error message: %s", err)
return 0, fmt.Errorf("cannot read error message: %w", err)
}
if len(buf) > 0 {
return 0, &errRemote{msg: string(buf)}
@ -1192,7 +1192,7 @@ func (sn *storageNode) deleteMetricsOnConn(bc *handshake.BufferedConn, requestDa
// Read deletedCount
deletedCount, err := readUint64(bc)
if err != nil {
return 0, fmt.Errorf("cannot read deletedCount value: %s", err)
return 0, fmt.Errorf("cannot read deletedCount value: %w", err)
}
return int(deletedCount), nil
}
@ -1202,19 +1202,19 @@ const maxLabelSize = 16 * 1024 * 1024
func (sn *storageNode) getLabelsOnConn(bc *handshake.BufferedConn, accountID, projectID uint32) ([]string, error) {
// Send the request to sn.
if err := writeUint32(bc, accountID); err != nil {
return nil, fmt.Errorf("cannot send accountID=%d to conn: %s", accountID, err)
return nil, fmt.Errorf("cannot send accountID=%d to conn: %w", accountID, err)
}
if err := writeUint32(bc, projectID); err != nil {
return nil, fmt.Errorf("cannot send projectID=%d to conn: %s", projectID, err)
return nil, fmt.Errorf("cannot send projectID=%d to conn: %w", projectID, err)
}
if err := bc.Flush(); err != nil {
return nil, fmt.Errorf("cannot flush request to conn: %s", err)
return nil, fmt.Errorf("cannot flush request to conn: %w", err)
}
// Read response error.
buf, err := readBytes(nil, bc, maxErrorMessageSize)
if err != nil {
return nil, fmt.Errorf("cannot read error message: %s", err)
return nil, fmt.Errorf("cannot read error message: %w", err)
}
if len(buf) > 0 {
return nil, &errRemote{msg: string(buf)}
@ -1225,7 +1225,7 @@ func (sn *storageNode) getLabelsOnConn(bc *handshake.BufferedConn, accountID, pr
for {
buf, err = readBytes(buf[:0], bc, maxLabelSize)
if err != nil {
return nil, fmt.Errorf("cannot read labels: %s", err)
return nil, fmt.Errorf("cannot read labels: %w", err)
}
if len(buf) == 0 {
// Reached the end of the response
@ -1240,22 +1240,22 @@ const maxLabelValueSize = 16 * 1024 * 1024
func (sn *storageNode) getLabelValuesOnConn(bc *handshake.BufferedConn, accountID, projectID uint32, labelName string) ([]string, error) {
// Send the request to sn.
if err := writeUint32(bc, accountID); err != nil {
return nil, fmt.Errorf("cannot send accountID=%d to conn: %s", accountID, err)
return nil, fmt.Errorf("cannot send accountID=%d to conn: %w", accountID, err)
}
if err := writeUint32(bc, projectID); err != nil {
return nil, fmt.Errorf("cannot send projectID=%d to conn: %s", projectID, err)
return nil, fmt.Errorf("cannot send projectID=%d to conn: %w", projectID, err)
}
if err := writeBytes(bc, []byte(labelName)); err != nil {
return nil, fmt.Errorf("cannot send labelName=%q to conn: %s", labelName, err)
return nil, fmt.Errorf("cannot send labelName=%q to conn: %w", labelName, err)
}
if err := bc.Flush(); err != nil {
return nil, fmt.Errorf("cannot flush labelName to conn: %s", err)
return nil, fmt.Errorf("cannot flush labelName to conn: %w", err)
}
// Read response error.
buf, err := readBytes(nil, bc, maxErrorMessageSize)
if err != nil {
return nil, fmt.Errorf("cannot read error message: %s", err)
return nil, fmt.Errorf("cannot read error message: %w", err)
}
if len(buf) > 0 {
return nil, &errRemote{msg: string(buf)}
@ -1275,7 +1275,7 @@ func readLabelValues(buf []byte, bc *handshake.BufferedConn) ([]string, []byte,
var err error
buf, err = readBytes(buf[:0], bc, maxLabelValueSize)
if err != nil {
return nil, buf, fmt.Errorf("cannot read labelValue: %s", err)
return nil, buf, fmt.Errorf("cannot read labelValue: %w", err)
}
if len(buf) == 0 {
// Reached the end of the response
@ -1288,19 +1288,19 @@ func readLabelValues(buf []byte, bc *handshake.BufferedConn) ([]string, []byte,
func (sn *storageNode) getLabelEntriesOnConn(bc *handshake.BufferedConn, accountID, projectID uint32) ([]storage.TagEntry, error) {
// Send the request to sn.
if err := writeUint32(bc, accountID); err != nil {
return nil, fmt.Errorf("cannot send accountID=%d to conn: %s", accountID, err)
return nil, fmt.Errorf("cannot send accountID=%d to conn: %w", accountID, err)
}
if err := writeUint32(bc, projectID); err != nil {
return nil, fmt.Errorf("cannot send projectID=%d to conn: %s", projectID, err)
return nil, fmt.Errorf("cannot send projectID=%d to conn: %w", projectID, err)
}
if err := bc.Flush(); err != nil {
return nil, fmt.Errorf("cannot flush request to conn: %s", err)
return nil, fmt.Errorf("cannot flush request to conn: %w", err)
}
// Read response error.
buf, err := readBytes(nil, bc, maxErrorMessageSize)
if err != nil {
return nil, fmt.Errorf("cannot read error message: %s", err)
return nil, fmt.Errorf("cannot read error message: %w", err)
}
if len(buf) > 0 {
return nil, &errRemote{msg: string(buf)}
@ -1311,7 +1311,7 @@ func (sn *storageNode) getLabelEntriesOnConn(bc *handshake.BufferedConn, account
for {
buf, err = readBytes(buf[:0], bc, maxLabelSize)
if err != nil {
return nil, fmt.Errorf("cannot read label: %s", err)
return nil, fmt.Errorf("cannot read label: %w", err)
}
if len(buf) == 0 {
// Reached the end of the response
@ -1321,7 +1321,7 @@ func (sn *storageNode) getLabelEntriesOnConn(bc *handshake.BufferedConn, account
var values []string
values, buf, err = readLabelValues(buf, bc)
if err != nil {
return nil, fmt.Errorf("cannot read values for label %q: %s", label, err)
return nil, fmt.Errorf("cannot read values for label %q: %w", label, err)
}
labelEntries = append(labelEntries, storage.TagEntry{
Key: label,
@ -1333,27 +1333,27 @@ func (sn *storageNode) getLabelEntriesOnConn(bc *handshake.BufferedConn, account
func (sn *storageNode) getTSDBStatusForDateOnConn(bc *handshake.BufferedConn, accountID, projectID uint32, date uint64, topN int) (*storage.TSDBStatus, error) {
// Send the request to sn.
if err := writeUint32(bc, accountID); err != nil {
return nil, fmt.Errorf("cannot send accountID=%d to conn: %s", accountID, err)
return nil, fmt.Errorf("cannot send accountID=%d to conn: %w", accountID, err)
}
if err := writeUint32(bc, projectID); err != nil {
return nil, fmt.Errorf("cannot send projectID=%d to conn: %s", projectID, err)
return nil, fmt.Errorf("cannot send projectID=%d to conn: %w", projectID, err)
}
// date shouldn't exceed 32 bits, so send it as uint32.
if err := writeUint32(bc, uint32(date)); err != nil {
return nil, fmt.Errorf("cannot send date=%d to conn: %s", date, err)
return nil, fmt.Errorf("cannot send date=%d to conn: %w", date, err)
}
// topN shouldn't exceed 32 bits, so send it as uint32.
if err := writeUint32(bc, uint32(topN)); err != nil {
return nil, fmt.Errorf("cannot send topN=%d to conn: %s", topN, err)
return nil, fmt.Errorf("cannot send topN=%d to conn: %w", topN, err)
}
if err := bc.Flush(); err != nil {
return nil, fmt.Errorf("cannot flush tsdbStatus args to conn: %s", err)
return nil, fmt.Errorf("cannot flush tsdbStatus args to conn: %w", err)
}
// Read response error.
buf, err := readBytes(nil, bc, maxErrorMessageSize)
if err != nil {
return nil, fmt.Errorf("cannot read error message: %s", err)
return nil, fmt.Errorf("cannot read error message: %w", err)
}
if len(buf) > 0 {
return nil, &errRemote{msg: string(buf)}
@ -1362,15 +1362,15 @@ func (sn *storageNode) getTSDBStatusForDateOnConn(bc *handshake.BufferedConn, ac
// Read response
seriesCountByMetricName, err := readTopHeapEntries(bc)
if err != nil {
return nil, fmt.Errorf("cannot read seriesCountByMetricName: %s", err)
return nil, fmt.Errorf("cannot read seriesCountByMetricName: %w", err)
}
labelValueCountByLabelName, err := readTopHeapEntries(bc)
if err != nil {
return nil, fmt.Errorf("cannot read labelValueCountByLabelName: %s", err)
return nil, fmt.Errorf("cannot read labelValueCountByLabelName: %w", err)
}
seriesCountByLabelValuePair, err := readTopHeapEntries(bc)
if err != nil {
return nil, fmt.Errorf("cannot read seriesCountByLabelValuePair: %s", err)
return nil, fmt.Errorf("cannot read seriesCountByLabelValuePair: %w", err)
}
status := &storage.TSDBStatus{
SeriesCountByMetricName: seriesCountByMetricName,
@ -1383,18 +1383,18 @@ func (sn *storageNode) getTSDBStatusForDateOnConn(bc *handshake.BufferedConn, ac
func readTopHeapEntries(bc *handshake.BufferedConn) ([]storage.TopHeapEntry, error) {
n, err := readUint64(bc)
if err != nil {
return nil, fmt.Errorf("cannot read the number of topHeapEntries: %s", err)
return nil, fmt.Errorf("cannot read the number of topHeapEntries: %w", err)
}
var a []storage.TopHeapEntry
var buf []byte
for i := uint64(0); i < n; i++ {
buf, err = readBytes(buf[:0], bc, maxLabelSize)
if err != nil {
return nil, fmt.Errorf("cannot read label name: %s", err)
return nil, fmt.Errorf("cannot read label name: %w", err)
}
count, err := readUint64(bc)
if err != nil {
return nil, fmt.Errorf("cannot read label count: %s", err)
return nil, fmt.Errorf("cannot read label count: %w", err)
}
a = append(a, storage.TopHeapEntry{
Name: string(buf),
@ -1407,19 +1407,19 @@ func readTopHeapEntries(bc *handshake.BufferedConn) ([]storage.TopHeapEntry, err
func (sn *storageNode) getSeriesCountOnConn(bc *handshake.BufferedConn, accountID, projectID uint32) (uint64, error) {
// Send the request to sn.
if err := writeUint32(bc, accountID); err != nil {
return 0, fmt.Errorf("cannot send accountID=%d to conn: %s", accountID, err)
return 0, fmt.Errorf("cannot send accountID=%d to conn: %w", accountID, err)
}
if err := writeUint32(bc, projectID); err != nil {
return 0, fmt.Errorf("cannot send projectID=%d to conn: %s", projectID, err)
return 0, fmt.Errorf("cannot send projectID=%d to conn: %w", projectID, err)
}
if err := bc.Flush(); err != nil {
return 0, fmt.Errorf("cannot flush seriesCount args to conn: %s", err)
return 0, fmt.Errorf("cannot flush seriesCount args to conn: %w", err)
}
// Read response error.
buf, err := readBytes(nil, bc, maxErrorMessageSize)
if err != nil {
return 0, fmt.Errorf("cannot read error message: %s", err)
return 0, fmt.Errorf("cannot read error message: %w", err)
}
if len(buf) > 0 {
return 0, &errRemote{msg: string(buf)}
@ -1428,7 +1428,7 @@ func (sn *storageNode) getSeriesCountOnConn(bc *handshake.BufferedConn, accountI
// Read response
n, err := readUint64(bc)
if err != nil {
return 0, fmt.Errorf("cannot read series count: %s", err)
return 0, fmt.Errorf("cannot read series count: %w", err)
}
return n, nil
}
@ -1443,13 +1443,13 @@ const maxErrorMessageSize = 64 * 1024
func (sn *storageNode) processSearchQueryOnConn(tbfw *tmpBlocksFileWrapper, bc *handshake.BufferedConn, requestData []byte, tr storage.TimeRange, fetchData bool) (int, error) {
// Send the request to sn.
if err := writeBytes(bc, requestData); err != nil {
return 0, fmt.Errorf("cannot write requestData: %s", err)
return 0, fmt.Errorf("cannot write requestData: %w", err)
}
if err := writeBool(bc, fetchData); err != nil {
return 0, fmt.Errorf("cannot write fetchData=%v: %s", fetchData, err)
return 0, fmt.Errorf("cannot write fetchData=%v: %w", fetchData, err)
}
if err := bc.Flush(); err != nil {
return 0, fmt.Errorf("cannot flush requestData to conn: %s", err)
return 0, fmt.Errorf("cannot flush requestData to conn: %w", err)
}
var err error
@ -1458,7 +1458,7 @@ func (sn *storageNode) processSearchQueryOnConn(tbfw *tmpBlocksFileWrapper, bc *
// Read response error.
buf, err = readBytes(buf[:0], bc, maxErrorMessageSize)
if err != nil {
return 0, fmt.Errorf("cannot read error message: %s", err)
return 0, fmt.Errorf("cannot read error message: %w", err)
}
if len(buf) > 0 {
return 0, &errRemote{msg: string(buf)}
@ -1470,7 +1470,7 @@ func (sn *storageNode) processSearchQueryOnConn(tbfw *tmpBlocksFileWrapper, bc *
for {
buf, err = readBytes(buf[:0], bc, maxMetricBlockSize)
if err != nil {
return blocksRead, fmt.Errorf("cannot read MetricBlock #%d: %s", blocksRead, err)
return blocksRead, fmt.Errorf("cannot read MetricBlock #%d: %w", blocksRead, err)
}
if len(buf) == 0 {
// Reached the end of the response
@ -1478,7 +1478,7 @@ func (sn *storageNode) processSearchQueryOnConn(tbfw *tmpBlocksFileWrapper, bc *
}
tail, err := mb.Unmarshal(buf)
if err != nil {
return blocksRead, fmt.Errorf("cannot unmarshal MetricBlock #%d: %s", blocksRead, err)
return blocksRead, fmt.Errorf("cannot unmarshal MetricBlock #%d: %w", blocksRead, err)
}
if len(tail) != 0 {
return blocksRead, fmt.Errorf("non-empty tail after unmarshaling MetricBlock #%d: (len=%d) %q", blocksRead, len(tail), tail)
@ -1487,7 +1487,7 @@ func (sn *storageNode) processSearchQueryOnConn(tbfw *tmpBlocksFileWrapper, bc *
sn.metricBlocksRead.Inc()
sn.metricRowsRead.Add(mb.Block.RowsCount())
if err := tbfw.WriteBlock(&mb); err != nil {
return blocksRead, fmt.Errorf("cannot write MetricBlock #%d to temporary blocks file: %s", blocksRead, err)
return blocksRead, fmt.Errorf("cannot write MetricBlock #%d to temporary blocks file: %w", blocksRead, err)
}
}
}
@ -1525,7 +1525,7 @@ func writeBool(bc *handshake.BufferedConn, b bool) error {
func readBytes(buf []byte, bc *handshake.BufferedConn, maxDataSize int) ([]byte, error) {
buf = bytesutil.Resize(buf, 8)
if n, err := io.ReadFull(bc, buf); err != nil {
return buf, fmt.Errorf("cannot read %d bytes with data size: %s; read only %d bytes", len(buf), err, n)
return buf, fmt.Errorf("cannot read %d bytes with data size: %w; read only %d bytes", len(buf), err, n)
}
dataSize := encoding.UnmarshalUint64(buf)
if dataSize > uint64(maxDataSize) {
@ -1536,7 +1536,7 @@ func readBytes(buf []byte, bc *handshake.BufferedConn, maxDataSize int) ([]byte,
return buf, nil
}
if n, err := io.ReadFull(bc, buf); err != nil {
return buf, fmt.Errorf("cannot read data with size %d: %s; read only %d bytes", dataSize, err, n)
return buf, fmt.Errorf("cannot read data with size %d: %w; read only %d bytes", dataSize, err, n)
}
return buf, nil
}
@ -1544,7 +1544,7 @@ func readBytes(buf []byte, bc *handshake.BufferedConn, maxDataSize int) ([]byte,
func readUint64(bc *handshake.BufferedConn) (uint64, error) {
var buf [8]byte
if _, err := io.ReadFull(bc, buf[:]); err != nil {
return 0, fmt.Errorf("cannot read uint64: %s", err)
return 0, fmt.Errorf("cannot read uint64: %w", err)
}
n := encoding.UnmarshalUint64(buf[:])
return n, nil

View file

@ -119,7 +119,7 @@ func (tbf *tmpBlocksFile) WriteBlockData(b []byte) (tmpBlockAddr, error) {
_, err := tbf.f.Write(tbf.buf)
tbf.buf = append(tbf.buf[:0], b...)
if err != nil {
return addr, fmt.Errorf("cannot write block to %q: %s", tbf.f.Name(), err)
return addr, fmt.Errorf("cannot write block to %q: %w", tbf.f.Name(), err)
}
return addr, nil
}
@ -130,7 +130,7 @@ func (tbf *tmpBlocksFile) Finalize() error {
}
fname := tbf.f.Name()
if _, err := tbf.f.Write(tbf.buf); err != nil {
return fmt.Errorf("cannot write the remaining %d bytes to %q: %s", len(tbf.buf), fname, err)
return fmt.Errorf("cannot write the remaining %d bytes to %q: %w", len(tbf.buf), fname, err)
}
tbf.buf = tbf.buf[:0]
r, err := fs.OpenReaderAt(fname)

View file

@ -84,7 +84,7 @@ func testTmpBlocksFile() error {
bb.B = storage.MarshalBlock(bb.B[:0], b)
addr, err := tbf.WriteBlockData(bb.B)
if err != nil {
return fmt.Errorf("cannot write block at offset %d: %s", tbf.offset, err)
return fmt.Errorf("cannot write block at offset %d: %w", tbf.offset, err)
}
if addr.offset+uint64(addr.size) != tbf.offset {
return fmt.Errorf("unexpected addr=%+v for offset %v", &addr, tbf.offset)
@ -93,7 +93,7 @@ func testTmpBlocksFile() error {
blocks = append(blocks, b)
}
if err := tbf.Finalize(); err != nil {
return fmt.Errorf("cannot finalize tbf: %s", err)
return fmt.Errorf("cannot finalize tbf: %w", err)
}
// Read blocks in parallel and verify them
@ -108,12 +108,12 @@ func testTmpBlocksFile() error {
addr := addrs[idx]
b := blocks[idx]
if err := b.UnmarshalData(); err != nil {
return fmt.Errorf("cannot unmarshal data from the original block: %s", err)
return fmt.Errorf("cannot unmarshal data from the original block: %w", err)
}
b1.Reset()
tbf.MustReadBlockAt(&b1, addr)
if err := b1.UnmarshalData(); err != nil {
return fmt.Errorf("cannot unmarshal data from tbf: %s", err)
return fmt.Errorf("cannot unmarshal data from tbf: %w", err)
}
if b1.RowsCount() != b.RowsCount() {
return fmt.Errorf("unexpected number of rows in tbf block; got %d; want %d", b1.RowsCount(), b.RowsCount())

View file

@ -50,7 +50,7 @@ const defaultStep = 5 * 60 * 1000
func FederateHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r *http.Request) error {
ct := currentTime()
if err := r.ParseForm(); err != nil {
return fmt.Errorf("cannot parse request form values: %s", err)
return fmt.Errorf("cannot parse request form values: %w", err)
}
matches := r.Form["match[]"]
if len(matches) == 0 {
@ -88,7 +88,7 @@ func FederateHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter,
}
rss, isPartial, err := netstorage.ProcessSearchQuery(at, sq, true, deadline)
if err != nil {
return fmt.Errorf("cannot fetch data for %q: %s", sq, err)
return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
}
if isPartial && getDenyPartialResponse(r) {
return fmt.Errorf("cannot return full response, since some of vmstorage nodes are unavailable")
@ -114,7 +114,7 @@ func FederateHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter,
err = <-doneCh
if err != nil {
return fmt.Errorf("error during data fetching: %s", err)
return fmt.Errorf("error during data fetching: %w", err)
}
federateDuration.UpdateDuration(startTime)
return nil
@ -126,7 +126,7 @@ var federateDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/fe
func ExportHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r *http.Request) error {
ct := currentTime()
if err := r.ParseForm(); err != nil {
return fmt.Errorf("cannot parse request form values: %s", err)
return fmt.Errorf("cannot parse request form values: %w", err)
}
matches := r.Form["match[]"]
if len(matches) == 0 {
@ -152,7 +152,7 @@ func ExportHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
end = start + defaultStep
}
if err := exportHandler(at, w, r, matches, start, end, format, maxRowsPerLine, deadline); err != nil {
return fmt.Errorf("error when exporting data for queries=%q on the time range (start=%d, end=%d): %s", matches, start, end, err)
return fmt.Errorf("error when exporting data for queries=%q on the time range (start=%d, end=%d): %w", matches, start, end, err)
}
exportDuration.UpdateDuration(startTime)
return nil
@ -213,7 +213,7 @@ func exportHandler(at *auth.Token, w http.ResponseWriter, r *http.Request, match
}
rss, isPartial, err := netstorage.ProcessSearchQuery(at, sq, true, deadline)
if err != nil {
return fmt.Errorf("cannot fetch data for %q: %s", sq, err)
return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
}
if isPartial && getDenyPartialResponse(r) {
rss.Cancel()
@ -242,7 +242,7 @@ func exportHandler(at *auth.Token, w http.ResponseWriter, r *http.Request, match
}
err = <-doneCh
if err != nil {
return fmt.Errorf("error during data fetching: %s", err)
return fmt.Errorf("error during data fetching: %w", err)
}
return nil
}
@ -252,7 +252,7 @@ func exportHandler(at *auth.Token, w http.ResponseWriter, r *http.Request, match
// See https://prometheus.io/docs/prometheus/latest/querying/api/#delete-series
func DeleteHandler(startTime time.Time, at *auth.Token, r *http.Request) error {
if err := r.ParseForm(); err != nil {
return fmt.Errorf("cannot parse request form values: %s", err)
return fmt.Errorf("cannot parse request form values: %w", err)
}
if r.FormValue("start") != "" || r.FormValue("end") != "" {
return fmt.Errorf("start and end aren't supported. Remove these args from the query in order to delete all the matching metrics")
@ -273,7 +273,7 @@ func DeleteHandler(startTime time.Time, at *auth.Token, r *http.Request) error {
}
deletedCount, err := netstorage.DeleteSeries(at, sq, deadline)
if err != nil {
return fmt.Errorf("cannot delete time series matching %q: %s", matches, err)
return fmt.Errorf("cannot delete time series matching %q: %w", matches, err)
}
if deletedCount > 0 {
// Reset rollup result cache on all the vmselect nodes,
@ -326,7 +326,7 @@ func LabelValuesHandler(startTime time.Time, at *auth.Token, labelName string, w
deadline := getDeadlineForQuery(r)
if err := r.ParseForm(); err != nil {
return fmt.Errorf("cannot parse form values: %s", err)
return fmt.Errorf("cannot parse form values: %w", err)
}
var labelValues []string
var isPartial bool
@ -334,7 +334,7 @@ func LabelValuesHandler(startTime time.Time, at *auth.Token, labelName string, w
var err error
labelValues, isPartial, err = netstorage.GetLabelValues(at, labelName, deadline)
if err != nil {
return fmt.Errorf(`cannot obtain label values for %q: %s`, labelName, err)
return fmt.Errorf(`cannot obtain label values for %q: %w`, labelName, err)
}
} else {
// Extended functionality that allows filtering by label filters and time range
@ -356,7 +356,7 @@ func LabelValuesHandler(startTime time.Time, at *auth.Token, labelName string, w
}
labelValues, isPartial, err = labelValuesWithMatches(at, labelName, matches, start, end, deadline)
if err != nil {
return fmt.Errorf("cannot obtain label values for %q, match[]=%q, start=%d, end=%d: %s", labelName, matches, start, end, err)
return fmt.Errorf("cannot obtain label values for %q, match[]=%q, start=%d, end=%d: %w", labelName, matches, start, end, err)
}
}
if isPartial && getDenyPartialResponse(r) {
@ -402,7 +402,7 @@ func labelValuesWithMatches(at *auth.Token, labelName string, matches []string,
}
rss, isPartial, err := netstorage.ProcessSearchQuery(at, sq, false, deadline)
if err != nil {
return nil, false, fmt.Errorf("cannot fetch data for %q: %s", sq, err)
return nil, false, fmt.Errorf("cannot fetch data for %q: %w", sq, err)
}
m := make(map[string]struct{})
@ -417,7 +417,7 @@ func labelValuesWithMatches(at *auth.Token, labelName string, matches []string,
mLock.Unlock()
})
if err != nil {
return nil, false, fmt.Errorf("error when data fetching: %s", err)
return nil, false, fmt.Errorf("error when data fetching: %w", err)
}
labelValues := make([]string, 0, len(m))
@ -435,7 +435,7 @@ func LabelsCountHandler(startTime time.Time, at *auth.Token, w http.ResponseWrit
deadline := getDeadlineForQuery(r)
labelEntries, isPartial, err := netstorage.GetLabelEntries(at, deadline)
if err != nil {
return fmt.Errorf(`cannot obtain label entries: %s`, err)
return fmt.Errorf(`cannot obtain label entries: %w`, err)
}
if isPartial && getDenyPartialResponse(r) {
return fmt.Errorf("cannot return full response, since some of vmstorage nodes are unavailable")
@ -456,14 +456,14 @@ const secsPerDay = 3600 * 24
func TSDBStatusHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r *http.Request) error {
deadline := getDeadlineForQuery(r)
if err := r.ParseForm(); err != nil {
return fmt.Errorf("cannot parse form values: %s", err)
return fmt.Errorf("cannot parse form values: %w", err)
}
date := fasttime.UnixDate()
dateStr := r.FormValue("date")
if len(dateStr) > 0 {
t, err := time.Parse("2006-01-02", dateStr)
if err != nil {
return fmt.Errorf("cannot parse `date` arg %q: %s", dateStr, err)
return fmt.Errorf("cannot parse `date` arg %q: %w", dateStr, err)
}
date = uint64(t.Unix()) / secsPerDay
}
@ -472,7 +472,7 @@ func TSDBStatusHandler(startTime time.Time, at *auth.Token, w http.ResponseWrite
if len(topNStr) > 0 {
n, err := strconv.Atoi(topNStr)
if err != nil {
return fmt.Errorf("cannot parse `topN` arg %q: %s", topNStr, err)
return fmt.Errorf("cannot parse `topN` arg %q: %w", topNStr, err)
}
if n <= 0 {
n = 1
@ -484,7 +484,7 @@ func TSDBStatusHandler(startTime time.Time, at *auth.Token, w http.ResponseWrite
}
status, isPartial, err := netstorage.GetTSDBStatusForDate(at, deadline, date, topN)
if err != nil {
return fmt.Errorf(`cannot obtain tsdb status for date=%d, topN=%d: %s`, date, topN, err)
return fmt.Errorf(`cannot obtain tsdb status for date=%d, topN=%d: %w`, date, topN, err)
}
if isPartial && getDenyPartialResponse(r) {
return fmt.Errorf("cannot return full response, since some of vmstorage nodes are unavailable")
@ -504,7 +504,7 @@ func LabelsHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
deadline := getDeadlineForQuery(r)
if err := r.ParseForm(); err != nil {
return fmt.Errorf("cannot parse form values: %s", err)
return fmt.Errorf("cannot parse form values: %w", err)
}
var labels []string
var isPartial bool
@ -512,7 +512,7 @@ func LabelsHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
var err error
labels, isPartial, err = netstorage.GetLabels(at, deadline)
if err != nil {
return fmt.Errorf("cannot obtain labels: %s", err)
return fmt.Errorf("cannot obtain labels: %w", err)
}
} else {
// Extended functionality that allows filtering by label filters and time range
@ -532,7 +532,7 @@ func LabelsHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
}
labels, isPartial, err = labelsWithMatches(at, matches, start, end, deadline)
if err != nil {
return fmt.Errorf("cannot obtain labels for match[]=%q, start=%d, end=%d: %s", matches, start, end, err)
return fmt.Errorf("cannot obtain labels for match[]=%q, start=%d, end=%d: %w", matches, start, end, err)
}
}
if isPartial && getDenyPartialResponse(r) {
@ -565,7 +565,7 @@ func labelsWithMatches(at *auth.Token, matches []string, start, end int64, deadl
}
rss, isPartial, err := netstorage.ProcessSearchQuery(at, sq, false, deadline)
if err != nil {
return nil, false, fmt.Errorf("cannot fetch data for %q: %s", sq, err)
return nil, false, fmt.Errorf("cannot fetch data for %q: %w", sq, err)
}
m := make(map[string]struct{})
@ -581,7 +581,7 @@ func labelsWithMatches(at *auth.Token, matches []string, start, end int64, deadl
mLock.Unlock()
})
if err != nil {
return nil, false, fmt.Errorf("error when data fetching: %s", err)
return nil, false, fmt.Errorf("error when data fetching: %w", err)
}
labels := make([]string, 0, len(m))
@ -599,7 +599,7 @@ func SeriesCountHandler(startTime time.Time, at *auth.Token, w http.ResponseWrit
deadline := getDeadlineForQuery(r)
n, isPartial, err := netstorage.GetSeriesCount(at, deadline)
if err != nil {
return fmt.Errorf("cannot obtain series count: %s", err)
return fmt.Errorf("cannot obtain series count: %w", err)
}
if isPartial && getDenyPartialResponse(r) {
return fmt.Errorf("cannot return full response, since some of vmstorage nodes are unavailable")
@ -620,7 +620,7 @@ func SeriesHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
ct := currentTime()
if err := r.ParseForm(); err != nil {
return fmt.Errorf("cannot parse form values: %s", err)
return fmt.Errorf("cannot parse form values: %w", err)
}
matches := r.Form["match[]"]
if len(matches) == 0 {
@ -657,7 +657,7 @@ func SeriesHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
}
rss, isPartial, err := netstorage.ProcessSearchQuery(at, sq, false, deadline)
if err != nil {
return fmt.Errorf("cannot fetch data for %q: %s", sq, err)
return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
}
if isPartial && getDenyPartialResponse(r) {
return fmt.Errorf("cannot return full response, since some of vmstorage nodes are unavailable")
@ -685,7 +685,7 @@ func SeriesHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
}
err = <-doneCh
if err != nil {
return fmt.Errorf("error during data fetching: %s", err)
return fmt.Errorf("error during data fetching: %w", err)
}
seriesDuration.UpdateDuration(startTime)
return nil
@ -732,17 +732,17 @@ func QueryHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
if childQuery, windowStr, offsetStr := promql.IsMetricSelectorWithRollup(query); childQuery != "" {
window, err := parsePositiveDuration(windowStr, step)
if err != nil {
return fmt.Errorf("cannot parse window: %s", err)
return fmt.Errorf("cannot parse window: %w", err)
}
offset, err := parseDuration(offsetStr, step)
if err != nil {
return fmt.Errorf("cannot parse offset: %s", err)
return fmt.Errorf("cannot parse offset: %w", err)
}
start -= offset
end := start
start = end - window
if err := exportHandler(at, w, r, []string{childQuery}, start, end, "promapi", 0, deadline); err != nil {
return fmt.Errorf("error when exporting data for query=%q on the time range (start=%d, end=%d): %s", childQuery, start, end, err)
return fmt.Errorf("error when exporting data for query=%q on the time range (start=%d, end=%d): %w", childQuery, start, end, err)
}
queryDuration.UpdateDuration(startTime)
return nil
@ -750,24 +750,24 @@ func QueryHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
if childQuery, windowStr, stepStr, offsetStr := promql.IsRollup(query); childQuery != "" {
newStep, err := parsePositiveDuration(stepStr, step)
if err != nil {
return fmt.Errorf("cannot parse step: %s", err)
return fmt.Errorf("cannot parse step: %w", err)
}
if newStep > 0 {
step = newStep
}
window, err := parsePositiveDuration(windowStr, step)
if err != nil {
return fmt.Errorf("cannot parse window: %s", err)
return fmt.Errorf("cannot parse window: %w", err)
}
offset, err := parseDuration(offsetStr, step)
if err != nil {
return fmt.Errorf("cannot parse offset: %s", err)
return fmt.Errorf("cannot parse offset: %w", err)
}
start -= offset
end := start
start = end - window
if err := queryRangeHandler(at, w, childQuery, start, end, step, r, ct); err != nil {
return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %s", childQuery, start, end, step, err)
return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", childQuery, start, end, step, err)
}
queryDuration.UpdateDuration(startTime)
return nil
@ -785,7 +785,7 @@ func QueryHandler(startTime time.Time, at *auth.Token, w http.ResponseWriter, r
}
result, err := promql.Exec(&ec, query, true)
if err != nil {
return fmt.Errorf("error when executing query=%q for (time=%d, step=%d): %s", query, start, step, err)
return fmt.Errorf("error when executing query=%q for (time=%d, step=%d): %w", query, start, step, err)
}
w.Header().Set("Content-Type", "application/json")
@ -833,7 +833,7 @@ func QueryRangeHandler(startTime time.Time, at *auth.Token, w http.ResponseWrite
return err
}
if err := queryRangeHandler(at, w, query, start, end, step, r, ct); err != nil {
return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %s", query, start, end, step, err)
return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", query, start, end, step, err)
}
queryRangeDuration.UpdateDuration(startTime)
return nil
@ -874,7 +874,7 @@ func queryRangeHandler(at *auth.Token, w http.ResponseWriter, query string, star
}
result, err := promql.Exec(&ec, query, false)
if err != nil {
return fmt.Errorf("cannot execute query: %s", err)
return fmt.Errorf("cannot execute query: %w", err)
}
queryOffset := getLatencyOffsetMilliseconds()
if ct-end < queryOffset {
@ -983,7 +983,7 @@ func getTime(r *http.Request, argKey string, defaultValue int64) (int64, error)
// Try parsing duration relative to the current time
d, err1 := time.ParseDuration(argValue)
if err1 != nil {
return 0, fmt.Errorf("cannot parse %q=%q: %s", argKey, argValue, err)
return 0, fmt.Errorf("cannot parse %q=%q: %w", argKey, argValue, err)
}
if d > 0 {
d = -d
@ -1025,7 +1025,7 @@ func getDuration(r *http.Request, argKey string, defaultValue int64) (int64, err
// Try parsing string format
d, err := time.ParseDuration(argValue)
if err != nil {
return 0, fmt.Errorf("cannot parse %q=%q: %s", argKey, argValue, err)
return 0, fmt.Errorf("cannot parse %q=%q: %w", argKey, argValue, err)
}
secs = d.Seconds()
}
@ -1087,7 +1087,7 @@ func getTagFilterssFromMatches(matches []string) ([][]storage.TagFilter, error)
for _, match := range matches {
tagFilters, err := promql.ParseMetricSelector(match)
if err != nil {
return nil, fmt.Errorf("cannot parse %q: %s", match, err)
return nil, fmt.Errorf("cannot parse %q: %w", match, err)
}
tagFilterss = append(tagFilterss, tagFilters)
}

View file

@ -119,7 +119,7 @@ func testIncrementalParallelAggr(iafc *incrementalAggrFuncContext, tssSrc, tssEx
wg.Wait()
tssActual := iafc.finalizeTimeseries()
if err := expectTimeseriesEqual(tssActual, tssExpected); err != nil {
return fmt.Errorf("%s; tssActual=%v, tssExpected=%v", err, tssActual, tssExpected)
return fmt.Errorf("%w; tssActual=%v, tssExpected=%v", err, tssActual, tssExpected)
}
return nil
}
@ -164,7 +164,7 @@ func expectTsEqual(actual, expected *timeseries) error {
return fmt.Errorf("unexpected timestamps; got %v; want %v", actual.Timestamps, expected.Timestamps)
}
if err := compareValues(actual.Values, expected.Values); err != nil {
return fmt.Errorf("%s; actual %v; expected %v", err, actual.Values, expected.Values)
return fmt.Errorf("%w; actual %v; expected %v", err, actual.Values, expected.Values)
}
return nil
}

View file

@ -166,14 +166,14 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
}
rv, err := evalRollupFunc(ec, "default_rollup", rollupDefault, e, re, nil)
if err != nil {
return nil, fmt.Errorf(`cannot evaluate %q: %s`, me.AppendString(nil), err)
return nil, fmt.Errorf(`cannot evaluate %q: %w`, me.AppendString(nil), err)
}
return rv, nil
}
if re, ok := e.(*metricsql.RollupExpr); ok {
rv, err := evalRollupFunc(ec, "default_rollup", rollupDefault, e, re, nil)
if err != nil {
return nil, fmt.Errorf(`cannot evaluate %q: %s`, re.AppendString(nil), err)
return nil, fmt.Errorf(`cannot evaluate %q: %w`, re.AppendString(nil), err)
}
return rv, nil
}
@ -195,7 +195,7 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
}
rv, err := tf(tfa)
if err != nil {
return nil, fmt.Errorf(`cannot evaluate %q: %s`, fe.AppendString(nil), err)
return nil, fmt.Errorf(`cannot evaluate %q: %w`, fe.AppendString(nil), err)
}
return rv, nil
}
@ -209,7 +209,7 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
}
rv, err := evalRollupFunc(ec, fe.Name, rf, e, re, nil)
if err != nil {
return nil, fmt.Errorf(`cannot evaluate %q: %s`, fe.AppendString(nil), err)
return nil, fmt.Errorf(`cannot evaluate %q: %w`, fe.AppendString(nil), err)
}
return rv, nil
}
@ -246,7 +246,7 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
}
rv, err := af(afa)
if err != nil {
return nil, fmt.Errorf(`cannot evaluate %q: %s`, ae.AppendString(nil), err)
return nil, fmt.Errorf(`cannot evaluate %q: %w`, ae.AppendString(nil), err)
}
return rv, nil
}
@ -270,7 +270,7 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
}
rv, err := bf(bfa)
if err != nil {
return nil, fmt.Errorf(`cannot evaluate %q: %s`, be.AppendString(nil), err)
return nil, fmt.Errorf(`cannot evaluate %q: %w`, be.AppendString(nil), err)
}
return rv, nil
}
@ -381,7 +381,7 @@ func evalRollupFuncArgs(ec *EvalConfig, fe *metricsql.FuncExpr) ([]interface{},
}
ts, err := evalExpr(ec, arg)
if err != nil {
return nil, nil, fmt.Errorf("cannot evaluate arg #%d for %q: %s", i+1, fe.AppendString(nil), err)
return nil, nil, fmt.Errorf("cannot evaluate arg #%d for %q: %w", i+1, fe.AppendString(nil), err)
}
args[i] = ts
}

View file

@ -285,7 +285,7 @@ func getRollupConfigs(name string, rf rollupFunc, expr metricsql.Expr, start, en
case "aggr_over_time":
aggrFuncNames, err := getRollupAggrFuncNames(expr)
if err != nil {
return nil, nil, fmt.Errorf("invalid args to %s: %s", expr.AppendString(nil), err)
return nil, nil, fmt.Errorf("invalid args to %s: %w", expr.AppendString(nil), err)
}
for _, aggrFuncName := range aggrFuncNames {
if rollupFuncsRemoveCounterResets[aggrFuncName] {

View file

@ -291,7 +291,7 @@ var (
var buf [8]byte
if _, err := rand.Read(buf[:]); err != nil {
// do not use logger.Panicf, since it isn't initialized yet.
panic(fmt.Errorf("FATAL: cannot read random data for rollupResultCacheKeyPrefix: %s", err))
panic(fmt.Errorf("FATAL: cannot read random data for rollupResultCacheKeyPrefix: %w", err))
}
return encoding.UnmarshalUint64(buf[:])
}()
@ -421,7 +421,7 @@ func (mi *rollupResultCacheMetainfo) Unmarshal(src []byte) error {
for i := 0; i < entriesLen; i++ {
tail, err := mi.entries[i].Unmarshal(src)
if err != nil {
return fmt.Errorf("cannot unmarshal entry #%d: %s", i, err)
return fmt.Errorf("cannot unmarshal entry #%d: %w", i, err)
}
src = tail
}

View file

@ -217,7 +217,7 @@ func (ts *timeseries) unmarshalFastNoTimestamps(src []byte) ([]byte, error) {
tail, err := unmarshalMetricNameFast(&ts.MetricName, src)
if err != nil {
return tail, fmt.Errorf("cannot unmarshal MetricName: %s", err)
return tail, fmt.Errorf("cannot unmarshal MetricName: %w", err)
}
src = tail
@ -275,7 +275,7 @@ func unmarshalMetricNameFast(mn *storage.MetricName, src []byte) ([]byte, error)
tail, metricGroup, err := unmarshalBytesFast(src)
if err != nil {
return tail, fmt.Errorf("cannot unmarshal MetricGroup: %s", err)
return tail, fmt.Errorf("cannot unmarshal MetricGroup: %w", err)
}
src = tail
mn.MetricGroup = metricGroup[:len(metricGroup):len(metricGroup)]
@ -292,13 +292,13 @@ func unmarshalMetricNameFast(mn *storage.MetricName, src []byte) ([]byte, error)
for i := range mn.Tags {
tail, key, err := unmarshalBytesFast(src)
if err != nil {
return tail, fmt.Errorf("cannot unmarshal key for tag[%d]: %s", i, err)
return tail, fmt.Errorf("cannot unmarshal key for tag[%d]: %w", i, err)
}
src = tail
tail, value, err := unmarshalBytesFast(src)
if err != nil {
return tail, fmt.Errorf("cannot unmarshal value for tag[%d]: %s", i, err)
return tail, fmt.Errorf("cannot unmarshal value for tag[%d]: %w", i, err)
}
src = tail

View file

@ -414,7 +414,7 @@ func transformHistogramShare(tfa *transformFuncArg) ([]*timeseries, error) {
}
les, err := getScalar(args[0], 0)
if err != nil {
return nil, fmt.Errorf("cannot parse le: %s", err)
return nil, fmt.Errorf("cannot parse le: %w", err)
}
// Convert buckets with `vmrange` labels to buckets with `le` labels.
@ -425,7 +425,7 @@ func transformHistogramShare(tfa *transformFuncArg) ([]*timeseries, error) {
if len(args) > 2 {
s, err := getString(args[2], 2)
if err != nil {
return nil, fmt.Errorf("cannot parse boundsLabel (arg #3): %s", err)
return nil, fmt.Errorf("cannot parse boundsLabel (arg #3): %w", err)
}
boundsLabel = s
}
@ -513,7 +513,7 @@ func transformHistogramQuantile(tfa *transformFuncArg) ([]*timeseries, error) {
}
phis, err := getScalar(args[0], 0)
if err != nil {
return nil, fmt.Errorf("cannot parse phi: %s", err)
return nil, fmt.Errorf("cannot parse phi: %w", err)
}
// Convert buckets with `vmrange` labels to buckets with `le` labels.
@ -524,7 +524,7 @@ func transformHistogramQuantile(tfa *transformFuncArg) ([]*timeseries, error) {
if len(args) > 2 {
s, err := getString(args[2], 2)
if err != nil {
return nil, fmt.Errorf("cannot parse boundsLabel (arg #3): %s", err)
return nil, fmt.Errorf("cannot parse boundsLabel (arg #3): %w", err)
}
boundsLabel = s
}
@ -1034,7 +1034,7 @@ func transformLabelMap(tfa *transformFuncArg) ([]*timeseries, error) {
}
label, err := getString(args[1], 1)
if err != nil {
return nil, fmt.Errorf("cannot read label name: %s", err)
return nil, fmt.Errorf("cannot read label name: %w", err)
}
srcValues, dstValues, err := getStringPairs(args[2:])
if err != nil {
@ -1179,7 +1179,7 @@ func transformLabelTransform(tfa *transformFuncArg) ([]*timeseries, error) {
r, err := metricsql.CompileRegexp(regex)
if err != nil {
return nil, fmt.Errorf(`cannot compile regex %q: %s`, regex, err)
return nil, fmt.Errorf(`cannot compile regex %q: %w`, regex, err)
}
return labelReplace(args[0], label, r, label, replacement)
}
@ -1208,7 +1208,7 @@ func transformLabelReplace(tfa *transformFuncArg) ([]*timeseries, error) {
r, err := metricsql.CompileRegexpAnchored(regex)
if err != nil {
return nil, fmt.Errorf(`cannot compile regex %q: %s`, regex, err)
return nil, fmt.Errorf(`cannot compile regex %q: %w`, regex, err)
}
return labelReplace(args[0], srcLabel, r, dstLabel, replacement)
}
@ -1238,7 +1238,7 @@ func transformLabelValue(tfa *transformFuncArg) ([]*timeseries, error) {
}
labelName, err := getString(args[1], 1)
if err != nil {
return nil, fmt.Errorf("cannot get label name: %s", err)
return nil, fmt.Errorf("cannot get label name: %w", err)
}
rvs := args[0]
for _, ts := range rvs {
@ -1265,15 +1265,15 @@ func transformLabelMatch(tfa *transformFuncArg) ([]*timeseries, error) {
}
labelName, err := getString(args[1], 1)
if err != nil {
return nil, fmt.Errorf("cannot get label name: %s", err)
return nil, fmt.Errorf("cannot get label name: %w", err)
}
labelRe, err := getString(args[2], 2)
if err != nil {
return nil, fmt.Errorf("cannot get regexp: %s", err)
return nil, fmt.Errorf("cannot get regexp: %w", err)
}
r, err := metricsql.CompileRegexpAnchored(labelRe)
if err != nil {
return nil, fmt.Errorf(`cannot compile regexp %q: %s`, labelRe, err)
return nil, fmt.Errorf(`cannot compile regexp %q: %w`, labelRe, err)
}
tss := args[0]
rvs := tss[:0]
@ -1293,15 +1293,15 @@ func transformLabelMismatch(tfa *transformFuncArg) ([]*timeseries, error) {
}
labelName, err := getString(args[1], 1)
if err != nil {
return nil, fmt.Errorf("cannot get label name: %s", err)
return nil, fmt.Errorf("cannot get label name: %w", err)
}
labelRe, err := getString(args[2], 2)
if err != nil {
return nil, fmt.Errorf("cannot get regexp: %s", err)
return nil, fmt.Errorf("cannot get regexp: %w", err)
}
r, err := metricsql.CompileRegexpAnchored(labelRe)
if err != nil {
return nil, fmt.Errorf(`cannot compile regexp %q: %s`, labelRe, err)
return nil, fmt.Errorf(`cannot compile regexp %q: %w`, labelRe, err)
}
tss := args[0]
rvs := tss[:0]
@ -1401,7 +1401,7 @@ func newTransformFuncSortByLabel(isDesc bool) transformFunc {
}
label, err := getString(args[1], 1)
if err != nil {
return nil, fmt.Errorf("cannot parse label name for sorting: %s", err)
return nil, fmt.Errorf("cannot parse label name for sorting: %w", err)
}
rvs := args[0]
sort.SliceStable(rvs, func(i, j int) bool {

View file

@ -126,7 +126,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request, strg *storage.Storag
w.Header().Set("Content-Type", "application/json")
snapshotPath, err := strg.CreateSnapshot()
if err != nil {
err = fmt.Errorf("cannot create snapshot: %s", err)
err = fmt.Errorf("cannot create snapshot: %w", err)
jsonResponseError(w, err)
return true
}
@ -136,7 +136,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request, strg *storage.Storag
w.Header().Set("Content-Type", "application/json")
snapshots, err := strg.ListSnapshots()
if err != nil {
err = fmt.Errorf("cannot list snapshots: %s", err)
err = fmt.Errorf("cannot list snapshots: %w", err)
jsonResponseError(w, err)
return true
}
@ -153,7 +153,7 @@ func requestHandler(w http.ResponseWriter, r *http.Request, strg *storage.Storag
w.Header().Set("Content-Type", "application/json")
snapshotName := r.FormValue("snapshot")
if err := strg.DeleteSnapshot(snapshotName); err != nil {
err = fmt.Errorf("cannot delete snapshot %q: %s", snapshotName, err)
err = fmt.Errorf("cannot delete snapshot %q: %w", snapshotName, err)
jsonResponseError(w, err)
return true
}
@ -163,13 +163,13 @@ func requestHandler(w http.ResponseWriter, r *http.Request, strg *storage.Storag
w.Header().Set("Content-Type", "application/json")
snapshots, err := strg.ListSnapshots()
if err != nil {
err = fmt.Errorf("cannot list snapshots: %s", err)
err = fmt.Errorf("cannot list snapshots: %w", err)
jsonResponseError(w, err)
return true
}
for _, snapshotName := range snapshots {
if err := strg.DeleteSnapshot(snapshotName); err != nil {
err = fmt.Errorf("cannot delete snapshot %q: %s", snapshotName, err)
err = fmt.Errorf("cannot delete snapshot %q: %w", snapshotName, err)
jsonResponseError(w, err)
return true
}

View file

@ -87,14 +87,14 @@ func (cm *connsMap) CloseAll() {
func NewServer(vminsertAddr, vmselectAddr string, storage *storage.Storage) (*Server, error) {
vminsertLN, err := netutil.NewTCPListener("vminsert", vminsertAddr)
if err != nil {
return nil, fmt.Errorf("unable to listen vminsertAddr %s: %s", vminsertAddr, err)
return nil, fmt.Errorf("unable to listen vminsertAddr %s: %w", vminsertAddr, err)
}
vmselectLN, err := netutil.NewTCPListener("vmselect", vmselectAddr)
if err != nil {
return nil, fmt.Errorf("unable to listen vmselectAddr %s: %s", vmselectAddr, err)
return nil, fmt.Errorf("unable to listen vmselectAddr %s: %w", vmselectAddr, err)
}
if err := encoding.CheckPrecisionBits(uint8(*precisionBits)); err != nil {
return nil, fmt.Errorf("invalid -precisionBits: %s", err)
return nil, fmt.Errorf("invalid -precisionBits: %w", err)
}
s := &Server{
storage: storage,
@ -302,7 +302,7 @@ func (s *Server) processVMInsertConn(bc *handshake.BufferedConn) error {
// Remote end gracefully closed the connection.
return nil
}
return fmt.Errorf("cannot read packet size: %s", err)
return fmt.Errorf("cannot read packet size: %w", err)
}
packetSize := encoding.UnmarshalUint64(sizeBuf)
if packetSize > consts.MaxInsertPacketSize {
@ -310,19 +310,19 @@ func (s *Server) processVMInsertConn(bc *handshake.BufferedConn) error {
}
buf = bytesutil.Resize(buf, int(packetSize))
if n, err := io.ReadFull(bc, buf); err != nil {
return fmt.Errorf("cannot read packet with size %d: %s; read only %d bytes", packetSize, err, n)
return fmt.Errorf("cannot read packet with size %d: %w; read only %d bytes", packetSize, err, n)
}
		// Send `ack` to vminsert that we received the packet.
deadline := time.Now().Add(5 * time.Second)
if err := bc.SetWriteDeadline(deadline); err != nil {
return fmt.Errorf("cannot set write deadline for sending `ack` to vminsert: %s", err)
return fmt.Errorf("cannot set write deadline for sending `ack` to vminsert: %w", err)
}
sizeBuf[0] = 1
if _, err := bc.Write(sizeBuf[:1]); err != nil {
return fmt.Errorf("cannot send `ack` to vminsert: %s", err)
return fmt.Errorf("cannot send `ack` to vminsert: %w", err)
}
if err := bc.Flush(); err != nil {
return fmt.Errorf("cannot flush `ack` to vminsert: %s", err)
return fmt.Errorf("cannot flush `ack` to vminsert: %w", err)
}
vminsertPacketsRead.Inc()
@ -339,7 +339,7 @@ func (s *Server) processVMInsertConn(bc *handshake.BufferedConn) error {
var err error
tail, err = mr.Unmarshal(tail)
if err != nil {
return fmt.Errorf("cannot unmarshal MetricRow: %s", err)
return fmt.Errorf("cannot unmarshal MetricRow: %w", err)
}
if len(mrs) >= 10000 {
// Store the collected mrs in order to reduce memory usage
@ -347,14 +347,14 @@ func (s *Server) processVMInsertConn(bc *handshake.BufferedConn) error {
// This should help with https://github.com/VictoriaMetrics/VictoriaMetrics/issues/490
vminsertMetricsRead.Add(len(mrs))
if err := s.storage.AddRows(mrs, uint8(*precisionBits)); err != nil {
return fmt.Errorf("cannot store metrics: %s", err)
return fmt.Errorf("cannot store metrics: %w", err)
}
mrs = mrs[:0]
}
}
vminsertMetricsRead.Add(len(mrs))
if err := s.storage.AddRows(mrs, uint8(*precisionBits)); err != nil {
return fmt.Errorf("cannot store metrics: %s", err)
return fmt.Errorf("cannot store metrics: %w", err)
}
}
}
@ -375,10 +375,10 @@ func (s *Server) processVMSelectConn(bc *handshake.BufferedConn) error {
// Remote client gracefully closed the connection.
return nil
}
return fmt.Errorf("cannot process vmselect request: %s", err)
return fmt.Errorf("cannot process vmselect request: %w", err)
}
if err := bc.Flush(); err != nil {
return fmt.Errorf("cannot flush compressed buffers: %s", err)
return fmt.Errorf("cannot flush compressed buffers: %w", err)
}
}
}
@ -400,7 +400,7 @@ func (ctx *vmselectRequestCtx) readUint32() (uint32, error) {
if err == io.EOF {
return 0, err
}
return 0, fmt.Errorf("cannot read uint32: %s", err)
return 0, fmt.Errorf("cannot read uint32: %w", err)
}
n := encoding.UnmarshalUint32(ctx.sizeBuf)
return n, nil
@ -412,7 +412,7 @@ func (ctx *vmselectRequestCtx) readDataBufBytes(maxDataSize int) error {
if err == io.EOF {
return err
}
return fmt.Errorf("cannot read data size: %s", err)
return fmt.Errorf("cannot read data size: %w", err)
}
dataSize := encoding.UnmarshalUint64(ctx.sizeBuf)
if dataSize > uint64(maxDataSize) {
@ -423,7 +423,7 @@ func (ctx *vmselectRequestCtx) readDataBufBytes(maxDataSize int) error {
return nil
}
if n, err := io.ReadFull(ctx.bc, ctx.dataBuf); err != nil {
return fmt.Errorf("cannot read data with size %d: %s; read only %d bytes", dataSize, err, n)
return fmt.Errorf("cannot read data with size %d: %w; read only %d bytes", dataSize, err, n)
}
return nil
}
@ -434,7 +434,7 @@ func (ctx *vmselectRequestCtx) readBool() (bool, error) {
if err == io.EOF {
return false, err
}
return false, fmt.Errorf("cannot read bool: %s", err)
return false, fmt.Errorf("cannot read bool: %w", err)
}
v := ctx.dataBuf[0] != 0
return v, nil
@ -442,13 +442,13 @@ func (ctx *vmselectRequestCtx) readBool() (bool, error) {
func (ctx *vmselectRequestCtx) writeDataBufBytes() error {
if err := ctx.writeUint64(uint64(len(ctx.dataBuf))); err != nil {
return fmt.Errorf("cannot write data size: %s", err)
return fmt.Errorf("cannot write data size: %w", err)
}
if len(ctx.dataBuf) == 0 {
return nil
}
if _, err := ctx.bc.Write(ctx.dataBuf); err != nil {
return fmt.Errorf("cannot write data with size %d: %s", len(ctx.dataBuf), err)
return fmt.Errorf("cannot write data with size %d: %w", len(ctx.dataBuf), err)
}
return nil
}
@ -463,7 +463,7 @@ func (ctx *vmselectRequestCtx) writeErrorMessage(err error) error {
errMsg = errMsg[:maxErrorMessageSize]
}
if err := ctx.writeString(errMsg); err != nil {
return fmt.Errorf("cannot send error message %q to client: %s", errMsg, err)
return fmt.Errorf("cannot send error message %q to client: %w", errMsg, err)
}
return nil
}
@ -476,7 +476,7 @@ func (ctx *vmselectRequestCtx) writeString(s string) error {
func (ctx *vmselectRequestCtx) writeUint64(n uint64) error {
ctx.sizeBuf = encoding.MarshalUint64(ctx.sizeBuf[:0], n)
if _, err := ctx.bc.Write(ctx.sizeBuf); err != nil {
return fmt.Errorf("cannot write uint64 %d: %s", n, err)
return fmt.Errorf("cannot write uint64 %d: %w", n, err)
}
return nil
}
@ -494,12 +494,12 @@ func (s *Server) processVMSelectRequest(ctx *vmselectRequestCtx) error {
// Remote client gracefully closed the connection.
return err
}
return fmt.Errorf("cannot read rpcName: %s", err)
return fmt.Errorf("cannot read rpcName: %w", err)
}
// Limit the time required for reading request args.
if err := ctx.bc.SetReadDeadline(time.Now().Add(5 * time.Second)); err != nil {
return fmt.Errorf("cannot set read deadline for reading request args: %s", err)
return fmt.Errorf("cannot set read deadline for reading request args: %w", err)
}
defer func() {
_ = ctx.bc.SetReadDeadline(zeroTime)
@ -532,11 +532,11 @@ func (s *Server) processVMSelectDeleteMetrics(ctx *vmselectRequestCtx) error {
// Read request
if err := ctx.readDataBufBytes(maxTagFiltersSize); err != nil {
return fmt.Errorf("cannot read labelName: %s", err)
return fmt.Errorf("cannot read labelName: %w", err)
}
tail, err := ctx.sq.Unmarshal(ctx.dataBuf)
if err != nil {
return fmt.Errorf("cannot unmarshal SearchQuery: %s", err)
return fmt.Errorf("cannot unmarshal SearchQuery: %w", err)
}
if len(tail) > 0 {
return fmt.Errorf("unexpected non-zero tail left after unmarshaling SearchQuery: (len=%d) %q", len(tail), tail)
@ -555,11 +555,11 @@ func (s *Server) processVMSelectDeleteMetrics(ctx *vmselectRequestCtx) error {
// Send an empty error message to vmselect.
if err := ctx.writeString(""); err != nil {
return fmt.Errorf("cannot send empty error message: %s", err)
return fmt.Errorf("cannot send empty error message: %w", err)
}
// Send deletedCount to vmselect.
if err := ctx.writeUint64(uint64(deletedCount)); err != nil {
return fmt.Errorf("cannot send deletedCount=%d: %s", deletedCount, err)
return fmt.Errorf("cannot send deletedCount=%d: %w", deletedCount, err)
}
return nil
}
@ -570,11 +570,11 @@ func (s *Server) processVMSelectLabels(ctx *vmselectRequestCtx) error {
// Read request
accountID, err := ctx.readUint32()
if err != nil {
return fmt.Errorf("cannot read accountID: %s", err)
return fmt.Errorf("cannot read accountID: %w", err)
}
projectID, err := ctx.readUint32()
if err != nil {
return fmt.Errorf("cannot read projectID: %s", err)
return fmt.Errorf("cannot read projectID: %w", err)
}
// Search for tag keys
@ -585,7 +585,7 @@ func (s *Server) processVMSelectLabels(ctx *vmselectRequestCtx) error {
// Send an empty error message to vmselect.
if err := ctx.writeString(""); err != nil {
return fmt.Errorf("cannot send empty error message: %s", err)
return fmt.Errorf("cannot send empty error message: %w", err)
}
// Send labels to vmselect
@ -595,7 +595,7 @@ func (s *Server) processVMSelectLabels(ctx *vmselectRequestCtx) error {
label = "__name__"
}
if err := ctx.writeString(label); err != nil {
return fmt.Errorf("cannot write label %q: %s", label, err)
return fmt.Errorf("cannot write label %q: %w", label, err)
}
}
@ -614,14 +614,14 @@ func (s *Server) processVMSelectLabelValues(ctx *vmselectRequestCtx) error {
// Read request
accountID, err := ctx.readUint32()
if err != nil {
return fmt.Errorf("cannot read accountID: %s", err)
return fmt.Errorf("cannot read accountID: %w", err)
}
projectID, err := ctx.readUint32()
if err != nil {
return fmt.Errorf("cannot read projectID: %s", err)
return fmt.Errorf("cannot read projectID: %w", err)
}
if err := ctx.readDataBufBytes(maxLabelValueSize); err != nil {
return fmt.Errorf("cannot read labelName: %s", err)
return fmt.Errorf("cannot read labelName: %w", err)
}
labelName := ctx.dataBuf
@ -633,7 +633,7 @@ func (s *Server) processVMSelectLabelValues(ctx *vmselectRequestCtx) error {
// Send an empty error message to vmselect.
if err := ctx.writeString(""); err != nil {
return fmt.Errorf("cannot send empty error message: %s", err)
return fmt.Errorf("cannot send empty error message: %w", err)
}
return writeLabelValues(ctx, labelValues)
@ -646,7 +646,7 @@ func writeLabelValues(ctx *vmselectRequestCtx, labelValues []string) error {
continue
}
if err := ctx.writeString(labelValue); err != nil {
return fmt.Errorf("cannot write labelValue %q: %s", labelValue, err)
return fmt.Errorf("cannot write labelValue %q: %w", labelValue, err)
}
}
// Send 'end of label values' marker
@ -662,11 +662,11 @@ func (s *Server) processVMSelectLabelEntries(ctx *vmselectRequestCtx) error {
// Read request
accountID, err := ctx.readUint32()
if err != nil {
return fmt.Errorf("cannot read accountID: %s", err)
return fmt.Errorf("cannot read accountID: %w", err)
}
projectID, err := ctx.readUint32()
if err != nil {
return fmt.Errorf("cannot read projectID: %s", err)
return fmt.Errorf("cannot read projectID: %w", err)
}
// Perform the request
@ -677,7 +677,7 @@ func (s *Server) processVMSelectLabelEntries(ctx *vmselectRequestCtx) error {
// Send an empty error message to vmselect.
if err := ctx.writeString(""); err != nil {
return fmt.Errorf("cannot send empty error message: %s", err)
return fmt.Errorf("cannot send empty error message: %w", err)
}
// Send labelEntries to vmselect
@ -689,10 +689,10 @@ func (s *Server) processVMSelectLabelEntries(ctx *vmselectRequestCtx) error {
label = "__name__"
}
if err := ctx.writeString(label); err != nil {
return fmt.Errorf("cannot write label %q: %s", label, err)
return fmt.Errorf("cannot write label %q: %w", label, err)
}
if err := writeLabelValues(ctx, e.Values); err != nil {
return fmt.Errorf("cannot write label values for %q: %s", label, err)
return fmt.Errorf("cannot write label values for %q: %w", label, err)
}
}
@ -709,11 +709,11 @@ func (s *Server) processVMSelectSeriesCount(ctx *vmselectRequestCtx) error {
// Read request
accountID, err := ctx.readUint32()
if err != nil {
return fmt.Errorf("cannot read accountID: %s", err)
return fmt.Errorf("cannot read accountID: %w", err)
}
projectID, err := ctx.readUint32()
if err != nil {
return fmt.Errorf("cannot read projectID: %s", err)
return fmt.Errorf("cannot read projectID: %w", err)
}
// Execute the request
@ -724,12 +724,12 @@ func (s *Server) processVMSelectSeriesCount(ctx *vmselectRequestCtx) error {
// Send an empty error message to vmselect.
if err := ctx.writeString(""); err != nil {
return fmt.Errorf("cannot send empty error message: %s", err)
return fmt.Errorf("cannot send empty error message: %w", err)
}
// Send series count to vmselect.
if err := ctx.writeUint64(n); err != nil {
return fmt.Errorf("cannot write series count to vmselect: %s", err)
return fmt.Errorf("cannot write series count to vmselect: %w", err)
}
return nil
}
@ -740,19 +740,19 @@ func (s *Server) processVMSelectTSDBStatus(ctx *vmselectRequestCtx) error {
// Read request
accountID, err := ctx.readUint32()
if err != nil {
return fmt.Errorf("cannot read accountID: %s", err)
return fmt.Errorf("cannot read accountID: %w", err)
}
projectID, err := ctx.readUint32()
if err != nil {
return fmt.Errorf("cannot read projectID: %s", err)
return fmt.Errorf("cannot read projectID: %w", err)
}
date, err := ctx.readUint32()
if err != nil {
return fmt.Errorf("cannot read date: %s", err)
return fmt.Errorf("cannot read date: %w", err)
}
topN, err := ctx.readUint32()
if err != nil {
return fmt.Errorf("cannot read topN: %s", err)
return fmt.Errorf("cannot read topN: %w", err)
}
// Execute the request
@ -763,32 +763,32 @@ func (s *Server) processVMSelectTSDBStatus(ctx *vmselectRequestCtx) error {
// Send an empty error message to vmselect.
if err := ctx.writeString(""); err != nil {
return fmt.Errorf("cannot send empty error message: %s", err)
return fmt.Errorf("cannot send empty error message: %w", err)
}
// Send status to vmselect.
if err := writeTopHeapEntries(ctx, status.SeriesCountByMetricName); err != nil {
return fmt.Errorf("cannot write seriesCountByMetricName to vmselect: %s", err)
return fmt.Errorf("cannot write seriesCountByMetricName to vmselect: %w", err)
}
if err := writeTopHeapEntries(ctx, status.LabelValueCountByLabelName); err != nil {
return fmt.Errorf("cannot write labelValueCountByLabelName to vmselect: %s", err)
return fmt.Errorf("cannot write labelValueCountByLabelName to vmselect: %w", err)
}
if err := writeTopHeapEntries(ctx, status.SeriesCountByLabelValuePair); err != nil {
return fmt.Errorf("cannot write seriesCountByLabelValuePair to vmselect: %s", err)
return fmt.Errorf("cannot write seriesCountByLabelValuePair to vmselect: %w", err)
}
return nil
}
func writeTopHeapEntries(ctx *vmselectRequestCtx, a []storage.TopHeapEntry) error {
if err := ctx.writeUint64(uint64(len(a))); err != nil {
return fmt.Errorf("cannot write topHeapEntries size: %s", err)
return fmt.Errorf("cannot write topHeapEntries size: %w", err)
}
for _, e := range a {
if err := ctx.writeString(e.Name); err != nil {
return fmt.Errorf("cannot write topHeapEntry name: %s", err)
return fmt.Errorf("cannot write topHeapEntry name: %w", err)
}
if err := ctx.writeUint64(e.Count); err != nil {
return fmt.Errorf("cannot write topHeapEntry count: %s", err)
return fmt.Errorf("cannot write topHeapEntry count: %w", err)
}
}
return nil
@ -802,18 +802,18 @@ func (s *Server) processVMSelectSearchQuery(ctx *vmselectRequestCtx) error {
// Read search query.
if err := ctx.readDataBufBytes(maxSearchQuerySize); err != nil {
return fmt.Errorf("cannot read searchQuery: %s", err)
return fmt.Errorf("cannot read searchQuery: %w", err)
}
tail, err := ctx.sq.Unmarshal(ctx.dataBuf)
if err != nil {
return fmt.Errorf("cannot unmarshal SearchQuery: %s", err)
return fmt.Errorf("cannot unmarshal SearchQuery: %w", err)
}
if len(tail) > 0 {
return fmt.Errorf("unexpected non-zero tail left after unmarshaling SearchQuery: (len=%d) %q", len(tail), tail)
}
fetchData, err := ctx.readBool()
if err != nil {
return fmt.Errorf("cannot read `fetchData` bool: %s", err)
return fmt.Errorf("cannot read `fetchData` bool: %w", err)
}
// Setup search.
@ -832,7 +832,7 @@ func (s *Server) processVMSelectSearchQuery(ctx *vmselectRequestCtx) error {
// Send empty error message to vmselect.
if err := ctx.writeString(""); err != nil {
return fmt.Errorf("cannot send empty error message: %s", err)
return fmt.Errorf("cannot send empty error message: %w", err)
}
// Send found blocks to vmselect.
@ -845,11 +845,11 @@ func (s *Server) processVMSelectSearchQuery(ctx *vmselectRequestCtx) error {
ctx.dataBuf = ctx.mb.Marshal(ctx.dataBuf[:0])
if err := ctx.writeDataBufBytes(); err != nil {
return fmt.Errorf("cannot send MetricBlock: %s", err)
return fmt.Errorf("cannot send MetricBlock: %w", err)
}
}
if err := ctx.sr.Error(); err != nil {
return fmt.Errorf("search error: %s", err)
return fmt.Errorf("search error: %w", err)
}
// Send 'end of response' marker
@ -878,7 +878,7 @@ func (ctx *vmselectRequestCtx) setupTfss() error {
for i := range tagFilters {
tf := &tagFilters[i]
if err := tfs.Add(tf.Key, tf.Value, tf.IsNegative, tf.IsRegexp); err != nil {
return fmt.Errorf("cannot parse tag filter %s: %s", tf, err)
return fmt.Errorf("cannot parse tag filter %s: %w", tf, err)
}
}
tfss = append(tfss, tfs)

View file

@ -21,13 +21,13 @@ func NewToken(authToken string) (*Token, error) {
var at Token
accountID, err := strconv.Atoi(tmp[0])
if err != nil {
return nil, fmt.Errorf("cannot parse accountID from %q: %s", tmp[0], err)
return nil, fmt.Errorf("cannot parse accountID from %q: %w", tmp[0], err)
}
at.AccountID = uint32(accountID)
if len(tmp) > 1 {
projectID, err := strconv.Atoi(tmp[1])
if err != nil {
return nil, fmt.Errorf("cannot parse projectID from %q: %s", tmp[1], err)
return nil, fmt.Errorf("cannot parse projectID from %q: %w", tmp[1], err)
}
at.ProjectID = uint32(projectID)
}

View file

@ -55,13 +55,13 @@ func (b *Backup) Run() error {
}
if err := dst.DeleteFile(fscommon.BackupCompleteFilename); err != nil {
return fmt.Errorf("cannot delete `backup complete` file at %s: %s", dst, err)
return fmt.Errorf("cannot delete `backup complete` file at %s: %w", dst, err)
}
if err := runBackup(src, dst, origin, concurrency); err != nil {
return err
}
if err := dst.CreateFile(fscommon.BackupCompleteFilename, []byte("ok")); err != nil {
return fmt.Errorf("cannot create `backup complete` file at %s: %s", dst, err)
return fmt.Errorf("cannot create `backup complete` file at %s: %w", dst, err)
}
return nil
}
@ -74,17 +74,17 @@ func runBackup(src *fslocal.FS, dst common.RemoteFS, origin common.OriginFS, con
logger.Infof("obtaining list of parts at %s", src)
srcParts, err := src.ListParts()
if err != nil {
return fmt.Errorf("cannot list src parts: %s", err)
return fmt.Errorf("cannot list src parts: %w", err)
}
logger.Infof("obtaining list of parts at %s", dst)
dstParts, err := dst.ListParts()
if err != nil {
return fmt.Errorf("cannot list dst parts: %s", err)
return fmt.Errorf("cannot list dst parts: %w", err)
}
logger.Infof("obtaining list of parts at %s", origin)
originParts, err := origin.ListParts()
if err != nil {
return fmt.Errorf("cannot list origin parts: %s", err)
return fmt.Errorf("cannot list origin parts: %w", err)
}
backupSize := getPartsSize(srcParts)
@ -97,7 +97,7 @@ func runBackup(src *fslocal.FS, dst common.RemoteFS, origin common.OriginFS, con
err = runParallel(concurrency, partsToDelete, func(p common.Part) error {
logger.Infof("deleting %s from %s", &p, dst)
if err := dst.DeletePart(p); err != nil {
return fmt.Errorf("cannot delete %s from %s: %s", &p, dst, err)
return fmt.Errorf("cannot delete %s from %s: %w", &p, dst, err)
}
atomic.AddUint64(&deletedParts, 1)
return nil
@ -109,7 +109,7 @@ func runBackup(src *fslocal.FS, dst common.RemoteFS, origin common.OriginFS, con
return err
}
if err := dst.RemoveEmptyDirs(); err != nil {
return fmt.Errorf("cannot remove empty directories at %s: %s", dst, err)
return fmt.Errorf("cannot remove empty directories at %s: %w", dst, err)
}
}
@ -122,7 +122,7 @@ func runBackup(src *fslocal.FS, dst common.RemoteFS, origin common.OriginFS, con
err = runParallel(concurrency, originCopyParts, func(p common.Part) error {
logger.Infof("server-side copying %s from %s to %s", &p, origin, dst)
if err := dst.CopyPart(origin, p); err != nil {
return fmt.Errorf("cannot copy %s from %s to %s: %s", &p, origin, dst, err)
return fmt.Errorf("cannot copy %s from %s to %s: %w", &p, origin, dst, err)
}
atomic.AddUint64(&copiedParts, 1)
return nil
@ -144,17 +144,17 @@ func runBackup(src *fslocal.FS, dst common.RemoteFS, origin common.OriginFS, con
logger.Infof("uploading %s from %s to %s", &p, src, dst)
rc, err := src.NewReadCloser(p)
if err != nil {
return fmt.Errorf("cannot create reader for %s from %s: %s", &p, src, err)
return fmt.Errorf("cannot create reader for %s from %s: %w", &p, src, err)
}
sr := &statReader{
r: rc,
bytesRead: &bytesUploaded,
}
if err := dst.UploadPart(p, sr); err != nil {
return fmt.Errorf("cannot upload %s to %s: %s", &p, dst, err)
return fmt.Errorf("cannot upload %s to %s: %w", &p, dst, err)
}
if err = rc.Close(); err != nil {
return fmt.Errorf("cannot close reader for %s from %s: %s", &p, src, err)
return fmt.Errorf("cannot close reader for %s from %s: %w", &p, src, err)
}
return nil
}, func(elapsed time.Duration) {

View file

@ -43,11 +43,11 @@ func (r *Restore) Run() error {
// Make sure VictoriaMetrics doesn't run during the restore process.
if err := fs.MkdirAllIfNotExist(r.Dst.Dir); err != nil {
return fmt.Errorf("cannot create dir %q: %s", r.Dst.Dir, err)
return fmt.Errorf("cannot create dir %q: %w", r.Dst.Dir, err)
}
flockF, err := fs.CreateFlockFile(r.Dst.Dir)
if err != nil {
return fmt.Errorf("cannot create lock file in %q; make sure VictoriaMetrics doesn't use the dir; error: %s", r.Dst.Dir, err)
return fmt.Errorf("cannot create lock file in %q; make sure VictoriaMetrics doesn't use the dir; error: %w", r.Dst.Dir, err)
}
defer fs.MustClose(flockF)
@ -71,12 +71,12 @@ func (r *Restore) Run() error {
logger.Infof("obtaining list of parts at %s", src)
srcParts, err := src.ListParts()
if err != nil {
return fmt.Errorf("cannot list src parts: %s", err)
return fmt.Errorf("cannot list src parts: %w", err)
}
logger.Infof("obtaining list of parts at %s", dst)
dstParts, err := dst.ListParts()
if err != nil {
return fmt.Errorf("cannot list dst parts: %s", err)
return fmt.Errorf("cannot list dst parts: %w", err)
}
backupSize := getPartsSize(srcParts)
@ -129,7 +129,7 @@ func (r *Restore) Run() error {
logger.Infof("deleting %s from %s", path, dst)
size, err := dst.DeletePath(path)
if err != nil {
return fmt.Errorf("cannot delete %s from %s: %s", path, dst, err)
return fmt.Errorf("cannot delete %s from %s: %w", path, dst, err)
}
deleteSize += size
}
@ -137,14 +137,14 @@ func (r *Restore) Run() error {
return err
}
if err := dst.RemoveEmptyDirs(); err != nil {
return fmt.Errorf("cannot remove empty directories at %s: %s", dst, err)
return fmt.Errorf("cannot remove empty directories at %s: %w", dst, err)
}
}
// Re-read dstParts, since additional parts may be removed on the previous step.
dstParts, err = dst.ListParts()
if err != nil {
return fmt.Errorf("cannot list dst parts after the deletion: %s", err)
return fmt.Errorf("cannot list dst parts after the deletion: %w", err)
}
partsToCopy := common.PartsDifference(srcParts, dstParts)
@ -166,17 +166,17 @@ func (r *Restore) Run() error {
logger.Infof("downloading %s from %s to %s", &p, src, dst)
wc, err := dst.NewWriteCloser(p)
if err != nil {
return fmt.Errorf("cannot create writer for %q to %s: %s", &p, dst, err)
return fmt.Errorf("cannot create writer for %q to %s: %w", &p, dst, err)
}
sw := &statWriter{
w: wc,
bytesWritten: &bytesDownloaded,
}
if err := src.DownloadPart(p, sw); err != nil {
return fmt.Errorf("cannot download %s to %s: %s", &p, dst, err)
return fmt.Errorf("cannot download %s to %s: %w", &p, dst, err)
}
if err := wc.Close(); err != nil {
return fmt.Errorf("cannot close reader from %s from %s: %s", &p, src, err)
return fmt.Errorf("cannot close reader from %s from %s: %w", &p, src, err)
}
}
return nil

View file

@ -207,7 +207,7 @@ func NewRemoteFS(path string) (common.RemoteFS, error) {
Dir: dir,
}
if err := fs.Init(); err != nil {
return nil, fmt.Errorf("cannot initialize connection to gcs: %s", err)
return nil, fmt.Errorf("cannot initialize connection to gcs: %w", err)
}
return fs, nil
case "s3":
@ -226,7 +226,7 @@ func NewRemoteFS(path string) (common.RemoteFS, error) {
Dir: dir,
}
if err := fs.Init(); err != nil {
return nil, fmt.Errorf("cannot initialize connection to s3: %s", err)
return nil, fmt.Errorf("cannot initialize connection to s3: %w", err)
}
return fs, nil
default:

View file

@ -13,11 +13,11 @@ import (
func FsyncFile(path string) error {
if err := fsync(path); err != nil {
_ = os.RemoveAll(path)
return fmt.Errorf("cannot fsync file %q: %s", path, err)
return fmt.Errorf("cannot fsync file %q: %w", path, err)
}
dir := filepath.Dir(path)
if err := fsync(dir); err != nil {
return fmt.Errorf("cannot fsync dir %q: %s", dir, err)
return fmt.Errorf("cannot fsync dir %q: %w", dir, err)
}
return nil
}
@ -45,7 +45,7 @@ func fsync(path string) error {
func AppendFiles(dst []string, dir string) ([]string, error) {
d, err := os.Open(dir)
if err != nil {
return nil, fmt.Errorf("cannot open %q: %s", dir, err)
return nil, fmt.Errorf("cannot open %q: %w", dir, err)
}
dst, err = appendFilesInternal(dst, d)
if err1 := d.Close(); err1 != nil {
@ -58,14 +58,14 @@ func appendFilesInternal(dst []string, d *os.File) ([]string, error) {
dir := d.Name()
dfi, err := d.Stat()
if err != nil {
return nil, fmt.Errorf("cannot stat %q: %s", dir, err)
return nil, fmt.Errorf("cannot stat %q: %w", dir, err)
}
if !dfi.IsDir() {
return nil, fmt.Errorf("%q isn't a directory", dir)
}
fis, err := d.Readdir(-1)
if err != nil {
return nil, fmt.Errorf("cannot read directory contents in %q: %s", dir, err)
return nil, fmt.Errorf("cannot read directory contents in %q: %w", dir, err)
}
for _, fi := range fis {
name := fi.Name()
@ -82,7 +82,7 @@ func appendFilesInternal(dst []string, d *os.File) ([]string, error) {
// Process directory
dst, err = AppendFiles(dst, path)
if err != nil {
return nil, fmt.Errorf("cannot list %q: %s", path, err)
return nil, fmt.Errorf("cannot list %q: %w", path, err)
}
continue
}
@ -100,17 +100,17 @@ func appendFilesInternal(dst []string, d *os.File) ([]string, error) {
// Skip symlink that points to nowhere.
continue
}
return nil, fmt.Errorf("cannot resolve symlink %q: %s", pathOrig, err)
return nil, fmt.Errorf("cannot resolve symlink %q: %w", pathOrig, err)
}
sfi, err := os.Stat(pathReal)
if err != nil {
return nil, fmt.Errorf("cannot stat %q from symlink %q: %s", pathReal, path, err)
return nil, fmt.Errorf("cannot stat %q from symlink %q: %w", pathReal, path, err)
}
if sfi.IsDir() {
// Symlink points to directory
dstNew, err := AppendFiles(dst, pathReal)
if err != nil {
return nil, fmt.Errorf("cannot list files at %q from symlink %q: %s", pathReal, path, err)
return nil, fmt.Errorf("cannot list files at %q from symlink %q: %w", pathReal, path, err)
}
pathReal += "/"
for i := len(dst); i < len(dstNew); i++ {
@ -163,14 +163,14 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) {
dir := d.Name()
dfi, err := d.Stat()
if err != nil {
return false, fmt.Errorf("cannot stat %q: %s", dir, err)
return false, fmt.Errorf("cannot stat %q: %w", dir, err)
}
if !dfi.IsDir() {
return false, fmt.Errorf("%q isn't a directory", dir)
}
fis, err := d.Readdir(-1)
if err != nil {
return false, fmt.Errorf("cannot read directory contents in %q: %s", dir, err)
return false, fmt.Errorf("cannot read directory contents in %q: %w", dir, err)
}
dirEntries := 0
hasFlock := false
@ -184,7 +184,7 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) {
// Process directory
ok, err := removeEmptyDirs(path)
if err != nil {
return false, fmt.Errorf("cannot list %q: %s", path, err)
return false, fmt.Errorf("cannot list %q: %w", path, err)
}
if !ok {
dirEntries++
@ -209,21 +209,21 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) {
// Remove symlink that points to nowere.
logger.Infof("removing broken symlink %q", pathOrig)
if err := os.Remove(pathOrig); err != nil {
return false, fmt.Errorf("cannot remove %q: %s", pathOrig, err)
return false, fmt.Errorf("cannot remove %q: %w", pathOrig, err)
}
continue
}
return false, fmt.Errorf("cannot resolve symlink %q: %s", pathOrig, err)
return false, fmt.Errorf("cannot resolve symlink %q: %w", pathOrig, err)
}
sfi, err := os.Stat(pathReal)
if err != nil {
return false, fmt.Errorf("cannot stat %q from symlink %q: %s", pathReal, path, err)
return false, fmt.Errorf("cannot stat %q from symlink %q: %w", pathReal, path, err)
}
if sfi.IsDir() {
// Symlink points to directory
ok, err := removeEmptyDirs(pathReal)
if err != nil {
return false, fmt.Errorf("cannot list files at %q from symlink %q: %s", pathReal, path, err)
return false, fmt.Errorf("cannot list files at %q from symlink %q: %w", pathReal, path, err)
}
if !ok {
dirEntries++
@ -231,7 +231,7 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) {
// Remove the symlink
logger.Infof("removing symlink that points to empty dir %q", pathOrig)
if err := os.Remove(pathOrig); err != nil {
return false, fmt.Errorf("cannot remove %q: %s", pathOrig, err)
return false, fmt.Errorf("cannot remove %q: %w", pathOrig, err)
}
}
continue
@ -252,11 +252,11 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) {
if hasFlock {
flockFilepath := dir + "/flock.lock"
if err := os.Remove(flockFilepath); err != nil {
return false, fmt.Errorf("cannot remove %q: %s", flockFilepath, err)
return false, fmt.Errorf("cannot remove %q: %w", flockFilepath, err)
}
}
if err := os.Remove(dir); err != nil {
return false, fmt.Errorf("cannot remove %q: %s", dir, err)
return false, fmt.Errorf("cannot remove %q: %w", dir, err)
}
return true, nil
}

View file

@ -64,7 +64,7 @@ func (fs *FS) ListParts() ([]common.Part, error) {
}
fi, err := os.Stat(file)
if err != nil {
return nil, fmt.Errorf("cannot stat %q: %s", file, err)
return nil, fmt.Errorf("cannot stat %q: %w", file, err)
}
path := file[len(dir):]
size := uint64(fi.Size())
@ -100,7 +100,7 @@ func (fs *FS) NewReadCloser(p common.Part) (io.ReadCloser, error) {
path := fs.path(p)
r, err := filestream.OpenReaderAt(path, int64(p.Offset), true)
if err != nil {
return nil, fmt.Errorf("cannot open %q at %q: %s", p.Path, fs.Dir, err)
return nil, fmt.Errorf("cannot open %q at %q: %w", p.Path, fs.Dir, err)
}
lrc := &limitedReadCloser{
r: r,
@ -121,7 +121,7 @@ func (fs *FS) NewWriteCloser(p common.Part) (io.WriteCloser, error) {
}
w, err := filestream.OpenWriterAt(path, int64(p.Offset), true)
if err != nil {
return nil, fmt.Errorf("cannot open writer for %q at offset %d: %s", path, p.Offset, err)
return nil, fmt.Errorf("cannot open writer for %q at offset %d: %w", path, p.Offset, err)
}
wc := &writeCloser{
w: w,
@ -148,16 +148,16 @@ func (fs *FS) DeletePath(path string) (uint64, error) {
// The file could be deleted earlier via symlink.
return 0, nil
}
return 0, fmt.Errorf("cannot open %q at %q: %s", path, fullPath, err)
return 0, fmt.Errorf("cannot open %q at %q: %w", path, fullPath, err)
}
fi, err := f.Stat()
_ = f.Close()
if err != nil {
return 0, fmt.Errorf("cannot stat %q at %q: %s", path, fullPath, err)
return 0, fmt.Errorf("cannot stat %q at %q: %w", path, fullPath, err)
}
size := uint64(fi.Size())
if err := os.Remove(fullPath); err != nil {
return 0, fmt.Errorf("cannot remove %q: %s", fullPath, err)
return 0, fmt.Errorf("cannot remove %q: %w", fullPath, err)
}
return size, nil
}
@ -170,7 +170,7 @@ func (fs *FS) RemoveEmptyDirs() error {
func (fs *FS) mkdirAll(filePath string) error {
dir := filepath.Dir(filePath)
if err := os.MkdirAll(dir, 0700); err != nil {
return fmt.Errorf("cannot create directory %q: %s", dir, err)
return fmt.Errorf("cannot create directory %q: %w", dir, err)
}
return nil
}

View file

@ -60,7 +60,7 @@ func (fs *FS) ListParts() ([]common.Part, error) {
// Check for correct part size.
fi, err := os.Stat(file)
if err != nil {
return nil, fmt.Errorf("cannot stat file %q for part %q: %s", file, p.Path, err)
return nil, fmt.Errorf("cannot stat file %q for part %q: %w", file, p.Path, err)
}
p.ActualSize = uint64(fi.Size())
parts = append(parts, p)
@ -72,7 +72,7 @@ func (fs *FS) ListParts() ([]common.Part, error) {
func (fs *FS) DeletePart(p common.Part) error {
path := fs.path(p)
if err := os.Remove(path); err != nil {
return fmt.Errorf("cannot remove %q: %s", path, err)
return fmt.Errorf("cannot remove %q: %w", path, err)
}
return nil
}
@ -103,12 +103,12 @@ func (fs *FS) CopyPart(srcFS common.OriginFS, p common.Part) error {
// Cannot create hardlink. Just copy file contents
srcFile, err := os.Open(srcPath)
if err != nil {
return fmt.Errorf("cannot open file %q: %s", srcPath, err)
return fmt.Errorf("cannot open file %q: %w", srcPath, err)
}
dstFile, err := os.Create(dstPath)
if err != nil {
_ = srcFile.Close()
return fmt.Errorf("cannot create file %q: %s", dstPath, err)
return fmt.Errorf("cannot create file %q: %w", dstPath, err)
}
n, err := io.Copy(dstFile, srcFile)
if err1 := dstFile.Close(); err1 != nil {
@ -137,14 +137,14 @@ func (fs *FS) DownloadPart(p common.Part, w io.Writer) error {
path := fs.path(p)
r, err := os.Open(path)
if err != nil {
return fmt.Errorf("cannot open %q: %s", path, err)
return fmt.Errorf("cannot open %q: %w", path, err)
}
n, err := io.Copy(w, r)
if err1 := r.Close(); err1 != nil && err == nil {
err = err1
}
if err != nil {
return fmt.Errorf("cannot download data from %q: %s", path, err)
return fmt.Errorf("cannot download data from %q: %w", path, err)
}
if uint64(n) != p.Size {
return fmt.Errorf("wrong data size downloaded from %q; got %d bytes; want %d bytes", path, n, p.Size)
@ -160,7 +160,7 @@ func (fs *FS) UploadPart(p common.Part, r io.Reader) error {
}
w, err := os.Create(path)
if err != nil {
return fmt.Errorf("cannot create file %q: %s", path, err)
return fmt.Errorf("cannot create file %q: %w", path, err)
}
n, err := io.Copy(w, r)
if err1 := w.Close(); err1 != nil && err == nil {
@ -168,7 +168,7 @@ func (fs *FS) UploadPart(p common.Part, r io.Reader) error {
}
if err != nil {
_ = os.RemoveAll(path)
return fmt.Errorf("cannot upload data to %q: %s", path, err)
return fmt.Errorf("cannot upload data to %q: %w", path, err)
}
if uint64(n) != p.Size {
_ = os.RemoveAll(path)
@ -184,7 +184,7 @@ func (fs *FS) UploadPart(p common.Part, r io.Reader) error {
func (fs *FS) mkdirAll(filePath string) error {
dir := filepath.Dir(filePath)
if err := os.MkdirAll(dir, 0700); err != nil {
return fmt.Errorf("cannot create directory %q: %s", dir, err)
return fmt.Errorf("cannot create directory %q: %w", dir, err)
}
return nil
}
@ -200,7 +200,7 @@ func (fs *FS) DeleteFile(filePath string) error {
path := filepath.Join(fs.Dir, filePath)
err := os.Remove(path)
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("cannot remove %q: %s", path, err)
return fmt.Errorf("cannot remove %q: %w", path, err)
}
return nil
}
@ -214,7 +214,7 @@ func (fs *FS) CreateFile(filePath string, data []byte) error {
return err
}
if err := ioutil.WriteFile(path, data, 0600); err != nil {
return fmt.Errorf("cannot write %d bytes to %q: %s", len(data), path, err)
return fmt.Errorf("cannot write %d bytes to %q: %w", len(data), path, err)
}
return nil
}
@ -227,7 +227,7 @@ func (fs *FS) HasFile(filePath string) (bool, error) {
if os.IsNotExist(err) {
return false, nil
}
return false, fmt.Errorf("cannot stat %q: %s", path, err)
return false, fmt.Errorf("cannot stat %q: %w", path, err)
}
if fi.IsDir() {
return false, fmt.Errorf("%q is directory, while file is needed", path)

View file

@ -49,13 +49,13 @@ func (fs *FS) Init() error {
creds := option.WithCredentialsFile(fs.CredsFilePath)
c, err := storage.NewClient(ctx, creds)
if err != nil {
return fmt.Errorf("cannot create gcs client with credsFile %q: %s", fs.CredsFilePath, err)
return fmt.Errorf("cannot create gcs client with credsFile %q: %w", fs.CredsFilePath, err)
}
client = c
} else {
c, err := storage.NewClient(ctx)
if err != nil {
return fmt.Errorf("cannot create default gcs client: %q", err)
return fmt.Errorf("cannot create default gcs client: %w", err)
}
client = c
}
@ -82,7 +82,7 @@ func (fs *FS) ListParts() ([]common.Part, error) {
Prefix: dir,
}
if err := q.SetAttrSelection(selectAttrs); err != nil {
return nil, fmt.Errorf("error in SetAttrSelection: %s", err)
return nil, fmt.Errorf("error in SetAttrSelection: %w", err)
}
it := fs.bkt.Objects(ctx, q)
var parts []common.Part
@ -92,7 +92,7 @@ func (fs *FS) ListParts() ([]common.Part, error) {
return parts, nil
}
if err != nil {
return nil, fmt.Errorf("error when iterating objects at %q: %s", dir, err)
return nil, fmt.Errorf("error when iterating objects at %q: %w", dir, err)
}
file := attr.Name
if !strings.HasPrefix(file, dir) {
@ -116,7 +116,7 @@ func (fs *FS) DeletePart(p common.Part) error {
o := fs.object(p)
ctx := context.Background()
if err := o.Delete(ctx); err != nil {
return fmt.Errorf("cannot delete %q at %s (remote path %q): %s", p.Path, fs, o.ObjectName(), err)
return fmt.Errorf("cannot delete %q at %s (remote path %q): %w", p.Path, fs, o.ObjectName(), err)
}
return nil
}
@ -140,7 +140,7 @@ func (fs *FS) CopyPart(srcFS common.OriginFS, p common.Part) error {
ctx := context.Background()
attr, err := copier.Run(ctx)
if err != nil {
return fmt.Errorf("cannot copy %q from %s to %s: %s", p.Path, src, fs, err)
return fmt.Errorf("cannot copy %q from %s to %s: %w", p.Path, src, fs, err)
}
if uint64(attr.Size) != p.Size {
return fmt.Errorf("unexpected %q size after copying from %s to %s; got %d bytes; want %d bytes", p.Path, src, fs, attr.Size, p.Size)
@ -154,14 +154,14 @@ func (fs *FS) DownloadPart(p common.Part, w io.Writer) error {
ctx := context.Background()
r, err := o.NewReader(ctx)
if err != nil {
return fmt.Errorf("cannot open reader for %q at %s (remote path %q): %s", p.Path, fs, o.ObjectName(), err)
return fmt.Errorf("cannot open reader for %q at %s (remote path %q): %w", p.Path, fs, o.ObjectName(), err)
}
n, err := io.Copy(w, r)
if err1 := r.Close(); err1 != nil && err == nil {
err = err1
}
if err != nil {
return fmt.Errorf("cannot download %q from at %s (remote path %q): %s", p.Path, fs, o.ObjectName(), err)
return fmt.Errorf("cannot download %q from at %s (remote path %q): %w", p.Path, fs, o.ObjectName(), err)
}
if uint64(n) != p.Size {
return fmt.Errorf("wrong data size downloaded from %q at %s; got %d bytes; want %d bytes", p.Path, fs, n, p.Size)
@ -179,7 +179,7 @@ func (fs *FS) UploadPart(p common.Part, r io.Reader) error {
err = err1
}
if err != nil {
return fmt.Errorf("cannot upload data to %q at %s (remote path %q): %s", p.Path, fs, o.ObjectName(), err)
return fmt.Errorf("cannot upload data to %q at %s (remote path %q): %w", p.Path, fs, o.ObjectName(), err)
}
if uint64(n) != p.Size {
return fmt.Errorf("wrong data size uploaded to %q at %s; got %d bytes; want %d bytes", p.Path, fs, n, p.Size)
@ -201,7 +201,7 @@ func (fs *FS) DeleteFile(filePath string) error {
ctx := context.Background()
if err := o.Delete(ctx); err != nil {
if err != storage.ErrObjectNotExist {
return fmt.Errorf("cannot delete %q at %s (remote path %q): %s", filePath, fs, o.ObjectName(), err)
return fmt.Errorf("cannot delete %q at %s (remote path %q): %w", filePath, fs, o.ObjectName(), err)
}
}
return nil
@ -218,14 +218,14 @@ func (fs *FS) CreateFile(filePath string, data []byte) error {
n, err := w.Write(data)
if err != nil {
_ = w.Close()
return fmt.Errorf("cannot upload %d bytes to %q at %s (remote path %q): %s", len(data), filePath, fs, o.ObjectName(), err)
return fmt.Errorf("cannot upload %d bytes to %q at %s (remote path %q): %w", len(data), filePath, fs, o.ObjectName(), err)
}
if n != len(data) {
_ = w.Close()
return fmt.Errorf("wrong data size uploaded to %q at %s (remote path %q); got %d bytes; want %d bytes", filePath, fs, o.ObjectName(), n, len(data))
}
if err := w.Close(); err != nil {
return fmt.Errorf("cannot close %q at %s (remote path %q): %s", filePath, fs, o.ObjectName(), err)
return fmt.Errorf("cannot close %q at %s (remote path %q): %w", filePath, fs, o.ObjectName(), err)
}
return nil
}
@ -240,7 +240,7 @@ func (fs *FS) HasFile(filePath string) (bool, error) {
if err == storage.ErrObjectNotExist {
return false, nil
}
return false, fmt.Errorf("unexpected error when obtaining attributes for %q at %s (remote path %q): %s", filePath, fs, o.ObjectName(), err)
return false, fmt.Errorf("unexpected error when obtaining attributes for %q at %s (remote path %q): %w", filePath, fs, o.ObjectName(), err)
}
return true, nil
}

View file

@ -66,7 +66,7 @@ func (fs *FS) Init() error {
}
sess, err := session.NewSessionWithOptions(opts)
if err != nil {
return fmt.Errorf("cannot create S3 session: %s", err)
return fmt.Errorf("cannot create S3 session: %w", err)
}
if len(fs.CustomEndpoint) > 0 {
@ -81,7 +81,7 @@ func (fs *FS) Init() error {
ctx := context.Background()
region, err := s3manager.GetBucketRegion(ctx, sess, fs.Bucket, "")
if err != nil {
return fmt.Errorf("cannot determine region for bucket %q: %s", fs.Bucket, err)
return fmt.Errorf("cannot determine region for bucket %q: %w", fs.Bucket, err)
}
sess.Config.WithRegion(region)
logger.Infof("bucket %q is stored at region %q; switching to this region", fs.Bucket, region)
@ -133,7 +133,7 @@ func (fs *FS) ListParts() ([]common.Part, error) {
err = errOuter
}
if err != nil {
return nil, fmt.Errorf("error when listing s3 objects inside dir %q: %s", dir, err)
return nil, fmt.Errorf("error when listing s3 objects inside dir %q: %w", dir, err)
}
return parts, nil
}
@ -147,7 +147,7 @@ func (fs *FS) DeletePart(p common.Part) error {
}
_, err := fs.s3.DeleteObject(input)
if err != nil {
return fmt.Errorf("cannot delete %q at %s (remote path %q): %s", p.Path, fs, path, err)
return fmt.Errorf("cannot delete %q at %s (remote path %q): %w", p.Path, fs, path, err)
}
return nil
}
@ -175,7 +175,7 @@ func (fs *FS) CopyPart(srcFS common.OriginFS, p common.Part) error {
}
_, err := fs.s3.CopyObject(input)
if err != nil {
return fmt.Errorf("cannot copy %q from %s to %s (copySource %q): %s", p.Path, src, fs, copySource, err)
return fmt.Errorf("cannot copy %q from %s to %s (copySource %q): %w", p.Path, src, fs, copySource, err)
}
return nil
}
@ -189,7 +189,7 @@ func (fs *FS) DownloadPart(p common.Part, w io.Writer) error {
}
o, err := fs.s3.GetObject(input)
if err != nil {
return fmt.Errorf("cannot open %q at %s (remote path %q): %s", p.Path, fs, path, err)
return fmt.Errorf("cannot open %q at %s (remote path %q): %w", p.Path, fs, path, err)
}
r := o.Body
n, err := io.Copy(w, r)
@ -197,7 +197,7 @@ func (fs *FS) DownloadPart(p common.Part, w io.Writer) error {
err = err1
}
if err != nil {
return fmt.Errorf("cannot download %q from at %s (remote path %q): %s", p.Path, fs, path, err)
return fmt.Errorf("cannot download %q from at %s (remote path %q): %w", p.Path, fs, path, err)
}
if uint64(n) != p.Size {
return fmt.Errorf("wrong data size downloaded from %q at %s; got %d bytes; want %d bytes", p.Path, fs, n, p.Size)
@ -218,7 +218,7 @@ func (fs *FS) UploadPart(p common.Part, r io.Reader) error {
}
_, err := fs.uploader.Upload(input)
if err != nil {
return fmt.Errorf("cannot upoad data to %q at %s (remote path %q): %s", p.Path, fs, path, err)
return fmt.Errorf("cannot upoad data to %q at %s (remote path %q): %w", p.Path, fs, path, err)
}
if uint64(sr.size) != p.Size {
return fmt.Errorf("wrong data size uploaded to %q at %s; got %d bytes; want %d bytes", p.Path, fs, sr.size, p.Size)
@ -249,7 +249,7 @@ func (fs *FS) DeleteFile(filePath string) error {
Key: aws.String(path),
}
if _, err := fs.s3.DeleteObject(input); err != nil {
return fmt.Errorf("cannot delete %q at %s (remote path %q): %s", filePath, fs, path, err)
return fmt.Errorf("cannot delete %q at %s (remote path %q): %w", filePath, fs, path, err)
}
return nil
}
@ -269,7 +269,7 @@ func (fs *FS) CreateFile(filePath string, data []byte) error {
}
_, err := fs.uploader.Upload(input)
if err != nil {
return fmt.Errorf("cannot upoad data to %q at %s (remote path %q): %s", filePath, fs, path, err)
return fmt.Errorf("cannot upoad data to %q at %s (remote path %q): %w", filePath, fs, path, err)
}
l := int64(len(data))
if sr.size != l {
@ -290,10 +290,10 @@ func (fs *FS) HasFile(filePath string) (bool, error) {
if ae, ok := err.(awserr.Error); ok && ae.Code() == s3.ErrCodeNoSuchKey {
return false, nil
}
return false, fmt.Errorf("cannot open %q at %s (remote path %q): %s", filePath, fs, path, err)
return false, fmt.Errorf("cannot open %q at %s (remote path %q): %w", filePath, fs, path, err)
}
if err := o.Body.Close(); err != nil {
return false, fmt.Errorf("cannot close %q at %s (remote path %q): %s", filePath, fs, path, err)
return false, fmt.Errorf("cannot close %q at %s (remote path %q): %w", filePath, fs, path, err)
}
return true, nil
}

View file

@ -76,7 +76,7 @@ func MarshalTimestamps(dst []byte, timestamps []int64, precisionBits uint8) (res
func UnmarshalTimestamps(dst []int64, src []byte, mt MarshalType, firstTimestamp int64, itemsCount int) ([]int64, error) {
dst, err := unmarshalInt64Array(dst, src, mt, firstTimestamp, itemsCount)
if err != nil {
return nil, fmt.Errorf("cannot unmarshal %d timestamps from len(src)=%d bytes: %s", itemsCount, len(src), err)
return nil, fmt.Errorf("cannot unmarshal %d timestamps from len(src)=%d bytes: %w", itemsCount, len(src), err)
}
return dst, nil
}
@ -97,7 +97,7 @@ func MarshalValues(dst []byte, values []int64, precisionBits uint8) (result []by
func UnmarshalValues(dst []int64, src []byte, mt MarshalType, firstValue int64, itemsCount int) ([]int64, error) {
dst, err := unmarshalInt64Array(dst, src, mt, firstValue, itemsCount)
if err != nil {
return nil, fmt.Errorf("cannot unmarshal %d values from len(src)=%d bytes: %s", itemsCount, len(src), err)
return nil, fmt.Errorf("cannot unmarshal %d values from len(src)=%d bytes: %w", itemsCount, len(src), err)
}
return dst, nil
}
@ -166,36 +166,36 @@ func unmarshalInt64Array(dst []int64, src []byte, mt MarshalType, firstValue int
bb := bbPool.Get()
bb.B, err = DecompressZSTD(bb.B[:0], src)
if err != nil {
return nil, fmt.Errorf("cannot decompress zstd data of size %d: %s; src_zstd=%X", len(src), err, src)
return nil, fmt.Errorf("cannot decompress zstd data of size %d: %w; src_zstd=%X", len(src), err, src)
}
dst, err = unmarshalInt64NearestDelta(dst, bb.B, firstValue, itemsCount)
bbPool.Put(bb)
if err != nil {
return nil, fmt.Errorf("cannot unmarshal nearest delta data after zstd decompression: %s; src_zstd=%X", err, src)
return nil, fmt.Errorf("cannot unmarshal nearest delta data after zstd decompression: %w; src_zstd=%X", err, src)
}
return dst, nil
case MarshalTypeZSTDNearestDelta2:
bb := bbPool.Get()
bb.B, err = DecompressZSTD(bb.B[:0], src)
if err != nil {
return nil, fmt.Errorf("cannot decompress zstd data of size %d: %s; src_zstd=%X", len(src), err, src)
return nil, fmt.Errorf("cannot decompress zstd data of size %d: %w; src_zstd=%X", len(src), err, src)
}
dst, err = unmarshalInt64NearestDelta2(dst, bb.B, firstValue, itemsCount)
bbPool.Put(bb)
if err != nil {
return nil, fmt.Errorf("cannot unmarshal nearest delta2 data after zstd decompression: %s; src_zstd=%X", err, src)
return nil, fmt.Errorf("cannot unmarshal nearest delta2 data after zstd decompression: %w; src_zstd=%X", err, src)
}
return dst, nil
case MarshalTypeNearestDelta:
dst, err = unmarshalInt64NearestDelta(dst, src, firstValue, itemsCount)
if err != nil {
return nil, fmt.Errorf("cannot unmarshal nearest delta data: %s", err)
return nil, fmt.Errorf("cannot unmarshal nearest delta data: %w", err)
}
return dst, nil
case MarshalTypeNearestDelta2:
dst, err = unmarshalInt64NearestDelta2(dst, src, firstValue, itemsCount)
if err != nil {
return nil, fmt.Errorf("cannot unmarshal nearest delta2 data: %s", err)
return nil, fmt.Errorf("cannot unmarshal nearest delta2 data: %w", err)
}
return dst, nil
case MarshalTypeConst:
@ -219,7 +219,7 @@ func unmarshalInt64Array(dst []int64, src []byte, mt MarshalType, firstValue int
v := firstValue
tail, d, err := UnmarshalVarInt64(src)
if err != nil {
return nil, fmt.Errorf("cannot unmarshal delta value for delta const: %s", err)
return nil, fmt.Errorf("cannot unmarshal delta value for delta const: %w", err)
}
if len(tail) > 0 {
return nil, fmt.Errorf("unexpected trailing data after delta const (d=%d): %d bytes", d, len(tail))

View file

@ -34,7 +34,7 @@ func BenchmarkUnmarshalGaugeArray(b *testing.B) {
for pb.Next() {
dst, err = unmarshalInt64Array(dst[:0], benchMarshaledGaugeArray, MarshalTypeZSTDNearestDelta, benchGaugeArray[0], len(benchGaugeArray))
if err != nil {
panic(fmt.Errorf("cannot unmarshal gauge array: %s", err))
panic(fmt.Errorf("cannot unmarshal gauge array: %w", err))
}
atomic.AddUint64(&Sink, uint64(len(dst)))
}
@ -81,7 +81,7 @@ func BenchmarkUnmarshalDeltaConstArray(b *testing.B) {
for pb.Next() {
dst, err = unmarshalInt64Array(dst[:0], benchMarshaledDeltaConstArray, MarshalTypeDeltaConst, benchDeltaConstArray[0], len(benchDeltaConstArray))
if err != nil {
panic(fmt.Errorf("cannot unmarshal delta const array: %s", err))
panic(fmt.Errorf("cannot unmarshal delta const array: %w", err))
}
atomic.AddUint64(&Sink, uint64(len(dst)))
}
@ -128,7 +128,7 @@ func BenchmarkUnmarshalConstArray(b *testing.B) {
for pb.Next() {
dst, err = unmarshalInt64Array(dst[:0], benchMarshaledConstArray, MarshalTypeConst, benchConstArray[0], len(benchConstArray))
if err != nil {
panic(fmt.Errorf("cannot unmarshal const array: %s", err))
panic(fmt.Errorf("cannot unmarshal const array: %w", err))
}
atomic.AddUint64(&Sink, uint64(len(dst)))
}
@ -173,7 +173,7 @@ func BenchmarkUnmarshalZeroConstArray(b *testing.B) {
for pb.Next() {
dst, err = unmarshalInt64Array(dst[:0], benchMarshaledZeroConstArray, MarshalTypeConst, benchZeroConstArray[0], len(benchZeroConstArray))
if err != nil {
panic(fmt.Errorf("cannot unmarshal zero const array: %s", err))
panic(fmt.Errorf("cannot unmarshal zero const array: %w", err))
}
atomic.AddUint64(&Sink, uint64(len(dst)))
}
@ -212,7 +212,7 @@ func BenchmarkUnmarshalInt64Array(b *testing.B) {
for pb.Next() {
dst, err = unmarshalInt64Array(dst[:0], benchMarshaledInt64Array, benchMarshalType, benchInt64Array[0], len(benchInt64Array))
if err != nil {
panic(fmt.Errorf("cannot unmarshal int64 array: %s", err))
panic(fmt.Errorf("cannot unmarshal int64 array: %w", err))
}
atomic.AddUint64(&Sink, uint64(len(dst)))
}

View file

@ -229,7 +229,7 @@ func MarshalBytes(dst, b []byte) []byte {
func UnmarshalBytes(src []byte) ([]byte, []byte, error) {
tail, n, err := UnmarshalVarUint64(src)
if err != nil {
return nil, nil, fmt.Errorf("cannot unmarshal string size: %d", err)
return nil, nil, fmt.Errorf("cannot unmarshal string size: %w", err)
}
src = tail
if uint64(len(src)) < n {

View file

@ -135,7 +135,7 @@ func benchmarkUnmarshalVarInt64s(b *testing.B, maxValue int64) {
for pb.Next() {
tail, err := UnmarshalVarInt64s(dst, data)
if err != nil {
panic(fmt.Errorf("unexpected error: %s", err))
panic(fmt.Errorf("unexpected error: %w", err))
}
if len(tail) > 0 {
panic(fmt.Errorf("unexpected non-empty tail with len=%d: %X", len(tail), tail))

View file

@ -60,7 +60,7 @@ func unmarshalInt64NearestDelta(dst []int64, src []byte, firstValue int64, items
tail, err := UnmarshalVarInt64s(is.A, src)
if err != nil {
return nil, fmt.Errorf("cannot unmarshal nearest delta from %d bytes; src=%X: %s", len(src), src, err)
return nil, fmt.Errorf("cannot unmarshal nearest delta from %d bytes; src=%X: %w", len(src), src, err)
}
if len(tail) > 0 {
return nil, fmt.Errorf("unexpected tail left after unmarshaling %d items from %d bytes; tail size=%d; src=%X; tail=%X", itemsCount, len(src), len(tail), src, tail)

View file

@ -63,7 +63,7 @@ func unmarshalInt64NearestDelta2(dst []int64, src []byte, firstValue int64, item
tail, err := UnmarshalVarInt64s(is.A, src)
if err != nil {
return nil, fmt.Errorf("cannot unmarshal nearest delta from %d bytes; src=%X: %s", len(src), src, err)
return nil, fmt.Errorf("cannot unmarshal nearest delta from %d bytes; src=%X: %w", len(src), src, err)
}
if len(tail) > 0 {
return nil, fmt.Errorf("unexpected tail left after unmarshaling %d items from %d bytes; tail size=%d; src=%X; tail=%X", itemsCount, len(src), len(tail), src, tail)

View file

@ -35,7 +35,7 @@ func BenchmarkUnmarshalInt64NearestDelta2(b *testing.B) {
for pb.Next() {
dst, err = unmarshalInt64NearestDelta2(dst[:0], benchInt64NearestDelta2Data, 0, len(benchInt64Array))
if err != nil {
panic(fmt.Errorf("unexpected error: %s", err))
panic(fmt.Errorf("unexpected error: %w", err))
}
atomic.AddUint64(&Sink, uint64(len(dst)))
}

View file

@ -35,7 +35,7 @@ func BenchmarkUnmarshalInt64NearestDelta(b *testing.B) {
for pb.Next() {
dst, err = unmarshalInt64NearestDelta(dst[:0], benchInt64NearestDeltaData, 0, len(benchInt64Array))
if err != nil {
panic(fmt.Errorf("unexpected error: %s", err))
panic(fmt.Errorf("unexpected error: %w", err))
}
atomic.AddUint64(&Sink, uint64(len(dst)))
}

View file

@ -63,7 +63,7 @@ func OpenReaderAt(path string, offset int64, nocache bool) (*Reader, error) {
n, err := r.f.Seek(offset, io.SeekStart)
if err != nil {
r.MustClose()
return nil, fmt.Errorf("cannot seek to offset=%d for %q: %s", offset, path, err)
return nil, fmt.Errorf("cannot seek to offset=%d for %q: %w", offset, path, err)
}
if n != offset {
r.MustClose()
@ -78,7 +78,7 @@ func OpenReaderAt(path string, offset int64, nocache bool) (*Reader, error) {
func Open(path string, nocache bool) (*Reader, error) {
f, err := os.Open(path)
if err != nil {
return nil, fmt.Errorf("cannot open file %q: %s", path, err)
return nil, fmt.Errorf("cannot open file %q: %w", path, err)
}
r := &Reader{
f: f,
@ -124,7 +124,7 @@ func (r *Reader) Read(p []byte) (int, error) {
return n, err
}
if err := r.st.adviseDontNeed(n, false); err != nil {
return n, fmt.Errorf("advise error for %q: %s", r.f.Name(), err)
return n, fmt.Errorf("advise error for %q: %w", r.f.Name(), err)
}
return n, nil
}
@ -172,12 +172,12 @@ type Writer struct {
func OpenWriterAt(path string, offset int64, nocache bool) (*Writer, error) {
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0600)
if err != nil {
return nil, fmt.Errorf("cannot open %q: %s", path, err)
return nil, fmt.Errorf("cannot open %q: %w", path, err)
}
n, err := f.Seek(offset, io.SeekStart)
if err != nil {
_ = f.Close()
return nil, fmt.Errorf("cannot seek to offset=%d in %q: %s", offset, path, err)
return nil, fmt.Errorf("cannot seek to offset=%d in %q: %w", offset, path, err)
}
if n != offset {
_ = f.Close()
@ -192,7 +192,7 @@ func OpenWriterAt(path string, offset int64, nocache bool) (*Writer, error) {
func Create(path string, nocache bool) (*Writer, error) {
f, err := os.Create(path)
if err != nil {
return nil, fmt.Errorf("cannot create file %q: %s", path, err)
return nil, fmt.Errorf("cannot create file %q: %w", path, err)
}
return newWriter(f, nocache), nil
}
@ -248,7 +248,7 @@ func (w *Writer) Write(p []byte) (int, error) {
return n, err
}
if err := w.st.adviseDontNeed(n, true); err != nil {
return n, fmt.Errorf("advise error for %q: %s", w.f.Name(), err)
return n, fmt.Errorf("advise error for %q: %w", w.f.Name(), err)
}
return n, nil
}

View file

@ -18,11 +18,11 @@ func (st *streamTracker) adviseDontNeed(n int, fdatasync bool) error {
blockSize := st.length - (st.length % dontNeedBlockSize)
if fdatasync {
if err := unixFdatasync(int(st.fd)); err != nil {
return fmt.Errorf("unix.Fdatasync error: %s", err)
return fmt.Errorf("unix.Fdatasync error: %w", err)
}
}
if err := unix.Fadvise(int(st.fd), int64(st.offset), int64(blockSize), unix.FADV_DONTNEED); err != nil {
return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, %d, %d) error: %s", st.offset, blockSize, err)
return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, %d, %d) error: %w", st.offset, blockSize, err)
}
st.offset += blockSize
st.length -= blockSize
@ -35,7 +35,7 @@ func (st *streamTracker) close() error {
}
// Advise the whole file as it shouldn't be cached.
if err := unix.Fadvise(int(st.fd), 0, 0, unix.FADV_DONTNEED); err != nil {
return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, 0, 0) error: %s", err)
return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, 0, 0) error: %w", err)
}
return nil
}

View file

@ -16,11 +16,11 @@ func (st *streamTracker) adviseDontNeed(n int, fdatasync bool) error {
blockSize := st.length - (st.length % dontNeedBlockSize)
if fdatasync {
if err := unix.Fdatasync(int(st.fd)); err != nil {
return fmt.Errorf("unix.Fdatasync error: %s", err)
return fmt.Errorf("unix.Fdatasync error: %w", err)
}
}
if err := unix.Fadvise(int(st.fd), int64(st.offset), int64(blockSize), unix.FADV_DONTNEED); err != nil {
return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, %d, %d) error: %s", st.offset, blockSize, err)
return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, %d, %d) error: %w", st.offset, blockSize, err)
}
st.offset += blockSize
st.length -= blockSize
@ -33,7 +33,7 @@ func (st *streamTracker) close() error {
}
// Advise the whole file as it shouldn't be cached.
if err := unix.Fadvise(int(st.fd), 0, 0, unix.FADV_DONTNEED); err != nil {
return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, 0, 0) error: %s", err)
return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, 0, 0) error: %w", err)
}
return nil
}

View file

@ -16,7 +16,7 @@ func fadviseSequentialRead(f *os.File, prefetch bool) error {
mode |= unix.FADV_WILLNEED
}
if err := unix.Fadvise(int(fd), 0, 0, mode); err != nil {
return fmt.Errorf("error returned from unix.Fadvise(%d): %s", mode, err)
return fmt.Errorf("error returned from unix.Fadvise(%d): %w", mode, err)
}
return nil
}

View file

@ -48,12 +48,12 @@ func WriteFileAtomically(path string, data []byte) error {
tmpPath := fmt.Sprintf("%s.tmp.%d", path, n)
f, err := filestream.Create(tmpPath, false)
if err != nil {
return fmt.Errorf("cannot create file %q: %s", tmpPath, err)
return fmt.Errorf("cannot create file %q: %w", tmpPath, err)
}
if _, err := f.Write(data); err != nil {
f.MustClose()
MustRemoveAll(tmpPath)
return fmt.Errorf("cannot write %d bytes to file %q: %s", len(data), tmpPath, err)
return fmt.Errorf("cannot write %d bytes to file %q: %w", len(data), tmpPath, err)
}
// Sync and close the file.
@ -63,14 +63,14 @@ func WriteFileAtomically(path string, data []byte) error {
if err := os.Rename(tmpPath, path); err != nil {
// do not call MustRemoveAll(tmpPath) here, so the user could inspect
// the file contents during investigating the issue.
return fmt.Errorf("cannot move %q to %q: %s", tmpPath, path, err)
return fmt.Errorf("cannot move %q to %q: %w", tmpPath, path, err)
}
// Sync the containing directory, so the file is guaranteed to appear in the directory.
// See https://www.quora.com/When-should-you-fsync-the-containing-directory-in-addition-to-the-file-itself
absPath, err := filepath.Abs(path)
if err != nil {
return fmt.Errorf("cannot obtain absolute path to %q: %s", path, err)
return fmt.Errorf("cannot obtain absolute path to %q: %w", path, err)
}
parentDirPath := filepath.Dir(absPath)
MustSyncPath(parentDirPath)
@ -204,12 +204,12 @@ func MustRemoveAllWithDoneCallback(path string, done func()) {
// HardLinkFiles makes hard links for all the files from srcDir in dstDir.
func HardLinkFiles(srcDir, dstDir string) error {
if err := mkdirSync(dstDir); err != nil {
return fmt.Errorf("cannot create dstDir=%q: %s", dstDir, err)
return fmt.Errorf("cannot create dstDir=%q: %w", dstDir, err)
}
d, err := os.Open(srcDir)
if err != nil {
return fmt.Errorf("cannot open srcDir=%q: %s", srcDir, err)
return fmt.Errorf("cannot open srcDir=%q: %w", srcDir, err)
}
defer func() {
if err := d.Close(); err != nil {
@ -219,7 +219,7 @@ func HardLinkFiles(srcDir, dstDir string) error {
fis, err := d.Readdir(-1)
if err != nil {
return fmt.Errorf("cannot read files in scrDir=%q: %s", srcDir, err)
return fmt.Errorf("cannot read files in scrDir=%q: %w", srcDir, err)
}
for _, fi := range fis {
if IsDirOrSymlink(fi) {
@ -248,7 +248,7 @@ func SymlinkRelative(srcPath, dstPath string) error {
baseDir := filepath.Dir(dstPath)
srcPathRel, err := filepath.Rel(baseDir, srcPath)
if err != nil {
return fmt.Errorf("cannot make relative path for srcPath=%q: %s", srcPath, err)
return fmt.Errorf("cannot make relative path for srcPath=%q: %w", srcPath, err)
}
return os.Symlink(srcPathRel, dstPath)
}
@ -260,7 +260,7 @@ func ReadFullData(r io.Reader, data []byte) error {
if err == io.EOF {
return io.EOF
}
return fmt.Errorf("cannot read %d bytes; read only %d bytes; error: %s", len(data), n, err)
return fmt.Errorf("cannot read %d bytes; read only %d bytes; error: %w", len(data), n, err)
}
if n != len(data) {
logger.Panicf("BUG: io.ReadFull read only %d bytes; must read %d bytes", n, len(data))
@ -288,10 +288,10 @@ func CreateFlockFile(dir string) (*os.File, error) {
flockFile := dir + "/flock.lock"
flockF, err := os.Create(flockFile)
if err != nil {
return nil, fmt.Errorf("cannot create lock file %q: %s", flockFile, err)
return nil, fmt.Errorf("cannot create lock file %q: %w", flockFile, err)
}
if err := unix.Flock(int(flockF.Fd()), unix.LOCK_EX|unix.LOCK_NB); err != nil {
return nil, fmt.Errorf("cannot acquire lock on file %q: %s", flockFile, err)
return nil, fmt.Errorf("cannot acquire lock on file %q: %w", flockFile, err)
}
return flockF, nil
}

View file

@ -163,7 +163,7 @@ func (r *ReaderAt) MustFadviseSequentialRead(prefetch bool) {
func OpenReaderAt(path string) (*ReaderAt, error) {
f, err := os.Open(path)
if err != nil {
return nil, fmt.Errorf("cannot open file %q for reader: %s", path, err)
return nil, fmt.Errorf("cannot open file %q for reader: %w", path, err)
}
var r ReaderAt
r.f = f
@ -171,7 +171,7 @@ func OpenReaderAt(path string) (*ReaderAt, error) {
if !*disableMmap {
fi, err := f.Stat()
if err != nil {
return nil, fmt.Errorf("error in stat: %s", err)
return nil, fmt.Errorf("error in stat: %w", err)
}
size := fi.Size()
bm := &pageCacheBitmap{
@ -187,7 +187,7 @@ func OpenReaderAt(path string) (*ReaderAt, error) {
data, err := mmapFile(f, size)
if err != nil {
MustClose(f)
return nil, fmt.Errorf("cannot init reader for %q: %s", path, err)
return nil, fmt.Errorf("cannot init reader for %q: %w", path, err)
}
r.mmapData = data
}
@ -237,7 +237,7 @@ func mmapFile(f *os.File, size int64) ([]byte, error) {
}
data, err := unix.Mmap(int(f.Fd()), 0, int(size), unix.PROT_READ, unix.MAP_SHARED)
if err != nil {
return nil, fmt.Errorf("cannot mmap file with size %d: %s", size, err)
return nil, fmt.Errorf("cannot mmap file with size %d: %w", size, err)
}
return data[:sizeOrig], nil
}

View file

@ -57,23 +57,23 @@ func VMSelectServer(c net.Conn, compressionLevel int) (*BufferedConn, error) {
func genericServer(c net.Conn, msg string, compressionLevel int) (*BufferedConn, error) {
if err := readMessage(c, msg); err != nil {
return nil, fmt.Errorf("cannot read hello: %s", err)
return nil, fmt.Errorf("cannot read hello: %w", err)
}
if err := writeMessage(c, successResponse); err != nil {
return nil, fmt.Errorf("cannot write success response on hello: %s", err)
return nil, fmt.Errorf("cannot write success response on hello: %w", err)
}
isRemoteCompressed, err := readIsCompressed(c)
if err != nil {
return nil, fmt.Errorf("cannot read isCompressed flag: %s", err)
return nil, fmt.Errorf("cannot read isCompressed flag: %w", err)
}
if err := writeMessage(c, successResponse); err != nil {
return nil, fmt.Errorf("cannot write success response on isCompressed: %s", err)
return nil, fmt.Errorf("cannot write success response on isCompressed: %w", err)
}
if err := writeIsCompressed(c, compressionLevel > 0); err != nil {
return nil, fmt.Errorf("cannot write isCompressed flag: %s", err)
return nil, fmt.Errorf("cannot write isCompressed flag: %w", err)
}
if err := readMessage(c, successResponse); err != nil {
return nil, fmt.Errorf("cannot read success response on isCompressed: %s", err)
return nil, fmt.Errorf("cannot read success response on isCompressed: %w", err)
}
bc := newBufferedConn(c, compressionLevel, isRemoteCompressed)
return bc, nil
@ -81,23 +81,23 @@ func genericServer(c net.Conn, msg string, compressionLevel int) (*BufferedConn,
func genericClient(c net.Conn, msg string, compressionLevel int) (*BufferedConn, error) {
if err := writeMessage(c, msg); err != nil {
return nil, fmt.Errorf("cannot write hello: %s", err)
return nil, fmt.Errorf("cannot write hello: %w", err)
}
if err := readMessage(c, successResponse); err != nil {
return nil, fmt.Errorf("cannot read success response after sending hello: %s", err)
return nil, fmt.Errorf("cannot read success response after sending hello: %w", err)
}
if err := writeIsCompressed(c, compressionLevel > 0); err != nil {
return nil, fmt.Errorf("cannot write isCompressed flag: %s", err)
return nil, fmt.Errorf("cannot write isCompressed flag: %w", err)
}
if err := readMessage(c, successResponse); err != nil {
return nil, fmt.Errorf("cannot read success response on isCompressed: %s", err)
return nil, fmt.Errorf("cannot read success response on isCompressed: %w", err)
}
isRemoteCompressed, err := readIsCompressed(c)
if err != nil {
return nil, fmt.Errorf("cannot read isCompressed flag: %s", err)
return nil, fmt.Errorf("cannot read isCompressed flag: %w", err)
}
if err := writeMessage(c, successResponse); err != nil {
return nil, fmt.Errorf("cannot write success response on isCompressed: %s", err)
return nil, fmt.Errorf("cannot write success response on isCompressed: %w", err)
}
bc := newBufferedConn(c, compressionLevel, isRemoteCompressed)
return bc, nil
@ -122,18 +122,18 @@ func readIsCompressed(c net.Conn) (bool, error) {
func writeMessage(c net.Conn, msg string) error {
if err := c.SetWriteDeadline(time.Now().Add(time.Second)); err != nil {
return fmt.Errorf("cannot set write deadline: %s", err)
return fmt.Errorf("cannot set write deadline: %w", err)
}
if _, err := io.WriteString(c, msg); err != nil {
return fmt.Errorf("cannot write %q to server: %s", msg, err)
return fmt.Errorf("cannot write %q to server: %w", msg, err)
}
if fc, ok := c.(flusher); ok {
if err := fc.Flush(); err != nil {
return fmt.Errorf("cannot flush %q to server: %s", msg, err)
return fmt.Errorf("cannot flush %q to server: %w", msg, err)
}
}
if err := c.SetWriteDeadline(zeroTime); err != nil {
return fmt.Errorf("cannot reset write deadline: %s", err)
return fmt.Errorf("cannot reset write deadline: %w", err)
}
return nil
}
@ -155,14 +155,14 @@ func readMessage(c net.Conn, msg string) error {
func readData(c net.Conn, dataLen int) ([]byte, error) {
if err := c.SetReadDeadline(time.Now().Add(time.Second)); err != nil {
return nil, fmt.Errorf("cannot set read deadline: %s", err)
return nil, fmt.Errorf("cannot set read deadline: %w", err)
}
data := make([]byte, dataLen)
if n, err := io.ReadFull(c, data); err != nil {
return nil, fmt.Errorf("cannot read message with size %d: %s; read only %d bytes", dataLen, err, n)
return nil, fmt.Errorf("cannot read message with size %d: %w; read only %d bytes", dataLen, err, n)
}
if err := c.SetReadDeadline(zeroTime); err != nil {
return nil, fmt.Errorf("cannot reset read deadline: %s", err)
return nil, fmt.Errorf("cannot reset read deadline: %w", err)
}
return data, nil
}

View file

@ -23,12 +23,12 @@ func testHandshake(t *testing.T, clientFunc, serverFunc Func) {
go func() {
bcs, err := serverFunc(s, 3)
if err != nil {
ch <- fmt.Errorf("error on outer handshake: %s", err)
ch <- fmt.Errorf("error on outer handshake: %w", err)
return
}
bcc, err := clientFunc(bcs, 3)
if err != nil {
ch <- fmt.Errorf("error on inner handshake: %s", err)
ch <- fmt.Errorf("error on inner handshake: %w", err)
return
}
if bcc == nil {

View file

@ -61,7 +61,7 @@ func (bh *blockHeader) Unmarshal(src []byte) ([]byte, error) {
// Unmarshal commonPrefix
tail, cp, err := encoding.UnmarshalBytes(src)
if err != nil {
return tail, fmt.Errorf("cannot unmarshal commonPrefix: %s", err)
return tail, fmt.Errorf("cannot unmarshal commonPrefix: %w", err)
}
bh.commonPrefix = append(bh.commonPrefix[:0], cp...)
src = tail
@ -69,7 +69,7 @@ func (bh *blockHeader) Unmarshal(src []byte) ([]byte, error) {
// Unmarshal firstItem
tail, fi, err := encoding.UnmarshalBytes(src)
if err != nil {
return tail, fmt.Errorf("cannot unmarshal firstItem: %s", err)
return tail, fmt.Errorf("cannot unmarshal firstItem: %w", err)
}
bh.firstItem = append(bh.firstItem[:0], fi...)
src = tail
@ -81,7 +81,7 @@ func (bh *blockHeader) Unmarshal(src []byte) ([]byte, error) {
bh.marshalType = marshalType(src[0])
src = src[1:]
if err := checkMarshalType(bh.marshalType); err != nil {
return src, fmt.Errorf("unexpected marshalType: %s", err)
return src, fmt.Errorf("unexpected marshalType: %w", err)
}
// Unmarshal itemsCount
@ -148,7 +148,7 @@ func unmarshalBlockHeaders(dst []blockHeader, src []byte, blockHeadersCount int)
for i := 0; i < blockHeadersCount; i++ {
tail, err := dst[dstLen+i].Unmarshal(src)
if err != nil {
return dst, fmt.Errorf("cannot unmarshal block header: %s", err)
return dst, fmt.Errorf("cannot unmarshal block header: %w", err)
}
src = tail
}

View file

@ -131,31 +131,31 @@ func (bsr *blockStreamReader) InitFromFilePart(path string) error {
path = filepath.Clean(path)
if err := bsr.ph.ParseFromPath(path); err != nil {
return fmt.Errorf("cannot parse partHeader data from %q: %s", path, err)
return fmt.Errorf("cannot parse partHeader data from %q: %w", path, err)
}
metaindexPath := path + "/metaindex.bin"
metaindexFile, err := filestream.Open(metaindexPath, true)
if err != nil {
return fmt.Errorf("cannot open metaindex file in stream mode: %s", err)
return fmt.Errorf("cannot open metaindex file in stream mode: %w", err)
}
bsr.mrs, err = unmarshalMetaindexRows(bsr.mrs[:0], metaindexFile)
metaindexFile.MustClose()
if err != nil {
return fmt.Errorf("cannot unmarshal metaindex rows from file %q: %s", metaindexPath, err)
return fmt.Errorf("cannot unmarshal metaindex rows from file %q: %w", metaindexPath, err)
}
indexPath := path + "/index.bin"
indexFile, err := filestream.Open(indexPath, true)
if err != nil {
return fmt.Errorf("cannot open index file in stream mode: %s", err)
return fmt.Errorf("cannot open index file in stream mode: %w", err)
}
itemsPath := path + "/items.bin"
itemsFile, err := filestream.Open(itemsPath, true)
if err != nil {
indexFile.MustClose()
return fmt.Errorf("cannot open items file in stream mode: %s", err)
return fmt.Errorf("cannot open items file in stream mode: %w", err)
}
lensPath := path + "/lens.bin"
@ -163,7 +163,7 @@ func (bsr *blockStreamReader) InitFromFilePart(path string) error {
if err != nil {
indexFile.MustClose()
itemsFile.MustClose()
return fmt.Errorf("cannot open lens file in stream mode: %s", err)
return fmt.Errorf("cannot open lens file in stream mode: %w", err)
}
bsr.path = path
@ -200,7 +200,7 @@ func (bsr *blockStreamReader) Next() bool {
err = fmt.Errorf("unexpected last item; got %X; want %X", lastItem, bsr.ph.lastItem)
}
} else {
err = fmt.Errorf("cannot read the next index block: %s", err)
err = fmt.Errorf("cannot read the next index block: %w", err)
}
bsr.err = err
return false
@ -212,18 +212,18 @@ func (bsr *blockStreamReader) Next() bool {
bsr.sb.itemsData = bytesutil.Resize(bsr.sb.itemsData, int(bsr.bh.itemsBlockSize))
if err := fs.ReadFullData(bsr.itemsReader, bsr.sb.itemsData); err != nil {
bsr.err = fmt.Errorf("cannot read compressed items block with size %d: %s", bsr.bh.itemsBlockSize, err)
bsr.err = fmt.Errorf("cannot read compressed items block with size %d: %w", bsr.bh.itemsBlockSize, err)
return false
}
bsr.sb.lensData = bytesutil.Resize(bsr.sb.lensData, int(bsr.bh.lensBlockSize))
if err := fs.ReadFullData(bsr.lensReader, bsr.sb.lensData); err != nil {
bsr.err = fmt.Errorf("cannot read compressed lens block with size %d: %s", bsr.bh.lensBlockSize, err)
bsr.err = fmt.Errorf("cannot read compressed lens block with size %d: %w", bsr.bh.lensBlockSize, err)
return false
}
if err := bsr.Block.UnmarshalData(&bsr.sb, bsr.bh.firstItem, bsr.bh.commonPrefix, bsr.bh.itemsCount, bsr.bh.marshalType); err != nil {
bsr.err = fmt.Errorf("cannot unmarshal inmemoryBlock from storageBlock with firstItem=%X, commonPrefix=%X, itemsCount=%d, marshalType=%d: %s",
bsr.err = fmt.Errorf("cannot unmarshal inmemoryBlock from storageBlock with firstItem=%X, commonPrefix=%X, itemsCount=%d, marshalType=%d: %w",
bsr.bh.firstItem, bsr.bh.commonPrefix, bsr.bh.itemsCount, bsr.bh.marshalType, err)
return false
}
@ -260,14 +260,14 @@ func (bsr *blockStreamReader) readNextBHS() error {
// Read compressed index block.
bsr.packedBuf = bytesutil.Resize(bsr.packedBuf, int(mr.indexBlockSize))
if err := fs.ReadFullData(bsr.indexReader, bsr.packedBuf); err != nil {
return fmt.Errorf("cannot read compressed index block with size %d: %s", mr.indexBlockSize, err)
return fmt.Errorf("cannot read compressed index block with size %d: %w", mr.indexBlockSize, err)
}
// Unpack the compressed index block.
var err error
bsr.unpackedBuf, err = encoding.DecompressZSTD(bsr.unpackedBuf[:0], bsr.packedBuf)
if err != nil {
return fmt.Errorf("cannot decompress index block with size %d: %s", mr.indexBlockSize, err)
return fmt.Errorf("cannot decompress index block with size %d: %w", mr.indexBlockSize, err)
}
// Unmarshal the unpacked index block into bsr.bhs.
@ -280,7 +280,7 @@ func (bsr *blockStreamReader) readNextBHS() error {
for i := 0; i < int(mr.blockHeadersCount); i++ {
tail, err := bsr.bhs[i].Unmarshal(b)
if err != nil {
return fmt.Errorf("cannot unmarshal blockHeader #%d in the index block #%d: %s", len(bsr.bhs), bsr.mrIdx, err)
return fmt.Errorf("cannot unmarshal blockHeader #%d in the index block #%d: %w", len(bsr.bhs), bsr.mrIdx, err)
}
b = tail
}

View file

@ -84,7 +84,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre
// Create the directory
if err := fs.MkdirAllFailIfExist(path); err != nil {
return fmt.Errorf("cannot create directory %q: %s", path, err)
return fmt.Errorf("cannot create directory %q: %w", path, err)
}
// Create part files in the directory.
@ -95,7 +95,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre
metaindexFile, err := filestream.Create(metaindexPath, false)
if err != nil {
fs.MustRemoveAll(path)
return fmt.Errorf("cannot create metaindex file: %s", err)
return fmt.Errorf("cannot create metaindex file: %w", err)
}
indexPath := path + "/index.bin"
@ -103,7 +103,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre
if err != nil {
metaindexFile.MustClose()
fs.MustRemoveAll(path)
return fmt.Errorf("cannot create index file: %s", err)
return fmt.Errorf("cannot create index file: %w", err)
}
itemsPath := path + "/items.bin"
@ -112,7 +112,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre
metaindexFile.MustClose()
indexFile.MustClose()
fs.MustRemoveAll(path)
return fmt.Errorf("cannot create items file: %s", err)
return fmt.Errorf("cannot create items file: %w", err)
}
lensPath := path + "/lens.bin"
@ -122,7 +122,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre
indexFile.MustClose()
itemsFile.MustClose()
fs.MustRemoveAll(path)
return fmt.Errorf("cannot create lens file: %s", err)
return fmt.Errorf("cannot create lens file: %w", err)
}
bsw.reset()

View file

@ -267,7 +267,7 @@ func (ib *inmemoryBlock) UnmarshalData(sb *storageBlock, firstItem, commonPrefix
switch mt {
case marshalTypePlain:
if err := ib.unmarshalDataPlain(sb, firstItem, itemsCount); err != nil {
return fmt.Errorf("cannot unmarshal plain data: %s", err)
return fmt.Errorf("cannot unmarshal plain data: %w", err)
}
if !ib.isSorted() {
return fmt.Errorf("plain data block contains unsorted items; items:\n%s", ib.debugItemsString())
@ -289,7 +289,7 @@ func (ib *inmemoryBlock) UnmarshalData(sb *storageBlock, firstItem, commonPrefix
// Unmarshal lens data.
bb.B, err = encoding.DecompressZSTD(bb.B[:0], sb.lensData)
if err != nil {
return fmt.Errorf("cannot decompress lensData: %s", err)
return fmt.Errorf("cannot decompress lensData: %w", err)
}
lb := getLensBuffer(int(2 * itemsCount))
@ -304,7 +304,7 @@ func (ib *inmemoryBlock) UnmarshalData(sb *storageBlock, firstItem, commonPrefix
// Unmarshal prefixLens
tail, err := encoding.UnmarshalVarUint64s(is.A, bb.B)
if err != nil {
return fmt.Errorf("cannot unmarshal prefixLens from lensData: %s", err)
return fmt.Errorf("cannot unmarshal prefixLens from lensData: %w", err)
}
prefixLens[0] = 0
for i, xLen := range is.A {
@ -314,7 +314,7 @@ func (ib *inmemoryBlock) UnmarshalData(sb *storageBlock, firstItem, commonPrefix
// Unmarshal lens
tail, err = encoding.UnmarshalVarUint64s(is.A, tail)
if err != nil {
return fmt.Errorf("cannot unmarshal lens from lensData: %s", err)
return fmt.Errorf("cannot unmarshal lens from lensData: %w", err)
}
if len(tail) > 0 {
return fmt.Errorf("unexpected tail left unmarshaling %d lens; tail size=%d; contents=%X", itemsCount, len(tail), tail)
@ -331,7 +331,7 @@ func (ib *inmemoryBlock) UnmarshalData(sb *storageBlock, firstItem, commonPrefix
// Unmarshal items data.
bb.B, err = encoding.DecompressZSTD(bb.B[:0], sb.itemsData)
if err != nil {
return fmt.Errorf("cannot decompress lensData: %s", err)
return fmt.Errorf("cannot decompress lensData: %w", err)
}
data := bytesutil.Resize(ib.data, maxInmemoryBlockSize)
if n := int(itemsCount) - cap(ib.items); n > 0 {

View file

@ -30,7 +30,7 @@ type PrepareBlockCallback func(data []byte, items [][]byte) ([]byte, [][]byte)
func mergeBlockStreams(ph *partHeader, bsw *blockStreamWriter, bsrs []*blockStreamReader, prepareBlock PrepareBlockCallback, stopCh <-chan struct{}, itemsMerged *uint64) error {
bsm := bsmPool.Get().(*blockStreamMerger)
if err := bsm.Init(bsrs, prepareBlock); err != nil {
return fmt.Errorf("cannot initialize blockStreamMerger: %s", err)
return fmt.Errorf("cannot initialize blockStreamMerger: %w", err)
}
err := bsm.Merge(bsw, ph, stopCh, itemsMerged)
bsm.reset()
@ -42,7 +42,7 @@ func mergeBlockStreams(ph *partHeader, bsw *blockStreamWriter, bsrs []*blockStre
if err == errForciblyStopped {
return err
}
return fmt.Errorf("cannot merge %d block streams: %s: %s", len(bsrs), bsrs, err)
return fmt.Errorf("cannot merge %d block streams: %s: %w", len(bsrs), bsrs, err)
}
var bsmPool = &sync.Pool{
@ -88,7 +88,7 @@ func (bsm *blockStreamMerger) Init(bsrs []*blockStreamReader, prepareBlock Prepa
}
if err := bsr.Error(); err != nil {
return fmt.Errorf("cannot obtain the next block from blockStreamReader %q: %s", bsr.path, err)
return fmt.Errorf("cannot obtain the next block from blockStreamReader %q: %w", bsr.path, err)
}
}
heap.Init(&bsm.bsrHeap)
@ -143,7 +143,7 @@ again:
goto again
}
if err := bsr.Error(); err != nil {
return fmt.Errorf("cannot read storageBlock: %s", err)
return fmt.Errorf("cannot read storageBlock: %w", err)
}
goto again
}

View file

@ -121,7 +121,7 @@ func testMergeBlockStreamsSerial(blocksToMerge, maxItemsPerBlock int) error {
var bsw blockStreamWriter
bsw.InitFromInmemoryPart(&dstIP)
if err := mergeBlockStreams(&dstIP.ph, &bsw, bsrs, nil, nil, &itemsMerged); err != nil {
return fmt.Errorf("cannot merge block streams: %s", err)
return fmt.Errorf("cannot merge block streams: %w", err)
}
if itemsMerged != uint64(len(items)) {
return fmt.Errorf("unexpected itemsMerged; got %d; want %d", itemsMerged, len(items))
@ -130,7 +130,7 @@ func testMergeBlockStreamsSerial(blocksToMerge, maxItemsPerBlock int) error {
// Verify the resulting part (dstIP) contains all the items
// in the correct order.
if err := testCheckItems(&dstIP, items); err != nil {
return fmt.Errorf("error checking items: %s", err)
return fmt.Errorf("error checking items: %w", err)
}
return nil
}
@ -164,7 +164,7 @@ func testCheckItems(dstIP *inmemoryPart, items []string) error {
}
}
if err := dstBsr.Error(); err != nil {
return fmt.Errorf("unexpected error in dstBsr: %s", err)
return fmt.Errorf("unexpected error in dstBsr: %w", err)
}
if !reflect.DeepEqual(items, dstItems) {
return fmt.Errorf("unequal items\ngot\n%q\nwant\n%q", dstItems, items)

View file

@ -44,7 +44,7 @@ func (mr *metaindexRow) Unmarshal(src []byte) ([]byte, error) {
// Unmarshal firstItem
tail, fi, err := encoding.UnmarshalBytes(src)
if err != nil {
return tail, fmt.Errorf("cannot unmarshal firstItem: %s", err)
return tail, fmt.Errorf("cannot unmarshal firstItem: %w", err)
}
mr.firstItem = append(mr.firstItem[:0], fi...)
src = tail
@ -85,11 +85,11 @@ func unmarshalMetaindexRows(dst []metaindexRow, r io.Reader) ([]metaindexRow, er
// since it is quite small.
compressedData, err := ioutil.ReadAll(r)
if err != nil {
return dst, fmt.Errorf("cannot read metaindex data: %s", err)
return dst, fmt.Errorf("cannot read metaindex data: %w", err)
}
data, err := encoding.DecompressZSTD(nil, compressedData)
if err != nil {
return dst, fmt.Errorf("cannot decompress metaindex data with size %d bytes: %s", len(compressedData), err)
return dst, fmt.Errorf("cannot decompress metaindex data with size %d bytes: %w", len(compressedData), err)
}
dstLen := len(dst)
@ -102,7 +102,7 @@ func unmarshalMetaindexRows(dst []metaindexRow, r io.Reader) ([]metaindexRow, er
mr := &dst[len(dst)-1]
tail, err := mr.Unmarshal(data)
if err != nil {
return dst, fmt.Errorf("cannot unmarshal metaindexRow #%d from metaindex data: %s", len(dst)-dstLen, err)
return dst, fmt.Errorf("cannot unmarshal metaindexRow #%d from metaindex data: %w", len(dst)-dstLen, err)
}
data = tail
}

View file

@ -67,13 +67,13 @@ func openFilePart(path string) (*part, error) {
var ph partHeader
if err := ph.ParseFromPath(path); err != nil {
return nil, fmt.Errorf("cannot parse path to part: %s", err)
return nil, fmt.Errorf("cannot parse path to part: %w", err)
}
metaindexPath := path + "/metaindex.bin"
metaindexFile, err := filestream.Open(metaindexPath, true)
if err != nil {
return nil, fmt.Errorf("cannot open %q: %s", metaindexPath, err)
return nil, fmt.Errorf("cannot open %q: %w", metaindexPath, err)
}
metaindexSize := fs.MustFileSize(metaindexPath)
@ -81,7 +81,7 @@ func openFilePart(path string) (*part, error) {
indexFile, err := fs.OpenReaderAt(indexPath)
if err != nil {
metaindexFile.MustClose()
return nil, fmt.Errorf("cannot open %q: %s", indexPath, err)
return nil, fmt.Errorf("cannot open %q: %w", indexPath, err)
}
indexSize := fs.MustFileSize(indexPath)
@ -90,7 +90,7 @@ func openFilePart(path string) (*part, error) {
if err != nil {
metaindexFile.MustClose()
indexFile.MustClose()
return nil, fmt.Errorf("cannot open %q: %s", itemsPath, err)
return nil, fmt.Errorf("cannot open %q: %w", itemsPath, err)
}
itemsSize := fs.MustFileSize(itemsPath)
@ -100,7 +100,7 @@ func openFilePart(path string) (*part, error) {
metaindexFile.MustClose()
indexFile.MustClose()
itemsFile.MustClose()
return nil, fmt.Errorf("cannot open %q: %s", lensPath, err)
return nil, fmt.Errorf("cannot open %q: %w", lensPath, err)
}
lensSize := fs.MustFileSize(lensPath)
@ -112,7 +112,7 @@ func newPart(ph *partHeader, path string, size uint64, metaindexReader filestrea
var errors []error
mrs, err := unmarshalMetaindexRows(nil, metaindexReader)
if err != nil {
errors = append(errors, fmt.Errorf("cannot unmarshal metaindexRows: %s", err))
errors = append(errors, fmt.Errorf("cannot unmarshal metaindexRows: %w", err))
}
metaindexReader.MustClose()
@ -131,7 +131,7 @@ func newPart(ph *partHeader, path string, size uint64, metaindexReader filestrea
if len(errors) > 0 {
// Return only the first error, since it has no sense in returning all errors.
err := fmt.Errorf("error opening part %s: %s", p.path, errors[0])
err := fmt.Errorf("error opening part %s: %w", p.path, errors[0])
p.MustClose()
return nil, err
}

View file

@ -54,7 +54,7 @@ func (hs *hexString) UnmarshalJSON(data []byte) error {
data = data[1 : len(data)-1]
b, err := hex.DecodeString(string(data))
if err != nil {
return fmt.Errorf("cannot hex-decode %q: %s", data, err)
return fmt.Errorf("cannot hex-decode %q: %w", data, err)
}
*hs = b
return nil
@ -101,7 +101,7 @@ func (ph *partHeader) ParseFromPath(partPath string) error {
// Read itemsCount from partName.
itemsCount, err := strconv.ParseUint(a[0], 10, 64)
if err != nil {
return fmt.Errorf("cannot parse itemsCount from partName %q: %s", partName, err)
return fmt.Errorf("cannot parse itemsCount from partName %q: %w", partName, err)
}
ph.itemsCount = itemsCount
if ph.itemsCount <= 0 {
@ -111,7 +111,7 @@ func (ph *partHeader) ParseFromPath(partPath string) error {
// Read blocksCount from partName.
blocksCount, err := strconv.ParseUint(a[1], 10, 64)
if err != nil {
return fmt.Errorf("cannot parse blocksCount from partName %q: %s", partName, err)
return fmt.Errorf("cannot parse blocksCount from partName %q: %w", partName, err)
}
ph.blocksCount = blocksCount
if ph.blocksCount <= 0 {
@ -126,12 +126,12 @@ func (ph *partHeader) ParseFromPath(partPath string) error {
metadataPath := partPath + "/metadata.json"
metadata, err := ioutil.ReadFile(metadataPath)
if err != nil {
return fmt.Errorf("cannot read %q: %s", metadataPath, err)
return fmt.Errorf("cannot read %q: %w", metadataPath, err)
}
var phj partHeaderJSON
if err := json.Unmarshal(metadata, &phj); err != nil {
return fmt.Errorf("cannot parse %q: %s", metadataPath, err)
return fmt.Errorf("cannot parse %q: %w", metadataPath, err)
}
if ph.itemsCount != phj.ItemsCount {
return fmt.Errorf("invalid ItemsCount in %q; got %d; want %d", metadataPath, phj.ItemsCount, ph.itemsCount)
@ -161,11 +161,11 @@ func (ph *partHeader) WriteMetadata(partPath string) error {
}
metadata, err := json.MarshalIndent(&phj, "", "\t")
if err != nil {
return fmt.Errorf("cannot marshal metadata: %s", err)
return fmt.Errorf("cannot marshal metadata: %w", err)
}
metadataPath := partPath + "/metadata.json"
if err := fs.WriteFileAtomically(metadataPath, metadata); err != nil {
return fmt.Errorf("cannot create %q: %s", metadataPath, err)
return fmt.Errorf("cannot create %q: %w", metadataPath, err)
}
return nil
}

View file

@ -279,7 +279,7 @@ func (ps *partSearch) nextBHS() error {
var err error
idxb, err = ps.readIndexBlock(mr)
if err != nil {
return fmt.Errorf("cannot read index block: %s", err)
return fmt.Errorf("cannot read index block: %w", err)
}
ps.idxbCache.Put(idxbKey, idxb)
}
@ -294,12 +294,12 @@ func (ps *partSearch) readIndexBlock(mr *metaindexRow) (*indexBlock, error) {
var err error
ps.indexBuf, err = encoding.DecompressZSTD(ps.indexBuf[:0], ps.compressedIndexBuf)
if err != nil {
return nil, fmt.Errorf("cannot decompress index block with size %d bytes: %s", len(ps.compressedIndexBuf), err)
return nil, fmt.Errorf("cannot decompress index block with size %d bytes: %w", len(ps.compressedIndexBuf), err)
}
idxb := getIndexBlock()
idxb.bhs, err = unmarshalBlockHeaders(idxb.bhs[:0], ps.indexBuf, int(mr.blockHeadersCount))
if err != nil {
return nil, fmt.Errorf("cannot unmarshal block headers from index block (offset=%d, size=%d): %s", mr.indexBlockOffset, mr.indexBlockSize, err)
return nil, fmt.Errorf("cannot unmarshal block headers from index block (offset=%d, size=%d): %w", mr.indexBlockOffset, mr.indexBlockSize, err)
}
return idxb, nil
}
@ -340,7 +340,7 @@ func (ps *partSearch) readInmemoryBlock(bh *blockHeader) (*inmemoryBlock, error)
ib := getInmemoryBlock()
if err := ib.UnmarshalData(&ps.sb, bh.firstItem, bh.commonPrefix, bh.itemsCount, bh.marshalType); err != nil {
return nil, fmt.Errorf("cannot unmarshal storage block with %d items: %s", bh.itemsCount, err)
return nil, fmt.Errorf("cannot unmarshal storage block with %d items: %w", bh.itemsCount, err)
}
return ib, nil

View file

@ -72,7 +72,7 @@ func testPartSearchSerial(p *part, items []string) error {
return fmt.Errorf("unexpected item found past the end of all the items: %X", ps.Item)
}
if err := ps.Error(); err != nil {
return fmt.Errorf("unexpected error: %s", err)
return fmt.Errorf("unexpected error: %w", err)
}
// Search for the item bigger than the items[len(items)-1]
@ -83,7 +83,7 @@ func testPartSearchSerial(p *part, items []string) error {
return fmt.Errorf("unexpected item found: %X; want nothing", ps.Item)
}
if err := ps.Error(); err != nil {
return fmt.Errorf("unexpected error when searching past the last item: %s", err)
return fmt.Errorf("unexpected error when searching past the last item: %w", err)
}
// Search for inner items
@ -107,7 +107,7 @@ func testPartSearchSerial(p *part, items []string) error {
return fmt.Errorf("unexpected item found past the end of all the items for idx %d out of %d items; loop %d: got %X", n, len(items), loop, ps.Item)
}
if err := ps.Error(); err != nil {
return fmt.Errorf("unexpected error on loop %d: %s", loop, err)
return fmt.Errorf("unexpected error on loop %d: %w", loop, err)
}
}
@ -121,7 +121,7 @@ func testPartSearchSerial(p *part, items []string) error {
return fmt.Errorf("unexpected item found at position %d: got %X; want %X", i, ps.Item, item)
}
if err := ps.Error(); err != nil {
return fmt.Errorf("unexpected error when searching for items[%d]=%X: %s", i, item, err)
return fmt.Errorf("unexpected error when searching for items[%d]=%X: %w", i, item, err)
}
}
@ -136,7 +136,7 @@ func testPartSearchSerial(p *part, items []string) error {
return fmt.Errorf("unexpected item found at position %d: got %X; want %X", i, ps.Item, item)
}
if err := ps.Error(); err != nil {
return fmt.Errorf("unexpected error when searching for items[%d]=%X: %s", i, item, err)
return fmt.Errorf("unexpected error when searching for items[%d]=%X: %w", i, item, err)
}
}
@ -151,7 +151,7 @@ func newTestPart(blocksCount, maxItemsPerBlock int) (*part, []string, error) {
var bsw blockStreamWriter
bsw.InitFromInmemoryPart(&ip)
if err := mergeBlockStreams(&ip.ph, &bsw, bsrs, nil, nil, &itemsMerged); err != nil {
return nil, nil, fmt.Errorf("cannot merge blocks: %s", err)
return nil, nil, fmt.Errorf("cannot merge blocks: %w", err)
}
if itemsMerged != uint64(len(items)) {
return nil, nil, fmt.Errorf("unexpected itemsMerged; got %d; want %d", itemsMerged, len(items))
@ -159,7 +159,7 @@ func newTestPart(blocksCount, maxItemsPerBlock int) (*part, []string, error) {
size := ip.size()
p, err := newPart(&ip.ph, "partName", size, ip.metaindexData.NewReader(), &ip.indexData, &ip.itemsData, &ip.lensData)
if err != nil {
return nil, nil, fmt.Errorf("cannot create part: %s", err)
return nil, nil, fmt.Errorf("cannot create part: %w", err)
}
return p, items, nil
}

View file

@ -169,7 +169,7 @@ func OpenTable(path string, flushCallback func(), prepareBlock PrepareBlockCallb
// Create a directory for the table if it doesn't exist yet.
if err := fs.MkdirAllIfNotExist(path); err != nil {
return nil, fmt.Errorf("cannot create directory %q: %s", path, err)
return nil, fmt.Errorf("cannot create directory %q: %w", path, err)
}
// Protect from concurrent opens.
@ -181,7 +181,7 @@ func OpenTable(path string, flushCallback func(), prepareBlock PrepareBlockCallb
// Open table parts.
pws, err := openParts(path)
if err != nil {
return nil, fmt.Errorf("cannot open table parts at %q: %s", path, err)
return nil, fmt.Errorf("cannot open table parts at %q: %w", path, err)
}
tb := &Table{
@ -481,13 +481,13 @@ func (tb *Table) convertToV1280() {
func (tb *Table) mergePartsOptimal(pws []*partWrapper, stopCh <-chan struct{}) error {
for len(pws) > defaultPartsToMerge {
if err := tb.mergeParts(pws[:defaultPartsToMerge], stopCh, false); err != nil {
return fmt.Errorf("cannot merge %d parts: %s", defaultPartsToMerge, err)
return fmt.Errorf("cannot merge %d parts: %w", defaultPartsToMerge, err)
}
pws = pws[defaultPartsToMerge:]
}
if len(pws) > 0 {
if err := tb.mergeParts(pws, stopCh, false); err != nil {
return fmt.Errorf("cannot merge %d parts: %s", len(pws), err)
return fmt.Errorf("cannot merge %d parts: %w", len(pws), err)
}
}
return nil
@ -761,7 +761,7 @@ func (tb *Table) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isOuterP
bsr.InitFromInmemoryPart(pw.mp)
} else {
if err := bsr.InitFromFilePart(pw.p.path); err != nil {
return fmt.Errorf("cannot open source part for merging: %s", err)
return fmt.Errorf("cannot open source part for merging: %w", err)
}
}
bsrs = append(bsrs, bsr)
@ -786,7 +786,7 @@ func (tb *Table) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isOuterP
bsw := getBlockStreamWriter()
compressLevel := getCompressLevelForPartItems(outItemsCount, outBlocksCount)
if err := bsw.InitFromFilePart(tmpPartPath, nocache, compressLevel); err != nil {
return fmt.Errorf("cannot create destination part %q: %s", tmpPartPath, err)
return fmt.Errorf("cannot create destination part %q: %w", tmpPartPath, err)
}
// Merge parts into a temporary location.
@ -797,10 +797,10 @@ func (tb *Table) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isOuterP
if err == errForciblyStopped {
return err
}
return fmt.Errorf("error when merging parts to %q: %s", tmpPartPath, err)
return fmt.Errorf("error when merging parts to %q: %w", tmpPartPath, err)
}
if err := ph.WriteMetadata(tmpPartPath); err != nil {
return fmt.Errorf("cannot write metadata to destination part %q: %s", tmpPartPath, err)
return fmt.Errorf("cannot write metadata to destination part %q: %w", tmpPartPath, err)
}
// Close bsrs (aka source parts).
@ -821,18 +821,18 @@ func (tb *Table) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isOuterP
fmt.Fprintf(&bb, "%s -> %s\n", tmpPartPath, dstPartPath)
txnPath := fmt.Sprintf("%s/txn/%016X", tb.path, mergeIdx)
if err := fs.WriteFileAtomically(txnPath, bb.B); err != nil {
return fmt.Errorf("cannot create transaction file %q: %s", txnPath, err)
return fmt.Errorf("cannot create transaction file %q: %w", txnPath, err)
}
// Run the created transaction.
if err := runTransaction(&tb.snapshotLock, tb.path, txnPath); err != nil {
return fmt.Errorf("cannot execute transaction %q: %s", txnPath, err)
return fmt.Errorf("cannot execute transaction %q: %w", txnPath, err)
}
// Open the merged part.
newP, err := openFilePart(dstPartPath)
if err != nil {
return fmt.Errorf("cannot open merged part %q: %s", dstPartPath, err)
return fmt.Errorf("cannot open merged part %q: %w", dstPartPath, err)
}
newPSize := newP.size
newPW := &partWrapper{
@ -950,7 +950,7 @@ func openParts(path string) ([]*partWrapper, error) {
}
d, err := os.Open(path)
if err != nil {
return nil, fmt.Errorf("cannot open difrectory: %s", err)
return nil, fmt.Errorf("cannot open difrectory: %w", err)
}
defer fs.MustClose(d)
@ -958,19 +958,19 @@ func openParts(path string) ([]*partWrapper, error) {
// Snapshots cannot be created yet, so use fakeSnapshotLock.
var fakeSnapshotLock sync.RWMutex
if err := runTransactions(&fakeSnapshotLock, path); err != nil {
return nil, fmt.Errorf("cannot run transactions: %s", err)
return nil, fmt.Errorf("cannot run transactions: %w", err)
}
txnDir := path + "/txn"
fs.MustRemoveAll(txnDir)
if err := fs.MkdirAllFailIfExist(txnDir); err != nil {
return nil, fmt.Errorf("cannot create %q: %s", txnDir, err)
return nil, fmt.Errorf("cannot create %q: %w", txnDir, err)
}
tmpDir := path + "/tmp"
fs.MustRemoveAll(tmpDir)
if err := fs.MkdirAllFailIfExist(tmpDir); err != nil {
return nil, fmt.Errorf("cannot create %q: %s", tmpDir, err)
return nil, fmt.Errorf("cannot create %q: %w", tmpDir, err)
}
fs.MustSyncPath(path)
@ -978,7 +978,7 @@ func openParts(path string) ([]*partWrapper, error) {
// Open parts.
fis, err := d.Readdir(-1)
if err != nil {
return nil, fmt.Errorf("cannot read directory: %s", err)
return nil, fmt.Errorf("cannot read directory: %w", err)
}
var pws []*partWrapper
for _, fi := range fis {
@ -995,7 +995,7 @@ func openParts(path string) ([]*partWrapper, error) {
p, err := openFilePart(partPath)
if err != nil {
mustCloseParts(pws)
return nil, fmt.Errorf("cannot open part %q: %s", partPath, err)
return nil, fmt.Errorf("cannot open part %q: %w", partPath, err)
}
pw := &partWrapper{
p: p,
@ -1028,11 +1028,11 @@ func (tb *Table) CreateSnapshotAt(dstDir string) error {
srcDir := tb.path
srcDir, err = filepath.Abs(srcDir)
if err != nil {
return fmt.Errorf("cannot obtain absolute dir for %q: %s", srcDir, err)
return fmt.Errorf("cannot obtain absolute dir for %q: %w", srcDir, err)
}
dstDir, err = filepath.Abs(dstDir)
if err != nil {
return fmt.Errorf("cannot obtain absolute dir for %q: %s", dstDir, err)
return fmt.Errorf("cannot obtain absolute dir for %q: %w", dstDir, err)
}
if strings.HasPrefix(dstDir, srcDir+"/") {
return fmt.Errorf("cannot create snapshot %q inside the data dir %q", dstDir, srcDir)
@ -1047,18 +1047,18 @@ func (tb *Table) CreateSnapshotAt(dstDir string) error {
defer tb.snapshotLock.Unlock()
if err := fs.MkdirAllFailIfExist(dstDir); err != nil {
return fmt.Errorf("cannot create snapshot dir %q: %s", dstDir, err)
return fmt.Errorf("cannot create snapshot dir %q: %w", dstDir, err)
}
d, err := os.Open(srcDir)
if err != nil {
return fmt.Errorf("cannot open difrectory: %s", err)
return fmt.Errorf("cannot open difrectory: %w", err)
}
defer fs.MustClose(d)
fis, err := d.Readdir(-1)
if err != nil {
return fmt.Errorf("cannot read directory: %s", err)
return fmt.Errorf("cannot read directory: %w", err)
}
for _, fi := range fis {
fn := fi.Name()
@ -1068,7 +1068,7 @@ func (tb *Table) CreateSnapshotAt(dstDir string) error {
srcPath := srcDir + "/" + fn
dstPath := dstDir + "/" + fn
if err := os.Link(srcPath, dstPath); err != nil {
return fmt.Errorf("cannot hard link from %q to %q: %s", srcPath, dstPath, err)
return fmt.Errorf("cannot hard link from %q to %q: %w", srcPath, dstPath, err)
}
default:
// Skip other non-directories.
@ -1082,7 +1082,7 @@ func (tb *Table) CreateSnapshotAt(dstDir string) error {
srcPartPath := srcDir + "/" + fn
dstPartPath := dstDir + "/" + fn
if err := fs.HardLinkFiles(srcPartPath, dstPartPath); err != nil {
return fmt.Errorf("cannot create hard links from %q to %q: %s", srcPartPath, dstPartPath, err)
return fmt.Errorf("cannot create hard links from %q to %q: %w", srcPartPath, dstPartPath, err)
}
}
@ -1107,13 +1107,13 @@ func runTransactions(txnLock *sync.RWMutex, path string) error {
if os.IsNotExist(err) {
return nil
}
return fmt.Errorf("cannot open %q: %s", txnDir, err)
return fmt.Errorf("cannot open %q: %w", txnDir, err)
}
defer fs.MustClose(d)
fis, err := d.Readdir(-1)
if err != nil {
return fmt.Errorf("cannot read directory %q: %s", d.Name(), err)
return fmt.Errorf("cannot read directory %q: %w", d.Name(), err)
}
// Sort transaction files by id, since transactions must be ordered.
@ -1129,7 +1129,7 @@ func runTransactions(txnLock *sync.RWMutex, path string) error {
}
txnPath := txnDir + "/" + fn
if err := runTransaction(txnLock, path, txnPath); err != nil {
return fmt.Errorf("cannot run transaction from %q: %s", txnPath, err)
return fmt.Errorf("cannot run transaction from %q: %w", txnPath, err)
}
}
return nil
@ -1143,7 +1143,7 @@ func runTransaction(txnLock *sync.RWMutex, pathPrefix, txnPath string) error {
data, err := ioutil.ReadFile(txnPath)
if err != nil {
return fmt.Errorf("cannot read transaction file: %s", err)
return fmt.Errorf("cannot read transaction file: %w", err)
}
if len(data) > 0 && data[len(data)-1] == '\n' {
data = data[:len(data)-1]
@ -1164,7 +1164,7 @@ func runTransaction(txnLock *sync.RWMutex, pathPrefix, txnPath string) error {
for _, path := range rmPaths {
path, err := validatePath(pathPrefix, path)
if err != nil {
return fmt.Errorf("invalid path to remove: %s", err)
return fmt.Errorf("invalid path to remove: %w", err)
}
removeWG.Add(1)
fs.MustRemoveAllWithDoneCallback(path, removeWG.Done)
@ -1175,15 +1175,15 @@ func runTransaction(txnLock *sync.RWMutex, pathPrefix, txnPath string) error {
dstPath := mvPaths[1]
srcPath, err = validatePath(pathPrefix, srcPath)
if err != nil {
return fmt.Errorf("invalid source path to rename: %s", err)
return fmt.Errorf("invalid source path to rename: %w", err)
}
dstPath, err = validatePath(pathPrefix, dstPath)
if err != nil {
return fmt.Errorf("invalid destination path to rename: %s", err)
return fmt.Errorf("invalid destination path to rename: %w", err)
}
if fs.IsPathExist(srcPath) {
if err := os.Rename(srcPath, dstPath); err != nil {
return fmt.Errorf("cannot rename %q to %q: %s", srcPath, dstPath, err)
return fmt.Errorf("cannot rename %q to %q: %w", srcPath, dstPath, err)
}
} else if !fs.IsPathExist(dstPath) {
// Emit info message for the expected condition after unclean shutdown on NFS disk.
@ -1217,12 +1217,12 @@ func validatePath(pathPrefix, path string) (string, error) {
pathPrefix, err = filepath.Abs(pathPrefix)
if err != nil {
return path, fmt.Errorf("cannot determine absolute path for pathPrefix=%q: %s", pathPrefix, err)
return path, fmt.Errorf("cannot determine absolute path for pathPrefix=%q: %w", pathPrefix, err)
}
path, err = filepath.Abs(path)
if err != nil {
return path, fmt.Errorf("cannot determine absolute path for %q: %s", path, err)
return path, fmt.Errorf("cannot determine absolute path for %q: %w", path, err)
}
if !strings.HasPrefix(path, pathPrefix+"/") {
return path, fmt.Errorf("invalid path %q; must start with %q", path, pathPrefix+"/")

View file

@ -104,7 +104,7 @@ func (ts *TableSearch) Seek(k []byte) {
}
if len(errors) > 0 {
// Return only the first error, since it has no sense in returning all errors.
ts.err = fmt.Errorf("cannot seek %q: %s", k, errors[0])
ts.err = fmt.Errorf("cannot seek %q: %w", k, errors[0])
return
}
if len(ts.psHeap) == 0 {
@ -149,7 +149,7 @@ func (ts *TableSearch) NextItem() bool {
ts.err = ts.nextBlock()
if ts.err != nil {
if ts.err != io.EOF {
ts.err = fmt.Errorf("cannot obtain the next block to search in the table: %s", ts.err)
ts.err = fmt.Errorf("cannot obtain the next block to search in the table: %w", ts.err)
}
return false
}

View file

@ -98,7 +98,7 @@ func testTableSearchConcurrent(tb *Table, items []string) error {
select {
case err := <-ch:
if err != nil {
return fmt.Errorf("unexpected error: %s", err)
return fmt.Errorf("unexpected error: %w", err)
}
case <-time.After(time.Second * 5):
return fmt.Errorf("timeout")
@ -139,7 +139,7 @@ func testTableSearchSerial(tb *Table, items []string) error {
return fmt.Errorf("superflouos item found at position %d when searching for %q: %q", n, key, ts.Item)
}
if err := ts.Error(); err != nil {
return fmt.Errorf("unexpected error when searching for %q: %s", key, err)
return fmt.Errorf("unexpected error when searching for %q: %w", key, err)
}
}
ts.MustClose()
@ -153,13 +153,13 @@ func newTestTable(path string, itemsCount int) (*Table, []string, error) {
}
tb, err := OpenTable(path, flushCallback, nil)
if err != nil {
return nil, nil, fmt.Errorf("cannot open table: %s", err)
return nil, nil, fmt.Errorf("cannot open table: %w", err)
}
items := make([]string, itemsCount)
for i := 0; i < itemsCount; i++ {
item := fmt.Sprintf("%d:%d", rand.Intn(1e9), i)
if err := tb.AddItems([][]byte{[]byte(item)}); err != nil {
return nil, nil, fmt.Errorf("cannot add item: %s", err)
return nil, nil, fmt.Errorf("cannot add item: %w", err)
}
items[i] = item
}

View file

@ -27,7 +27,7 @@ func benchmarkTableSearch(b *testing.B, itemsCount int) {
tb, items, err := newTestTable(path, itemsCount)
if err != nil {
panic(fmt.Errorf("cannot create test table at %q with %d items: %s", path, itemsCount, err))
panic(fmt.Errorf("cannot create test table at %q with %d items: %w", path, itemsCount, err))
}
// Force finishing pending merges
@ -106,7 +106,7 @@ func benchmarkTableSearchKeysExt(b *testing.B, tb *Table, keys [][]byte, stripSu
}
}
if err := ts.Error(); err != nil {
panic(fmt.Errorf("BUG: unexpected error for searchKeys[%d]=%q: %s", i, searchKey, err))
panic(fmt.Errorf("BUG: unexpected error for searchKeys[%d]=%q: %w", i, searchKey, err))
}
}
}

View file

@ -56,10 +56,10 @@ func (cp *ConnPool) Get() (*handshake.BufferedConn, error) {
// Pool is empty. Create new connection.
c, err := cp.d.Dial()
if err != nil {
return nil, fmt.Errorf("cannot dial %s: %s", cp.d.Addr(), err)
return nil, fmt.Errorf("cannot dial %s: %w", cp.d.Addr(), err)
}
if bc, err = cp.handshakeFunc(c, cp.compressionLevel); err != nil {
err = fmt.Errorf("cannot perform %q handshake with server %q: %s", cp.name, cp.d.Addr(), err)
err = fmt.Errorf("cannot perform %q handshake with server %q: %w", cp.name, cp.d.Addr(), err)
_ = c.Close()
return nil, err
}

View file

@ -177,7 +177,7 @@ func tryOpeningQueue(path, name string, chunkFileSize, maxBlockSize, maxPendingB
}
if err := fs.MkdirAllIfNotExist(path); err != nil {
return nil, fmt.Errorf("cannot create directory %q: %s", path, err)
return nil, fmt.Errorf("cannot create directory %q: %w", path, err)
}
// Read metainfo.
@ -193,13 +193,13 @@ func tryOpeningQueue(path, name string, chunkFileSize, maxBlockSize, maxPendingB
mi.Reset()
mi.Name = q.name
if err := mi.WriteToFile(metainfoPath); err != nil {
return nil, fmt.Errorf("cannot create %q: %s", metainfoPath, err)
return nil, fmt.Errorf("cannot create %q: %w", metainfoPath, err)
}
// Create initial chunk file.
filepath := q.chunkFilePath(0)
if err := fs.WriteFileAtomically(filepath, nil); err != nil {
return nil, fmt.Errorf("cannot create %q: %s", filepath, err)
return nil, fmt.Errorf("cannot create %q: %w", filepath, err)
}
}
if mi.Name != q.name {
@ -209,7 +209,7 @@ func tryOpeningQueue(path, name string, chunkFileSize, maxBlockSize, maxPendingB
// Locate reader and writer chunks in the path.
fis, err := ioutil.ReadDir(path)
if err != nil {
return nil, fmt.Errorf("cannot read contents of the directory %q: %s", path, err)
return nil, fmt.Errorf("cannot read contents of the directory %q: %w", path, err)
}
for _, fi := range fis {
fname := fi.Name()
@ -406,11 +406,11 @@ func (q *Queue) writeBlockLocked(block []byte) error {
q.writerPath = q.chunkFilePath(q.writerOffset)
w, err := filestream.Create(q.writerPath, false)
if err != nil {
return fmt.Errorf("cannot create chunk file %q: %s", q.writerPath, err)
return fmt.Errorf("cannot create chunk file %q: %w", q.writerPath, err)
}
q.writer = w
if err := q.flushMetainfo(); err != nil {
return fmt.Errorf("cannot flush metainfo: %s", err)
return fmt.Errorf("cannot flush metainfo: %w", err)
}
}
@ -421,12 +421,12 @@ func (q *Queue) writeBlockLocked(block []byte) error {
err := q.write(header.B)
headerBufPool.Put(header)
if err != nil {
return fmt.Errorf("cannot write header with size 8 bytes to %q: %s", q.writerPath, err)
return fmt.Errorf("cannot write header with size 8 bytes to %q: %w", q.writerPath, err)
}
// Write block contents.
if err := q.write(block); err != nil {
return fmt.Errorf("cannot write block contents with size %d bytes to %q: %s", len(block), q.writerPath, err)
return fmt.Errorf("cannot write block contents with size %d bytes to %q: %w", len(block), q.writerPath, err)
}
q.blocksWritten.Inc()
q.bytesWritten.Add(len(block))
@ -474,11 +474,11 @@ func (q *Queue) readBlockLocked(dst []byte) ([]byte, error) {
q.readerPath = q.chunkFilePath(q.readerOffset)
r, err := filestream.Open(q.readerPath, true)
if err != nil {
return dst, fmt.Errorf("cannot open chunk file %q: %s", q.readerPath, err)
return dst, fmt.Errorf("cannot open chunk file %q: %w", q.readerPath, err)
}
q.reader = r
if err := q.flushMetainfo(); err != nil {
return dst, fmt.Errorf("cannot flush metainfo: %s", err)
return dst, fmt.Errorf("cannot flush metainfo: %w", err)
}
}
@ -489,7 +489,7 @@ func (q *Queue) readBlockLocked(dst []byte) ([]byte, error) {
blockLen := encoding.UnmarshalUint64(header.B)
headerBufPool.Put(header)
if err != nil {
return dst, fmt.Errorf("cannot read header with size 8 bytes from %q: %s", q.readerPath, err)
return dst, fmt.Errorf("cannot read header with size 8 bytes from %q: %w", q.readerPath, err)
}
if blockLen > q.maxBlockSize {
return dst, fmt.Errorf("too big block size read from %q: %d bytes; cannot exceed %d bytes", q.readerPath, blockLen, q.maxBlockSize)
@ -499,7 +499,7 @@ func (q *Queue) readBlockLocked(dst []byte) ([]byte, error) {
dstLen := len(dst)
dst = bytesutil.Resize(dst, dstLen+int(blockLen))
if err := q.readFull(dst[dstLen:]); err != nil {
return dst, fmt.Errorf("cannot read block contents with size %d bytes from %q: %s", blockLen, q.readerPath, err)
return dst, fmt.Errorf("cannot read block contents with size %d bytes from %q: %w", blockLen, q.readerPath, err)
}
q.blocksRead.Inc()
q.bytesRead.Add(int(blockLen))
@ -546,7 +546,7 @@ func (q *Queue) flushMetainfo() error {
}
metainfoPath := q.metainfoPath()
if err := mi.WriteToFile(metainfoPath); err != nil {
return fmt.Errorf("cannot write metainfo to %q: %s", metainfoPath, err)
return fmt.Errorf("cannot write metainfo to %q: %w", metainfoPath, err)
}
return nil
}
@ -567,10 +567,10 @@ func (mi *metainfo) Reset() {
func (mi *metainfo) WriteToFile(path string) error {
data, err := json.Marshal(mi)
if err != nil {
return fmt.Errorf("cannot marshal persistent queue metainfo %#v: %s", mi, err)
return fmt.Errorf("cannot marshal persistent queue metainfo %#v: %w", mi, err)
}
if err := ioutil.WriteFile(path, data, 0600); err != nil {
return fmt.Errorf("cannot write persistent queue metainfo to %q: %s", path, err)
return fmt.Errorf("cannot write persistent queue metainfo to %q: %w", path, err)
}
return nil
}
@ -582,10 +582,10 @@ func (mi *metainfo) ReadFromFile(path string) error {
if os.IsNotExist(err) {
return err
}
return fmt.Errorf("cannot read %q: %s", path, err)
return fmt.Errorf("cannot read %q: %w", path, err)
}
if err := json.Unmarshal(data, mi); err != nil {
return fmt.Errorf("cannot unmarshal persistent queue metainfo from %q: %s", path, err)
return fmt.Errorf("cannot unmarshal persistent queue metainfo from %q: %w", path, err)
}
if mi.ReaderOffset > mi.WriterOffset {
return fmt.Errorf("invalid data read from %q: readerOffset=%d cannot exceed writerOffset=%d", path, mi.ReaderOffset, mi.WriterOffset)

View file

@ -495,20 +495,20 @@ func TestQueueLimitedSize(t *testing.T) {
func mustCreateFile(path, contents string) {
if err := ioutil.WriteFile(path, []byte(contents), 0600); err != nil {
panic(fmt.Errorf("cannot create file %q with %d bytes contents: %s", path, len(contents), err))
panic(fmt.Errorf("cannot create file %q with %d bytes contents: %w", path, len(contents), err))
}
}
func mustCreateDir(path string) {
mustDeleteDir(path)
if err := os.MkdirAll(path, 0700); err != nil {
panic(fmt.Errorf("cannot create dir %q: %s", path, err))
panic(fmt.Errorf("cannot create dir %q: %w", path, err))
}
}
func mustDeleteDir(path string) {
if err := os.RemoveAll(path); err != nil {
panic(fmt.Errorf("cannot remove dir %q: %s", path, err))
panic(fmt.Errorf("cannot remove dir %q: %w", path, err))
}
}
@ -516,6 +516,6 @@ func mustCreateEmptyMetainfo(path, name string) {
var mi metainfo
mi.Name = name
if err := mi.WriteToFile(path + "/metainfo.json"); err != nil {
panic(fmt.Errorf("cannot create metainfo: %s", err))
panic(fmt.Errorf("cannot create metainfo: %w", err))
}
}

View file

@ -93,7 +93,7 @@ func NewConfig(baseDir string, basicAuth *BasicAuthConfig, bearerToken, bearerTo
path := getFilepath(baseDir, basicAuth.PasswordFile)
pass, err := readPasswordFromFile(path)
if err != nil {
return nil, fmt.Errorf("cannot read password from `password_file`=%q set in `basic_auth` section: %s", basicAuth.PasswordFile, err)
return nil, fmt.Errorf("cannot read password from `password_file`=%q set in `basic_auth` section: %w", basicAuth.PasswordFile, err)
}
password = pass
}
@ -109,7 +109,7 @@ func NewConfig(baseDir string, basicAuth *BasicAuthConfig, bearerToken, bearerTo
path := getFilepath(baseDir, bearerTokenFile)
token, err := readPasswordFromFile(path)
if err != nil {
return nil, fmt.Errorf("cannot read bearer token from `bearer_token_file`=%q: %s", bearerTokenFile, err)
return nil, fmt.Errorf("cannot read bearer token from `bearer_token_file`=%q: %w", bearerTokenFile, err)
}
bearerToken = token
}
@ -131,7 +131,7 @@ func NewConfig(baseDir string, basicAuth *BasicAuthConfig, bearerToken, bearerTo
keyPath := getFilepath(baseDir, tlsConfig.KeyFile)
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
if err != nil {
return nil, fmt.Errorf("cannot load TLS certificate from `cert_file`=%q, `key_file`=%q: %s", tlsConfig.CertFile, tlsConfig.KeyFile, err)
return nil, fmt.Errorf("cannot load TLS certificate from `cert_file`=%q, `key_file`=%q: %w", tlsConfig.CertFile, tlsConfig.KeyFile, err)
}
tlsCertificate = &cert
}
@ -139,7 +139,7 @@ func NewConfig(baseDir string, basicAuth *BasicAuthConfig, bearerToken, bearerTo
path := getFilepath(baseDir, tlsConfig.CAFile)
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("cannot read `ca_file` %q: %s", tlsConfig.CAFile, err)
return nil, fmt.Errorf("cannot read `ca_file` %q: %w", tlsConfig.CAFile, err)
}
tlsRootCA = x509.NewCertPool()
if !tlsRootCA.AppendCertsFromPEM(data) {

View file

@ -14,7 +14,7 @@ func MarshalWriteRequest(dst []byte, wr *WriteRequest) []byte {
dst = dst[:dstLen+size]
n, err := wr.MarshalToSizedBuffer(dst[dstLen:])
if err != nil {
panic(fmt.Errorf("BUG: unexpected error when marshaling WriteRequest: %s", err))
panic(fmt.Errorf("BUG: unexpected error when marshaling WriteRequest: %w", err))
}
return dst[:dstLen+n]
}

View file

@ -26,11 +26,11 @@ type RelabelConfig struct {
func LoadRelabelConfigs(path string) ([]ParsedRelabelConfig, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("cannot read `relabel_configs` from %q: %s", path, err)
return nil, fmt.Errorf("cannot read `relabel_configs` from %q: %w", path, err)
}
var rcs []RelabelConfig
if err := yaml.UnmarshalStrict(data, &rcs); err != nil {
return nil, fmt.Errorf("cannot unmarshal `relabel_configs` from %q: %s", path, err)
return nil, fmt.Errorf("cannot unmarshal `relabel_configs` from %q: %w", path, err)
}
return ParseRelabelConfigs(nil, rcs)
}
@ -44,7 +44,7 @@ func ParseRelabelConfigs(dst []ParsedRelabelConfig, rcs []RelabelConfig) ([]Pars
var err error
dst, err = parseRelabelConfig(dst, &rcs[i])
if err != nil {
return dst, fmt.Errorf("error when parsing `relabel_config` #%d: %s", i+1, err)
return dst, fmt.Errorf("error when parsing `relabel_config` #%d: %w", i+1, err)
}
}
return dst, nil
@ -67,7 +67,7 @@ func parseRelabelConfig(dst []ParsedRelabelConfig, rc *RelabelConfig) ([]ParsedR
}
re, err := regexp.Compile(regex)
if err != nil {
return dst, fmt.Errorf("cannot parse `regex` %q: %s", regex, err)
return dst, fmt.Errorf("cannot parse `regex` %q: %w", regex, err)
}
regexCompiled = re
}

View file

@ -94,13 +94,13 @@ func (c *client) ReadData(dst []byte) ([]byte, error) {
fasthttp.ReleaseResponse(resp)
if err == fasthttp.ErrTimeout {
scrapesTimedout.Inc()
return dst, fmt.Errorf("error when scraping %q with timeout %s: %s", c.scrapeURL, c.hc.ReadTimeout, err)
return dst, fmt.Errorf("error when scraping %q with timeout %s: %w", c.scrapeURL, c.hc.ReadTimeout, err)
}
if err == fasthttp.ErrBodyTooLarge {
return dst, fmt.Errorf("the response from %q exceeds -promscrape.maxScrapeSize=%d; "+
"either reduce the response size for the target or increase -promscrape.maxScrapeSize", c.scrapeURL, *maxScrapeSize)
}
return dst, fmt.Errorf("error when scraping %q: %s", c.scrapeURL, err)
return dst, fmt.Errorf("error when scraping %q: %w", c.scrapeURL, err)
}
dstLen := len(dst)
if ce := resp.Header.Peek("Content-Encoding"); string(ce) == "gzip" {
@ -109,7 +109,7 @@ func (c *client) ReadData(dst []byte) ([]byte, error) {
if err != nil {
fasthttp.ReleaseResponse(resp)
scrapesGunzipFailed.Inc()
return dst, fmt.Errorf("cannot ungzip response from %q: %s", c.scrapeURL, err)
return dst, fmt.Errorf("cannot ungzip response from %q: %w", c.scrapeURL, err)
}
scrapesGunzipped.Inc()
} else {
@ -146,7 +146,7 @@ again:
// Retry request if the server closed the keep-alive connection during the first attempt.
attempts++
if attempts > 3 {
return fmt.Errorf("the server closed 3 subsequent connections: %s", err)
return fmt.Errorf("the server closed 3 subsequent connections: %w", err)
}
goto again
}

View file

@ -99,11 +99,11 @@ type StaticConfig struct {
func loadStaticConfigs(path string) ([]StaticConfig, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("cannot read `static_configs` from %q: %s", path, err)
return nil, fmt.Errorf("cannot read `static_configs` from %q: %w", path, err)
}
var stcs []StaticConfig
if err := yaml.UnmarshalStrict(data, &stcs); err != nil {
return nil, fmt.Errorf("cannot unmarshal `static_configs` from %q: %s", path, err)
return nil, fmt.Errorf("cannot unmarshal `static_configs` from %q: %w", path, err)
}
return stcs, nil
}
@ -112,11 +112,11 @@ func loadStaticConfigs(path string) ([]StaticConfig, error) {
func loadConfig(path string) (cfg *Config, data []byte, err error) {
data, err = ioutil.ReadFile(path)
if err != nil {
return nil, nil, fmt.Errorf("cannot read Prometheus config from %q: %s", path, err)
return nil, nil, fmt.Errorf("cannot read Prometheus config from %q: %w", path, err)
}
var cfgObj Config
if err := cfgObj.parse(data, path); err != nil {
return nil, nil, fmt.Errorf("cannot parse Prometheus config from %q: %s", path, err)
return nil, nil, fmt.Errorf("cannot parse Prometheus config from %q: %w", path, err)
}
if *dryRun {
// This is a dirty hack for checking Prometheus config only.
@ -130,18 +130,18 @@ func loadConfig(path string) (cfg *Config, data []byte, err error) {
func (cfg *Config) parse(data []byte, path string) error {
if err := unmarshalMaybeStrict(data, cfg); err != nil {
return fmt.Errorf("cannot unmarshal data: %s", err)
return fmt.Errorf("cannot unmarshal data: %w", err)
}
absPath, err := filepath.Abs(path)
if err != nil {
return fmt.Errorf("cannot obtain abs path for %q: %s", path, err)
return fmt.Errorf("cannot obtain abs path for %q: %w", path, err)
}
cfg.baseDir = filepath.Dir(absPath)
for i := range cfg.ScrapeConfigs {
sc := &cfg.ScrapeConfigs[i]
swc, err := getScrapeWorkConfig(sc, cfg.baseDir, &cfg.Global)
if err != nil {
return fmt.Errorf("cannot parse `scrape_config` #%d: %s", i+1, err)
return fmt.Errorf("cannot parse `scrape_config` #%d: %w", i+1, err)
}
sc.swc = swc
}
@ -378,17 +378,17 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf
params := sc.Params
ac, err := promauth.NewConfig(baseDir, sc.BasicAuth, sc.BearerToken, sc.BearerTokenFile, sc.TLSConfig)
if err != nil {
return nil, fmt.Errorf("cannot parse auth config for `job_name` %q: %s", jobName, err)
return nil, fmt.Errorf("cannot parse auth config for `job_name` %q: %w", jobName, err)
}
var relabelConfigs []promrelabel.ParsedRelabelConfig
relabelConfigs, err = promrelabel.ParseRelabelConfigs(relabelConfigs[:0], sc.RelabelConfigs)
if err != nil {
return nil, fmt.Errorf("cannot parse `relabel_configs` for `job_name` %q: %s", jobName, err)
return nil, fmt.Errorf("cannot parse `relabel_configs` for `job_name` %q: %w", jobName, err)
}
var metricRelabelConfigs []promrelabel.ParsedRelabelConfig
metricRelabelConfigs, err = promrelabel.ParseRelabelConfigs(metricRelabelConfigs[:0], sc.MetricRelabelConfigs)
if err != nil {
return nil, fmt.Errorf("cannot parse `metric_relabel_configs` for `job_name` %q: %s", jobName, err)
return nil, fmt.Errorf("cannot parse `metric_relabel_configs` for `job_name` %q: %w", jobName, err)
}
swc := &scrapeWorkConfig{
scrapeInterval: scrapeInterval,
@ -580,7 +580,7 @@ func appendScrapeWork(dst []ScrapeWork, swc *scrapeWorkConfig, target string, ex
paramsStr := url.Values(paramsRelabeled).Encode()
scrapeURL := fmt.Sprintf("%s://%s%s%s%s", schemeRelabeled, addressRelabeled, metricsPathRelabeled, optionalQuestion, paramsStr)
if _, err := url.Parse(scrapeURL); err != nil {
return dst, fmt.Errorf("invalid url %q for scheme=%q (%q), target=%q (%q), metrics_path=%q (%q) for `job_name` %q: %s",
return dst, fmt.Errorf("invalid url %q for scheme=%q (%q), target=%q (%q), metrics_path=%q (%q) for `job_name` %q: %w",
scrapeURL, swc.scheme, schemeRelabeled, target, addressRelabeled, swc.metricsPath, metricsPathRelabeled, swc.jobName, err)
}
// Set missing "instance" label according to https://www.robustperception.io/life-of-a-label

View file

@ -135,7 +135,7 @@ scrape_configs:
func getFileSDScrapeWork(data []byte, path string) ([]ScrapeWork, error) {
var cfg Config
if err := cfg.parse(data, path); err != nil {
return nil, fmt.Errorf("cannot parse data: %s", err)
return nil, fmt.Errorf("cannot parse data: %w", err)
}
return cfg.getFileSDScrapeWork(nil), nil
}
@ -143,7 +143,7 @@ func getFileSDScrapeWork(data []byte, path string) ([]ScrapeWork, error) {
func getStaticScrapeWork(data []byte, path string) ([]ScrapeWork, error) {
var cfg Config
if err := cfg.parse(data, path); err != nil {
return nil, fmt.Errorf("cannot parse data: %s", err)
return nil, fmt.Errorf("cannot parse data: %w", err)
}
return cfg.getStaticScrapeWork(), nil
}

View file

@ -22,7 +22,7 @@ type AgentConfig struct {
func parseAgent(data []byte) (*Agent, error) {
var a Agent
if err := json.Unmarshal(data, &a); err != nil {
return nil, fmt.Errorf("cannot unmarshal agent info from %q: %s", data, err)
return nil, fmt.Errorf("cannot unmarshal agent info from %q: %w", data, err)
}
return &a, nil
}

View file

@ -47,7 +47,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
}
ac, err := promauth.NewConfig(baseDir, ba, token, "", sdc.TLSConfig)
if err != nil {
return nil, fmt.Errorf("cannot parse auth config: %s", err)
return nil, fmt.Errorf("cannot parse auth config: %w", err)
}
apiServer := sdc.Server
if apiServer == "" {
@ -62,7 +62,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
}
client, err := discoveryutils.NewClient(apiServer, ac)
if err != nil {
return nil, fmt.Errorf("cannot create HTTP client for %q: %s", apiServer, err)
return nil, fmt.Errorf("cannot create HTTP client for %q: %w", apiServer, err)
}
tagSeparator := ","
if sdc.TagSeparator != nil {
@ -92,7 +92,7 @@ func getToken(token *string) (string, error) {
if tokenFile := os.Getenv("CONSUL_HTTP_TOKEN_FILE"); tokenFile != "" {
data, err := ioutil.ReadFile(tokenFile)
if err != nil {
return "", fmt.Errorf("cannot read consul token file %q; probably, `token` arg is missing in `consul_sd_config`? error: %s", tokenFile, err)
return "", fmt.Errorf("cannot read consul token file %q; probably, `token` arg is missing in `consul_sd_config`? error: %w", tokenFile, err)
}
return string(data), nil
}
@ -108,7 +108,7 @@ func getDatacenter(client *discoveryutils.Client, dc string) (string, error) {
// See https://www.consul.io/api/agent.html#read-configuration
data, err := client.GetAPIResponse("/v1/agent/self")
if err != nil {
return "", fmt.Errorf("cannot query consul agent info: %s", err)
return "", fmt.Errorf("cannot query consul agent info: %w", err)
}
a, err := parseAgent(data)
if err != nil {

View file

@ -30,11 +30,11 @@ type SDConfig struct {
func GetLabels(sdc *SDConfig, baseDir string) ([]map[string]string, error) {
cfg, err := getAPIConfig(sdc, baseDir)
if err != nil {
return nil, fmt.Errorf("cannot get API config: %s", err)
return nil, fmt.Errorf("cannot get API config: %w", err)
}
ms, err := getServiceNodesLabels(cfg)
if err != nil {
return nil, fmt.Errorf("error when fetching service nodes data from Consul: %s", err)
return nil, fmt.Errorf("error when fetching service nodes data from Consul: %w", err)
}
return ms, nil
}

View file

@ -28,11 +28,11 @@ func getAllServiceNodes(cfg *apiConfig) ([]ServiceNode, error) {
// See https://www.consul.io/api/catalog.html#list-services
data, err := getAPIResponse(cfg, "/v1/catalog/services")
if err != nil {
return nil, fmt.Errorf("cannot obtain services: %s", err)
return nil, fmt.Errorf("cannot obtain services: %w", err)
}
var m map[string][]string
if err := json.Unmarshal(data, &m); err != nil {
return nil, fmt.Errorf("cannot parse services response %q: %s", data, err)
return nil, fmt.Errorf("cannot parse services response %q: %w", data, err)
}
serviceNames := make(map[string]bool)
for serviceName, tags := range m {
@ -125,7 +125,7 @@ func getServiceNodes(cfg *apiConfig, serviceName string) ([]ServiceNode, error)
}
data, err := getAPIResponse(cfg, path)
if err != nil {
return nil, fmt.Errorf("cannot obtain instances for serviceName=%q: %s", serviceName, err)
return nil, fmt.Errorf("cannot obtain instances for serviceName=%q: %w", serviceName, err)
}
return parseServiceNodes(data)
}
@ -173,7 +173,7 @@ type Check struct {
func parseServiceNodes(data []byte) ([]ServiceNode, error) {
var sns []ServiceNode
if err := json.Unmarshal(data, &sns); err != nil {
return nil, fmt.Errorf("cannot unmarshal ServiceNodes from %q: %s", data, err)
return nil, fmt.Errorf("cannot unmarshal ServiceNodes from %q: %w", data, err)
}
return sns, nil
}

View file

@ -36,7 +36,7 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) {
if len(region) == 0 {
r, err := getDefaultRegion()
if err != nil {
return nil, fmt.Errorf("cannot determine default ec2 region; probably, `region` param in `ec2_sd_configs` is missing; the error: %s", err)
return nil, fmt.Errorf("cannot determine default ec2 region; probably, `region` param in `ec2_sd_configs` is missing; the error: %w", err)
}
region = r
}
@ -88,7 +88,7 @@ func getDefaultRegion() (string, error) {
}
var id IdentityDocument
if err := json.Unmarshal(data, &id); err != nil {
return "", fmt.Errorf("cannot parse identity document: %s", err)
return "", fmt.Errorf("cannot parse identity document: %w", err)
}
return id.Region, nil
}
@ -109,28 +109,28 @@ func getMetadataByPath(apiPath string) ([]byte, error) {
sessionTokenURL := "http://169.254.169.254/latest/api/token"
req, err := http.NewRequest("PUT", sessionTokenURL, nil)
if err != nil {
return nil, fmt.Errorf("cannot create request for IMDSv2 session token at url %q: %s", sessionTokenURL, err)
return nil, fmt.Errorf("cannot create request for IMDSv2 session token at url %q: %w", sessionTokenURL, err)
}
req.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", "60")
resp, err := client.Do(req)
if err != nil {
return nil, fmt.Errorf("cannot obtain IMDSv2 session token from %q; probably, `region` is missing in `ec2_sd_config`; error: %s", sessionTokenURL, err)
return nil, fmt.Errorf("cannot obtain IMDSv2 session token from %q; probably, `region` is missing in `ec2_sd_config`; error: %w", sessionTokenURL, err)
}
token, err := readResponseBody(resp, sessionTokenURL)
if err != nil {
return nil, fmt.Errorf("cannot read IMDSv2 session token from %q; probably, `region` is missing in `ec2_sd_config`; error: %s", sessionTokenURL, err)
return nil, fmt.Errorf("cannot read IMDSv2 session token from %q; probably, `region` is missing in `ec2_sd_config`; error: %w", sessionTokenURL, err)
}
// Use session token in the request.
apiURL := "http://169.254.169.254/latest/" + apiPath
req, err = http.NewRequest("GET", apiURL, nil)
if err != nil {
return nil, fmt.Errorf("cannot create request to %q: %s", apiURL, err)
return nil, fmt.Errorf("cannot create request to %q: %w", apiURL, err)
}
req.Header.Set("X-aws-ec2-metadata-token", string(token))
resp, err = client.Do(req)
if err != nil {
return nil, fmt.Errorf("cannot obtain response for %q: %s", apiURL, err)
return nil, fmt.Errorf("cannot obtain response for %q: %w", apiURL, err)
}
return readResponseBody(resp, apiURL)
}
@ -158,11 +158,11 @@ func getAPIResponse(cfg *apiConfig, action, nextPageToken string) ([]byte, error
apiURL += "&Version=2013-10-15"
req, err := newSignedRequest(apiURL, "ec2", cfg.region, cfg.accessKey, cfg.secretKey)
if err != nil {
return nil, fmt.Errorf("cannot create signed request: %s", err)
return nil, fmt.Errorf("cannot create signed request: %w", err)
}
resp, err := discoveryutils.GetHTTPClient().Do(req)
if err != nil {
return nil, fmt.Errorf("cannot perform http request to %q: %s", apiURL, err)
return nil, fmt.Errorf("cannot perform http request to %q: %w", apiURL, err)
}
return readResponseBody(resp, apiURL)
}
@ -171,7 +171,7 @@ func readResponseBody(resp *http.Response, apiURL string) ([]byte, error) {
data, err := ioutil.ReadAll(resp.Body)
_ = resp.Body.Close()
if err != nil {
return nil, fmt.Errorf("cannot read response from %q: %s", apiURL, err)
return nil, fmt.Errorf("cannot read response from %q: %w", apiURL, err)
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("unexpected status code for %q; got %d; want %d; response body: %q",

View file

@ -34,11 +34,11 @@ type Filter struct {
func GetLabels(sdc *SDConfig) ([]map[string]string, error) {
cfg, err := getAPIConfig(sdc)
if err != nil {
return nil, fmt.Errorf("cannot get API config: %s", err)
return nil, fmt.Errorf("cannot get API config: %w", err)
}
ms, err := getInstancesLabels(cfg)
if err != nil {
return nil, fmt.Errorf("error when fetching instances data from EC2: %s", err)
return nil, fmt.Errorf("error when fetching instances data from EC2: %w", err)
}
return ms, nil
}

View file

@ -31,11 +31,11 @@ func getReservations(cfg *apiConfig) ([]Reservation, error) {
for {
data, err := getAPIResponse(cfg, action, pageToken)
if err != nil {
return nil, fmt.Errorf("cannot obtain instances: %s", err)
return nil, fmt.Errorf("cannot obtain instances: %w", err)
}
ir, err := parseInstancesResponse(data)
if err != nil {
return nil, fmt.Errorf("cannot parse instance list: %s", err)
return nil, fmt.Errorf("cannot parse instance list: %w", err)
}
rs = append(rs, ir.ReservationSet.Items...)
if len(ir.NextPageToken) == 0 {
@ -121,7 +121,7 @@ type Tag struct {
func parseInstancesResponse(data []byte) (*InstancesResponse, error) {
var v InstancesResponse
if err := xml.Unmarshal(data, &v); err != nil {
return nil, fmt.Errorf("cannot unmarshal InstancesResponse from %q: %s", data, err)
return nil, fmt.Errorf("cannot unmarshal InstancesResponse from %q: %w", data, err)
}
return &v, nil
}

View file

@ -24,7 +24,7 @@ func newSignedRequest(apiURL, service, region, accessKey, secretKey string) (*ht
func newSignedRequestWithTime(apiURL, service, region, accessKey, secretKey string, t time.Time) (*http.Request, error) {
uri, err := url.Parse(apiURL)
if err != nil {
return nil, fmt.Errorf("cannot parse %q: %s", apiURL, err)
return nil, fmt.Errorf("cannot parse %q: %w", apiURL, err)
}
// Create canonicalRequest
@ -65,7 +65,7 @@ func newSignedRequestWithTime(apiURL, service, region, accessKey, secretKey stri
req, err := http.NewRequest("GET", apiURL, nil)
if err != nil {
return nil, fmt.Errorf("cannot create request from %q: %s", apiURL, err)
return nil, fmt.Errorf("cannot create request from %q: %w", apiURL, err)
}
req.Header.Set("x-amz-date", amzdate)
req.Header.Set("Authorization", authHeader)

View file

@ -36,13 +36,13 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) {
ctx := context.Background()
client, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/compute.readonly")
if err != nil {
return nil, fmt.Errorf("cannot create oauth2 client for gce: %s", err)
return nil, fmt.Errorf("cannot create oauth2 client for gce: %w", err)
}
project := sdc.Project
if len(project) == 0 {
proj, err := getCurrentProject()
if err != nil {
return nil, fmt.Errorf("cannot determine the current project; make sure `vmagent` runs inside GCE; error: %s", err)
return nil, fmt.Errorf("cannot determine the current project; make sure `vmagent` runs inside GCE; error: %w", err)
}
project = proj
logger.Infof("autodetected the current GCE project: %q", project)
@ -52,7 +52,7 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) {
// Autodetect the current zone.
zone, err := getCurrentZone()
if err != nil {
return nil, fmt.Errorf("cannot determine the current zone; make sure `vmagent` runs inside GCE; error: %s", err)
return nil, fmt.Errorf("cannot determine the current zone; make sure `vmagent` runs inside GCE; error: %w", err)
}
zones = append(zones, zone)
logger.Infof("autodetected the current GCE zone: %q", zone)
@ -60,7 +60,7 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) {
// Autodetect zones for project.
zs, err := getZonesForProject(client, project, sdc.Filter)
if err != nil {
return nil, fmt.Errorf("cannot obtain zones for project %q: %s", project, err)
return nil, fmt.Errorf("cannot obtain zones for project %q: %w", project, err)
}
zones = zs
logger.Infof("autodetected all the zones for the GCE project %q: %q", project, zones)
@ -88,7 +88,7 @@ func getAPIResponse(client *http.Client, apiURL, filter, pageToken string) ([]by
apiURL = appendNonEmptyQueryArg(apiURL, "pageToken", pageToken)
resp, err := client.Get(apiURL)
if err != nil {
return nil, fmt.Errorf("cannot query %q: %s", apiURL, err)
return nil, fmt.Errorf("cannot query %q: %w", apiURL, err)
}
return readResponseBody(resp, apiURL)
}
@ -97,7 +97,7 @@ func readResponseBody(resp *http.Response, apiURL string) ([]byte, error) {
data, err := ioutil.ReadAll(resp.Body)
_ = resp.Body.Close()
if err != nil {
return nil, fmt.Errorf("cannot read response from %q: %s", apiURL, err)
return nil, fmt.Errorf("cannot read response from %q: %w", apiURL, err)
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("unexpected status code for %q; got %d; want %d; response body: %q",
@ -144,12 +144,12 @@ func getGCEMetadata(path string) ([]byte, error) {
metadataURL := "http://metadata.google.internal/computeMetadata/v1/" + path
req, err := http.NewRequest("GET", metadataURL, nil)
if err != nil {
return nil, fmt.Errorf("cannot create http request for %q: %s", metadataURL, err)
return nil, fmt.Errorf("cannot create http request for %q: %w", metadataURL, err)
}
req.Header.Set("Metadata-Flavor", "Google")
resp, err := discoveryutils.GetHTTPClient().Do(req)
if err != nil {
return nil, fmt.Errorf("cannot obtain response to %q: %s", metadataURL, err)
return nil, fmt.Errorf("cannot obtain response to %q: %w", metadataURL, err)
}
return readResponseBody(resp, metadataURL)
}

View file

@ -51,7 +51,7 @@ func (z *ZoneYAML) UnmarshalYAML(unmarshal func(interface{}) error) error {
func GetLabels(sdc *SDConfig) ([]map[string]string, error) {
cfg, err := getAPIConfig(sdc)
if err != nil {
return nil, fmt.Errorf("cannot get API config: %s", err)
return nil, fmt.Errorf("cannot get API config: %w", err)
}
ms := getInstancesLabels(cfg)
return ms, nil

View file

@ -58,11 +58,11 @@ func getInstancesForProjectAndZone(client *http.Client, project, zone, filter st
for {
data, err := getAPIResponse(client, instsURL, filter, pageToken)
if err != nil {
return nil, fmt.Errorf("cannot obtain instances: %s", err)
return nil, fmt.Errorf("cannot obtain instances: %w", err)
}
il, err := parseInstanceList(data)
if err != nil {
return nil, fmt.Errorf("cannot parse instance list from %q: %s", instsURL, err)
return nil, fmt.Errorf("cannot parse instance list from %q: %w", instsURL, err)
}
insts = append(insts, il.Items...)
if len(il.NextPageToken) == 0 {
@ -125,7 +125,7 @@ type MetadataEntry struct {
func parseInstanceList(data []byte) (*InstanceList, error) {
var il InstanceList
if err := json.Unmarshal(data, &il); err != nil {
return nil, fmt.Errorf("cannot unmarshal InstanceList from %q: %s", data, err)
return nil, fmt.Errorf("cannot unmarshal InstanceList from %q: %w", data, err)
}
return &il, nil
}

View file

@ -14,11 +14,11 @@ func getZonesForProject(client *http.Client, project, filter string) ([]string,
for {
data, err := getAPIResponse(client, zonesURL, filter, pageToken)
if err != nil {
return nil, fmt.Errorf("cannot obtain zones: %s", err)
return nil, fmt.Errorf("cannot obtain zones: %w", err)
}
zl, err := parseZoneList(data)
if err != nil {
return nil, fmt.Errorf("cannot parse zone list from %q: %s", zonesURL, err)
return nil, fmt.Errorf("cannot parse zone list from %q: %w", zonesURL, err)
}
for _, z := range zl.Items {
zones = append(zones, z.Name)
@ -45,7 +45,7 @@ type Zone struct {
func parseZoneList(data []byte) (*ZoneList, error) {
var zl ZoneList
if err := json.Unmarshal(data, &zl); err != nil {
return nil, fmt.Errorf("cannot unmarshal ZoneList from %q: %s", data, err)
return nil, fmt.Errorf("cannot unmarshal ZoneList from %q: %w", data, err)
}
return &zl, nil
}

View file

@ -29,7 +29,7 @@ func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
ac, err := promauth.NewConfig(baseDir, sdc.BasicAuth, sdc.BearerToken, sdc.BearerTokenFile, sdc.TLSConfig)
if err != nil {
return nil, fmt.Errorf("cannot parse auth config: %s", err)
return nil, fmt.Errorf("cannot parse auth config: %w", err)
}
apiServer := sdc.APIServer
if len(apiServer) == 0 {
@ -52,13 +52,13 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
}
acNew, err := promauth.NewConfig(".", nil, "", "/var/run/secrets/kubernetes.io/serviceaccount/token", &tlsConfig)
if err != nil {
return nil, fmt.Errorf("cannot initialize service account auth: %s; probably, `kubernetes_sd_config->api_server` is missing in Prometheus configs?", err)
return nil, fmt.Errorf("cannot initialize service account auth: %w; probably, `kubernetes_sd_config->api_server` is missing in Prometheus configs?", err)
}
ac = acNew
}
client, err := discoveryutils.NewClient(apiServer, ac)
if err != nil {
return nil, fmt.Errorf("cannot create HTTP client for %q: %s", apiServer, err)
return nil, fmt.Errorf("cannot create HTTP client for %q: %w", apiServer, err)
}
cfg := &apiConfig{
client: client,

View file

@ -53,11 +53,11 @@ func getEndpoints(cfg *apiConfig) ([]Endpoints, error) {
func getEndpointsByPath(cfg *apiConfig, path string) ([]Endpoints, error) {
data, err := getAPIResponse(cfg, "endpoints", path)
if err != nil {
return nil, fmt.Errorf("cannot obtain endpoints data from API server: %s", err)
return nil, fmt.Errorf("cannot obtain endpoints data from API server: %w", err)
}
epl, err := parseEndpointsList(data)
if err != nil {
return nil, fmt.Errorf("cannot parse endpoints response from API server: %s", err)
return nil, fmt.Errorf("cannot parse endpoints response from API server: %w", err)
}
return epl.Items, nil
}
@ -119,7 +119,7 @@ type EndpointPort struct {
func parseEndpointsList(data []byte) (*EndpointsList, error) {
var esl EndpointsList
if err := json.Unmarshal(data, &esl); err != nil {
return nil, fmt.Errorf("cannot unmarshal EndpointsList from %q: %s", data, err)
return nil, fmt.Errorf("cannot unmarshal EndpointsList from %q: %w", data, err)
}
return &esl, nil
}

View file

@ -43,11 +43,11 @@ func getIngresses(cfg *apiConfig) ([]Ingress, error) {
func getIngressesByPath(cfg *apiConfig, path string) ([]Ingress, error) {
data, err := getAPIResponse(cfg, "ingress", path)
if err != nil {
return nil, fmt.Errorf("cannot obtain ingresses data from API server: %s", err)
return nil, fmt.Errorf("cannot obtain ingresses data from API server: %w", err)
}
igl, err := parseIngressList(data)
if err != nil {
return nil, fmt.Errorf("cannot parse ingresses response from API server: %s", err)
return nil, fmt.Errorf("cannot parse ingresses response from API server: %w", err)
}
return igl.Items, nil
}
@ -108,7 +108,7 @@ type HTTPIngressPath struct {
func parseIngressList(data []byte) (*IngressList, error) {
var il IngressList
if err := json.Unmarshal(data, &il); err != nil {
return nil, fmt.Errorf("cannot unmarshal IngressList from %q: %s", data, err)
return nil, fmt.Errorf("cannot unmarshal IngressList from %q: %w", data, err)
}
return &il, nil
}

View file

@ -39,7 +39,7 @@ type Selector struct {
func GetLabels(sdc *SDConfig, baseDir string) ([]map[string]string, error) {
cfg, err := getAPIConfig(sdc, baseDir)
if err != nil {
return nil, fmt.Errorf("cannot create API config: %s", err)
return nil, fmt.Errorf("cannot create API config: %w", err)
}
switch sdc.Role {
case "node":

Some files were not shown because too many files have changed in this diff Show more