Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git (synced 2024-11-21 14:44:00 +00:00)

all: use %w instead of %s for wrapping errors in fmt.Errorf

This will simplify examining the returned errors such as httpserver.ErrorWithStatusCode. See https://blog.golang.org/go1.13-errors for details.

parent 586c5be404
commit d5dddb0953
146 changed files with 826 additions and 826 deletions
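For context on why this commit matters: %s formats the wrapped error as plain text, while %w keeps the original error in the chain, so callers can match it with errors.Is or recover a concrete type such as httpserver.ErrorWithStatusCode via errors.As. Below is a minimal sketch of the distinction; the ErrorWithStatusCode type here is a simplified stand-in for illustration, not the real httpserver definition.

package main

import (
    "errors"
    "fmt"
    "net/http"
)

// Simplified stand-in for httpserver.ErrorWithStatusCode (illustrative only).
type ErrorWithStatusCode struct {
    Err        error
    StatusCode int
}

func (e *ErrorWithStatusCode) Error() string { return e.Err.Error() }

func doRequest() error {
    return &ErrorWithStatusCode{
        Err:        errors.New("cannot store metrics"),
        StatusCode: http.StatusServiceUnavailable,
    }
}

func main() {
    // %w keeps the original error in the chain, so errors.As can find it.
    wrapped := fmt.Errorf("request failed: %w", doRequest())
    var esc *ErrorWithStatusCode
    fmt.Println(errors.As(wrapped, &esc)) // true; esc.StatusCode == 503

    // %s flattens the error into text, so the status code is unrecoverable.
    flattened := fmt.Errorf("request failed: %s", doRequest())
    var esc2 *ErrorWithStatusCode
    fmt.Println(errors.As(flattened, &esc2)) // false
}

With %s, errors.Is and errors.As stop at the formatted string, which is why the commit switches every wrapping fmt.Errorf call to %w. The reconstructed diff follows.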
@@ -162,7 +162,7 @@ func getTLSConfig(argIdx int) (*tls.Config, error) {
     }
     cfg, err := promauth.NewConfig(".", nil, "", "", tlsConfig)
     if err != nil {
-        return nil, fmt.Errorf("cannot populate TLS config: %s", err)
+        return nil, fmt.Errorf("cannot populate TLS config: %w", err)
     }
     tlsCfg := cfg.NewTLSConfig()
     return tlsCfg, nil
@@ -33,7 +33,7 @@ func loadRelabelConfigs() (*relabelConfigs, error) {
     if *relabelConfigPathGlobal != "" {
         global, err := promrelabel.LoadRelabelConfigs(*relabelConfigPathGlobal)
         if err != nil {
-            return nil, fmt.Errorf("cannot load -remoteWrite.relabelConfig=%q: %s", *relabelConfigPathGlobal, err)
+            return nil, fmt.Errorf("cannot load -remoteWrite.relabelConfig=%q: %w", *relabelConfigPathGlobal, err)
         }
         rcs.global = global
     }

@@ -45,7 +45,7 @@ func loadRelabelConfigs() (*relabelConfigs, error) {
     for i, path := range *relabelConfigPaths {
         prc, err := promrelabel.LoadRelabelConfigs(path)
         if err != nil {
-            return nil, fmt.Errorf("cannot load relabel configs from -remoteWrite.urlRelabelConfig=%q: %s", path, err)
+            return nil, fmt.Errorf("cannot load relabel configs from -remoteWrite.urlRelabelConfig=%q: %w", path, err)
         }
         rcs.perURL[i] = prc
     }
@@ -72,7 +72,7 @@ func (ar *AlertingRule) Exec(ctx context.Context, q datasource.Querier, series b
     ar.lastExecError = err
     ar.lastExecTime = time.Now()
     if err != nil {
-        return nil, fmt.Errorf("failed to execute query %q: %s", ar.Expr, err)
+        return nil, fmt.Errorf("failed to execute query %q: %w", ar.Expr, err)
     }

     for h, a := range ar.alerts {

@@ -103,7 +103,7 @@ func (ar *AlertingRule) Exec(ctx context.Context, q datasource.Querier, series b
         a, err := ar.newAlert(m, ar.lastExecTime)
         if err != nil {
             ar.lastExecError = err
-            return nil, fmt.Errorf("failed to create alert: %s", err)
+            return nil, fmt.Errorf("failed to create alert: %w", err)
         }
         a.ID = h
         a.State = notifier.StatePending

@@ -363,7 +363,7 @@ func (ar *AlertingRule) Restore(ctx context.Context, q datasource.Querier, lookb

         a, err := ar.newAlert(m, time.Unix(int64(m.Value), 0))
         if err != nil {
-            return fmt.Errorf("failed to create alert: %s", err)
+            return fmt.Errorf("failed to create alert: %w", err)
         }
         a.ID = hash(m)
         a.State = notifier.StatePending
@@ -46,19 +46,19 @@ func (g *Group) Validate(validateAnnotations, validateExpressions bool) error {
         }
         uniqueRules[r.ID] = struct{}{}
         if err := r.Validate(); err != nil {
-            return fmt.Errorf("invalid rule %q.%q: %s", g.Name, ruleName, err)
+            return fmt.Errorf("invalid rule %q.%q: %w", g.Name, ruleName, err)
         }
         if validateExpressions {
             if _, err := metricsql.Parse(r.Expr); err != nil {
-                return fmt.Errorf("invalid expression for rule %q.%q: %s", g.Name, ruleName, err)
+                return fmt.Errorf("invalid expression for rule %q.%q: %w", g.Name, ruleName, err)
             }
         }
         if validateAnnotations {
             if err := notifier.ValidateTemplates(r.Annotations); err != nil {
-                return fmt.Errorf("invalid annotations for rule %q.%q: %s", g.Name, ruleName, err)
+                return fmt.Errorf("invalid annotations for rule %q.%q: %w", g.Name, ruleName, err)
             }
             if err := notifier.ValidateTemplates(r.Labels); err != nil {
-                return fmt.Errorf("invalid labels for rule %q.%q: %s", g.Name, ruleName, err)
+                return fmt.Errorf("invalid labels for rule %q.%q: %w", g.Name, ruleName, err)
             }
         }
     }

@@ -137,7 +137,7 @@ func Parse(pathPatterns []string, validateAnnotations, validateExpressions bool)
     for _, pattern := range pathPatterns {
         matches, err := filepath.Glob(pattern)
         if err != nil {
-            return nil, fmt.Errorf("error reading file pattern %s: %v", pattern, err)
+            return nil, fmt.Errorf("error reading file pattern %s: %w", pattern, err)
         }
         fp = append(fp, matches...)
     }

@@ -150,7 +150,7 @@ func Parse(pathPatterns []string, validateAnnotations, validateExpressions bool)
     }
     for _, g := range gr {
         if err := g.Validate(validateAnnotations, validateExpressions); err != nil {
-            return nil, fmt.Errorf("invalid group %q in file %q: %s", g.Name, file, err)
+            return nil, fmt.Errorf("invalid group %q in file %q: %w", g.Name, file, err)
         }
         if _, ok := uniqueGroups[g.Name]; ok {
             return nil, fmt.Errorf("group name %q duplicate in file %q", g.Name, file)
@@ -31,7 +31,7 @@ func Init() (Querier, error) {
     }
     tr, err := utils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
     if err != nil {
-        return nil, fmt.Errorf("failed to create transport: %s", err)
+        return nil, fmt.Errorf("failed to create transport: %w", err)
     }
     c := &http.Client{Transport: tr}
     return NewVMStorage(*addr, *basicAuthUsername, *basicAuthPassword, c), nil
@@ -32,7 +32,7 @@ func (r response) metrics() ([]Metric, error) {
     for i, res := range r.Data.Result {
         f, err = strconv.ParseFloat(res.TV[1].(string), 64)
         if err != nil {
-            return nil, fmt.Errorf("metric %v, unable to parse float64 from %s: %s", res, res.TV[1], err)
+            return nil, fmt.Errorf("metric %v, unable to parse float64 from %s: %w", res, res.TV[1], err)
         }
         m.Labels = nil
         for k, v := range r.Data.Result[i].Labels {

@@ -80,25 +80,25 @@ func (s *VMStorage) Query(ctx context.Context, query string) ([]Metric, error) {
     }
     resp, err := s.c.Do(req.WithContext(ctx))
    if err != nil {
-        return nil, fmt.Errorf("error getting response from %s:%s", req.URL, err)
+        return nil, fmt.Errorf("error getting response from %s: %w", req.URL, err)
     }
     defer func() { _ = resp.Body.Close() }()
     if resp.StatusCode != http.StatusOK {
         body, _ := ioutil.ReadAll(resp.Body)
-        return nil, fmt.Errorf("datasource returns unxeprected response code %d for %s with err %s. Reponse body %s", resp.StatusCode, req.URL, err, body)
+        return nil, fmt.Errorf("datasource returns unxeprected response code %d for %s with err %w; reponse body: %s", resp.StatusCode, req.URL, err, body)
     }
     r := &response{}
     if err := json.NewDecoder(resp.Body).Decode(r); err != nil {
-        return nil, fmt.Errorf("error parsing metrics for %s:%s", req.URL, err)
+        return nil, fmt.Errorf("error parsing metrics for %s: %w", req.URL, err)
     }
     if r.Status == statusError {
         return nil, fmt.Errorf("response error, query: %s, errorType: %s, error: %s", req.URL, r.ErrorType, r.Error)
     }
     if r.Status != statusSuccess {
-        return nil, fmt.Errorf("unkown status:%s, Expected success or error ", r.Status)
+        return nil, fmt.Errorf("unknown status: %s, Expected success or error ", r.Status)
     }
     if r.Data.ResultType != rtVector {
-        return nil, fmt.Errorf("unkown restul type:%s. Expected vector", r.Data.ResultType)
+        return nil, fmt.Errorf("unknown restul type:%s. Expected vector", r.Data.ResultType)
     }
     return r.metrics()
 }
@@ -84,7 +84,7 @@ func (g *Group) Restore(ctx context.Context, q datasource.Querier, lookback time
             continue
         }
         if err := rr.Restore(ctx, q, lookback); err != nil {
-            return fmt.Errorf("error while restoring rule %q: %s", rule, err)
+            return fmt.Errorf("error while restoring rule %q: %w", rule, err)
         }
     }
     return nil

@@ -251,7 +251,7 @@ func (e *executor) exec(ctx context.Context, rule Rule, returnSeries bool, inter
     tss, err := rule.Exec(ctx, e.querier, returnSeries)
     if err != nil {
         execErrors.Inc()
-        return fmt.Errorf("rule %q: failed to execute: %s", rule, err)
+        return fmt.Errorf("rule %q: failed to execute: %w", rule, err)
     }

     if len(tss) > 0 && e.rw != nil {

@@ -259,7 +259,7 @@ func (e *executor) exec(ctx context.Context, rule Rule, returnSeries bool, inter
         for _, ts := range tss {
             if err := e.rw.Push(ts); err != nil {
                 remoteWriteErrors.Inc()
-                return fmt.Errorf("rule %q: remote write failure: %s", rule, err)
+                return fmt.Errorf("rule %q: remote write failure: %w", rule, err)
             }
         }
     }

@@ -293,7 +293,7 @@ func (e *executor) exec(ctx context.Context, rule Rule, returnSeries bool, inter
     for _, nt := range e.notifiers {
         if err := nt.Send(ctx, alerts); err != nil {
             alertsSendErrors.Inc()
-            errGr.Add(fmt.Errorf("rule %q: failed to send alerts: %s", rule, err))
+            errGr.Add(fmt.Errorf("rule %q: failed to send alerts: %w", rule, err))
         }
     }
     return errGr.Err()
@@ -105,20 +105,20 @@ var (
 func newManager(ctx context.Context) (*manager, error) {
     q, err := datasource.Init()
     if err != nil {
-        return nil, fmt.Errorf("failed to init datasource: %s", err)
+        return nil, fmt.Errorf("failed to init datasource: %w", err)
     }
     eu, err := getExternalURL(*externalURL, *httpListenAddr, httpserver.IsTLS())
     if err != nil {
-        return nil, fmt.Errorf("failed to init `external.url`: %s", err)
+        return nil, fmt.Errorf("failed to init `external.url`: %w", err)
     }
     notifier.InitTemplateFunc(eu)
     aug, err := getAlertURLGenerator(eu, *externalAlertSource, *validateTemplates)
     if err != nil {
-        return nil, fmt.Errorf("failed to init `external.alert.source`: %s", err)
+        return nil, fmt.Errorf("failed to init `external.alert.source`: %w", err)
     }
     nts, err := notifier.Init(aug)
     if err != nil {
-        return nil, fmt.Errorf("failed to init notifier: %s", err)
+        return nil, fmt.Errorf("failed to init notifier: %w", err)
     }

     manager := &manager{

@@ -128,13 +128,13 @@ func newManager(ctx context.Context) (*manager, error) {
     }
     rw, err := remotewrite.Init(ctx)
     if err != nil {
-        return nil, fmt.Errorf("failed to init remoteWrite: %s", err)
+        return nil, fmt.Errorf("failed to init remoteWrite: %w", err)
     }
     manager.rw = rw

     rr, err := remoteread.Init()
     if err != nil {
-        return nil, fmt.Errorf("failed to init remoteRead: %s", err)
+        return nil, fmt.Errorf("failed to init remoteRead: %w", err)
     }
     manager.rr = rr
     return manager, nil

@@ -169,7 +169,7 @@ func getAlertURLGenerator(externalURL *url.URL, externalAlertSource string, vali
         if err := notifier.ValidateTemplates(map[string]string{
             "tpl": externalAlertSource,
         }); err != nil {
-            return nil, fmt.Errorf("error validating source template %s:%w", externalAlertSource, err)
+            return nil, fmt.Errorf("error validating source template %s: %w", externalAlertSource, err)
         }
     }
     m := map[string]string{
@@ -83,7 +83,7 @@ func (m *manager) update(ctx context.Context, path []string, validateTpl, valida
     logger.Infof("reading rules configuration file from %q", strings.Join(path, ";"))
     groupsCfg, err := config.Parse(path, validateTpl, validateExpr)
     if err != nil {
-        return fmt.Errorf("cannot parse configuration file: %s", err)
+        return fmt.Errorf("cannot parse configuration file: %w", err)
     }

     groupsRegistry := make(map[uint64]*Group)
@@ -89,7 +89,7 @@ func templateAnnotations(annotations map[string]string, header string, data aler
         builder.WriteString(header)
         builder.WriteString(text)
         if err := templateAnnotation(&buf, builder.String(), data); err != nil {
-            eg.Add(fmt.Errorf("key %q, template %q: %s", key, text, err))
+            eg.Add(fmt.Errorf("key %q, template %q: %w", key, text, err))
             continue
         }
         r[key] = buf.String()
@@ -43,7 +43,7 @@ func (am *AlertManager) Send(ctx context.Context, alerts []Alert) error {
     if resp.StatusCode != http.StatusOK {
         body, err := ioutil.ReadAll(resp.Body)
         if err != nil {
-            return fmt.Errorf("failed to read response from %q: %s", am.alertURL, err)
+            return fmt.Errorf("failed to read response from %q: %w", am.alertURL, err)
         }
         return fmt.Errorf("invalid SC %d from %q; response body: %s", resp.StatusCode, am.alertURL, string(body))
     }
@@ -36,7 +36,7 @@ func Init(gen AlertURLGenerator) ([]Notifier, error) {
         ca, serverName := tlsCAFile.GetOptionalArg(i), tlsServerName.GetOptionalArg(i)
         tr, err := utils.Transport(addr, cert, key, ca, serverName, *tlsInsecureSkipVerify)
         if err != nil {
-            return nil, fmt.Errorf("failed to create transport: %s", err)
+            return nil, fmt.Errorf("failed to create transport: %w", err)
         }
         user, pass := basicAuthUsername.GetOptionalArg(i), basicAuthPassword.GetOptionalArg(i)
         am := NewAlertManager(addr, user, pass, gen, &http.Client{Transport: tr})
@@ -71,7 +71,7 @@ func (rr *RecordingRule) Exec(ctx context.Context, q datasource.Querier, series
     rr.lastExecTime = time.Now()
     rr.lastExecError = err
     if err != nil {
-        return nil, fmt.Errorf("failed to execute query %q: %s", rr.Expr, err)
+        return nil, fmt.Errorf("failed to execute query %q: %w", rr.Expr, err)
     }

     duplicates := make(map[uint64]prompbmarshal.TimeSeries, len(qMetrics))
@@ -32,7 +32,7 @@ func Init() (datasource.Querier, error) {
     }
     tr, err := utils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
     if err != nil {
-        return nil, fmt.Errorf("failed to create transport: %s", err)
+        return nil, fmt.Errorf("failed to create transport: %w", err)
     }
     c := &http.Client{Transport: tr}
     return datasource.NewVMStorage(*addr, *basicAuthUsername, *basicAuthPassword, c), nil
@@ -38,7 +38,7 @@ func Init(ctx context.Context) (*Client, error) {

     t, err := utils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
     if err != nil {
-        return nil, fmt.Errorf("failed to create transport: %s", err)
+        return nil, fmt.Errorf("failed to create transport: %w", err)
     }

     return NewClient(ctx, Config{
@@ -30,7 +30,7 @@ func TLSConfig(certFile, keyFile, CAFile, serverName string, insecureSkipVerify
     if certFile != "" {
         cert, err := tls.LoadX509KeyPair(certFile, keyFile)
         if err != nil {
-            return nil, fmt.Errorf("cannot load TLS certificate from `cert_file`=%q, `key_file`=%q: %s", certFile, keyFile, err)
+            return nil, fmt.Errorf("cannot load TLS certificate from `cert_file`=%q, `key_file`=%q: %w", certFile, keyFile, err)
         }

         certs = []tls.Certificate{cert}

@@ -40,7 +40,7 @@ func TLSConfig(certFile, keyFile, CAFile, serverName string, insecureSkipVerify
     if CAFile != "" {
         pem, err := ioutil.ReadFile(CAFile)
         if err != nil {
-            return nil, fmt.Errorf("cannot read `ca_file` %q: %s", CAFile, err)
+            return nil, fmt.Errorf("cannot read `ca_file` %q: %w", CAFile, err)
         }

         rootCAs = x509.NewCertPool()
@@ -80,7 +80,7 @@ func (rh *requestHandler) listGroups() ([]byte, error) {
     b, err := json.Marshal(lr)
     if err != nil {
         return nil, &httpserver.ErrorWithStatusCode{
-            Err: fmt.Errorf(`error encoding list of active alerts: %s`, err),
+            Err: fmt.Errorf(`error encoding list of active alerts: %w`, err),
             StatusCode: http.StatusInternalServerError,
         }
     }

@@ -117,7 +117,7 @@ func (rh *requestHandler) listAlerts() ([]byte, error) {
     b, err := json.Marshal(lr)
     if err != nil {
         return nil, &httpserver.ErrorWithStatusCode{
-            Err: fmt.Errorf(`error encoding list of active alerts: %s`, err),
+            Err: fmt.Errorf(`error encoding list of active alerts: %w`, err),
             StatusCode: http.StatusInternalServerError,
         }
     }

@@ -138,11 +138,11 @@ func (rh *requestHandler) alert(path string) ([]byte, error) {

     groupID, err := uint64FromPath(parts[0])
     if err != nil {
-        return nil, badRequest(fmt.Errorf(`cannot parse groupID: %s`, err))
+        return nil, badRequest(fmt.Errorf(`cannot parse groupID: %w`, err))
     }
     alertID, err := uint64FromPath(parts[1])
     if err != nil {
-        return nil, badRequest(fmt.Errorf(`cannot parse alertID: %s`, err))
+        return nil, badRequest(fmt.Errorf(`cannot parse alertID: %w`, err))
     }
     resp, err := rh.m.AlertAPI(groupID, alertID)
     if err != nil {
@@ -82,11 +82,11 @@ var stopCh chan struct{}
 func readAuthConfig(path string) (map[string]*UserInfo, error) {
     data, err := ioutil.ReadFile(path)
     if err != nil {
-        return nil, fmt.Errorf("cannot read %q: %s", path, err)
+        return nil, fmt.Errorf("cannot read %q: %w", path, err)
     }
     m, err := parseAuthConfig(data)
     if err != nil {
-        return nil, fmt.Errorf("cannot parse %q: %s", path, err)
+        return nil, fmt.Errorf("cannot parse %q: %w", path, err)
     }
     logger.Infof("Loaded information about %d users from %q", len(m), path)
     return m, nil

@@ -95,7 +95,7 @@ func readAuthConfig(path string) (map[string]*UserInfo, error) {
 func parseAuthConfig(data []byte) (map[string]*UserInfo, error) {
     var ac AuthConfig
     if err := yaml.UnmarshalStrict(data, &ac); err != nil {
-        return nil, fmt.Errorf("cannot unmarshal AuthConfig data: %s", err)
+        return nil, fmt.Errorf("cannot unmarshal AuthConfig data: %w", err)
     }
     uis := ac.Users
     if len(uis) == 0 {

@@ -115,7 +115,7 @@ func parseAuthConfig(data []byte) (map[string]*UserInfo, error) {
         // Validate urlPrefix
         target, err := url.Parse(urlPrefix)
         if err != nil {
-            return nil, fmt.Errorf("invalid `url_prefix: %q`: %s", urlPrefix, err)
+            return nil, fmt.Errorf("invalid `url_prefix: %q`: %w", urlPrefix, err)
         }
         if target.Scheme != "http" && target.Scheme != "https" {
             return nil, fmt.Errorf("unsupported scheme for `url_prefix: %q`: %q; must be `http` or `https`", urlPrefix, target.Scheme)
@@ -110,12 +110,12 @@ func newSrcFS() (*fslocal.FS, error) {
     // Verify the snapshot exists.
     f, err := os.Open(snapshotPath)
     if err != nil {
-        return nil, fmt.Errorf("cannot open snapshot at %q: %s", snapshotPath, err)
+        return nil, fmt.Errorf("cannot open snapshot at %q: %w", snapshotPath, err)
     }
     fi, err := f.Stat()
     _ = f.Close()
     if err != nil {
-        return nil, fmt.Errorf("cannot stat %q: %s", snapshotPath, err)
+        return nil, fmt.Errorf("cannot stat %q: %w", snapshotPath, err)
     }
     if !fi.IsDir() {
         return nil, fmt.Errorf("snapshot %q must be a directory", snapshotPath)

@@ -126,7 +126,7 @@ func newSrcFS() (*fslocal.FS, error) {
         MaxBytesPerSecond: *maxBytesPerSecond,
     }
     if err := fs.Init(); err != nil {
-        return nil, fmt.Errorf("cannot initialize fs: %s", err)
+        return nil, fmt.Errorf("cannot initialize fs: %w", err)
     }
     return fs, nil
 }

@@ -134,7 +134,7 @@ func newSrcFS() (*fslocal.FS, error) {
 func newDstFS() (common.RemoteFS, error) {
     fs, err := actions.NewRemoteFS(*dst)
     if err != nil {
-        return nil, fmt.Errorf("cannot parse `-dst`=%q: %s", *dst, err)
+        return nil, fmt.Errorf("cannot parse `-dst`=%q: %w", *dst, err)
     }
     return fs, nil
 }

@@ -145,7 +145,7 @@ func newOriginFS() (common.RemoteFS, error) {
     }
     fs, err := actions.NewRemoteFS(*origin)
     if err != nil {
-        return nil, fmt.Errorf("cannot parse `-origin`=%q: %s", *origin, err)
+        return nil, fmt.Errorf("cannot parse `-origin`=%q: %w", *origin, err)
     }
     return fs, nil
 }
@@ -122,7 +122,7 @@ func (ctx *InsertCtx) AddLabel(name, value string) {
 func (ctx *InsertCtx) FlushBufs() error {
     if err := vmstorage.AddRows(ctx.mrs); err != nil {
         return &httpserver.ErrorWithStatusCode{
-            Err: fmt.Errorf("cannot store metrics: %s", err),
+            Err: fmt.Errorf("cannot store metrics: %w", err),
             StatusCode: http.StatusServiceUnavailable,
         }
     }
@@ -71,7 +71,7 @@ func newDstFS() (*fslocal.FS, error) {
         MaxBytesPerSecond: *maxBytesPerSecond,
     }
     if err := fs.Init(); err != nil {
-        return nil, fmt.Errorf("cannot initialize local fs: %s", err)
+        return nil, fmt.Errorf("cannot initialize local fs: %w", err)
     }
     return fs, nil
 }

@@ -79,7 +79,7 @@ func newDstFS() (*fslocal.FS, error) {
 func newSrcFS() (common.RemoteFS, error) {
     fs, err := actions.NewRemoteFS(*src)
     if err != nil {
-        return nil, fmt.Errorf("cannot parse `-src`=%q: %s", *src, err)
+        return nil, fmt.Errorf("cannot parse `-src`=%q: %w", *src, err)
     }
     return fs, nil
 }
@@ -98,7 +98,7 @@ func timeseriesWorker(workerID uint) {
             continue
         }
         if err := tsw.pts.Unpack(&rs, rss.tr, rss.fetchData); err != nil {
-            tsw.doneCh <- fmt.Errorf("error during time series unpacking: %s", err)
+            tsw.doneCh <- fmt.Errorf("error during time series unpacking: %w", err)
             continue
         }
         if len(rs.Timestamps) > 0 || !rss.fetchData {

@@ -187,7 +187,7 @@ func unpackWorker() {
         sb := getSortBlock()
         if err := sb.unpackFrom(upw.br, upw.tr, upw.fetchData); err != nil {
             putSortBlock(sb)
-            upw.doneCh <- fmt.Errorf("cannot unpack block: %s", err)
+            upw.doneCh <- fmt.Errorf("cannot unpack block: %w", err)
             continue
         }
         upw.sb = sb

@@ -200,7 +200,7 @@ func (pts *packedTimeseries) Unpack(dst *Result, tr storage.TimeRange, fetchData
     dst.reset()

     if err := dst.MetricName.Unmarshal(bytesutil.ToUnsafeBytes(pts.metricName)); err != nil {
-        return fmt.Errorf("cannot unmarshal metricName %q: %s", pts.metricName, err)
+        return fmt.Errorf("cannot unmarshal metricName %q: %w", pts.metricName, err)
     }

     // Feed workers with work

@@ -329,7 +329,7 @@ func (sb *sortBlock) unpackFrom(br storage.BlockRef, tr storage.TimeRange, fetch
     br.MustReadBlock(&sb.b, fetchData)
     if fetchData {
         if err := sb.b.UnmarshalData(); err != nil {
-            return fmt.Errorf("cannot unmarshal block: %s", err)
+            return fmt.Errorf("cannot unmarshal block: %w", err)
         }
     }
     timestamps := sb.b.Timestamps()

@@ -398,7 +398,7 @@ func DeleteSeries(sq *storage.SearchQuery) (int, error) {
 func GetLabels(deadline Deadline) ([]string, error) {
     labels, err := vmstorage.SearchTagKeys(*maxTagKeysPerSearch)
     if err != nil {
-        return nil, fmt.Errorf("error during labels search: %s", err)
+        return nil, fmt.Errorf("error during labels search: %w", err)
     }

     // Substitute "" with "__name__"

@@ -424,7 +424,7 @@ func GetLabelValues(labelName string, deadline Deadline) ([]string, error) {
     // Search for tag values
     labelValues, err := vmstorage.SearchTagValues([]byte(labelName), *maxTagValuesPerSearch)
     if err != nil {
-        return nil, fmt.Errorf("error during label values search for labelName=%q: %s", labelName, err)
+        return nil, fmt.Errorf("error during label values search for labelName=%q: %w", labelName, err)
     }

     // Sort labelValues like Prometheus does

@@ -437,7 +437,7 @@ func GetLabelValues(labelName string, deadline Deadline) ([]string, error) {
 func GetLabelEntries(deadline Deadline) ([]storage.TagEntry, error) {
     labelEntries, err := vmstorage.SearchTagEntries(*maxTagKeysPerSearch, *maxTagValuesPerSearch)
     if err != nil {
-        return nil, fmt.Errorf("error during label entries request: %s", err)
+        return nil, fmt.Errorf("error during label entries request: %w", err)
     }

     // Substitute "" with "__name__"

@@ -464,7 +464,7 @@ func GetLabelEntries(deadline Deadline) ([]storage.TagEntry, error) {
 func GetTSDBStatusForDate(deadline Deadline, date uint64, topN int) (*storage.TSDBStatus, error) {
     status, err := vmstorage.GetTSDBStatusForDate(date, topN)
     if err != nil {
-        return nil, fmt.Errorf("error during tsdb status request: %s", err)
+        return nil, fmt.Errorf("error during tsdb status request: %w", err)
     }
     return status, nil
 }

@@ -473,7 +473,7 @@ func GetTSDBStatusForDate(deadline Deadline, date uint64, topN int) (*storage.TS
 func GetSeriesCount(deadline Deadline) (uint64, error) {
     n, err := vmstorage.GetSeriesCount()
     if err != nil {
-        return 0, fmt.Errorf("error during series count request: %s", err)
+        return 0, fmt.Errorf("error during series count request: %w", err)
     }
     return n, nil
 }

@@ -529,7 +529,7 @@ func ProcessSearchQuery(sq *storage.SearchQuery, fetchData bool, deadline Deadli
         m[string(metricName)] = append(brs, *sr.MetricBlockRef.BlockRef)
     }
     if err := sr.Error(); err != nil {
-        return nil, fmt.Errorf("search error after reading %d data blocks: %s", blocksRead, err)
+        return nil, fmt.Errorf("search error after reading %d data blocks: %w", blocksRead, err)
     }

     var rss Results

@@ -555,7 +555,7 @@ func setupTfss(tagFilterss [][]storage.TagFilter) ([]*storage.TagFilters, error)
         for i := range tagFilters {
             tf := &tagFilters[i]
             if err := tfs.Add(tf.Key, tf.Value, tf.IsNegative, tf.IsRegexp); err != nil {
-                return nil, fmt.Errorf("cannot parse tag filter %s: %s", tf, err)
+                return nil, fmt.Errorf("cannot parse tag filter %s: %w", tf, err)
             }
         }
         tfss = append(tfss, tfs)
@@ -46,7 +46,7 @@ const defaultStep = 5 * 60 * 1000
 func FederateHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error {
     ct := currentTime()
     if err := r.ParseForm(); err != nil {
-        return fmt.Errorf("cannot parse request form values: %s", err)
+        return fmt.Errorf("cannot parse request form values: %w", err)
     }
     matches := r.Form["match[]"]
     if len(matches) == 0 {

@@ -82,7 +82,7 @@ func FederateHandler(startTime time.Time, w http.ResponseWriter, r *http.Request
     }
     rss, err := netstorage.ProcessSearchQuery(sq, true, deadline)
     if err != nil {
-        return fmt.Errorf("cannot fetch data for %q: %s", sq, err)
+        return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
     }

     resultsCh := make(chan *quicktemplate.ByteBuffer)

@@ -105,7 +105,7 @@ func FederateHandler(startTime time.Time, w http.ResponseWriter, r *http.Request

     err = <-doneCh
     if err != nil {
-        return fmt.Errorf("error during data fetching: %s", err)
+        return fmt.Errorf("error during data fetching: %w", err)
     }
     federateDuration.UpdateDuration(startTime)
     return nil

@@ -117,7 +117,7 @@ var federateDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/fe
 func ExportHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error {
     ct := currentTime()
     if err := r.ParseForm(); err != nil {
-        return fmt.Errorf("cannot parse request form values: %s", err)
+        return fmt.Errorf("cannot parse request form values: %w", err)
     }
     matches := r.Form["match[]"]
     if len(matches) == 0 {

@@ -143,7 +143,7 @@ func ExportHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
         end = start + defaultStep
     }
     if err := exportHandler(w, matches, start, end, format, maxRowsPerLine, deadline); err != nil {
-        return fmt.Errorf("error when exporting data for queries=%q on the time range (start=%d, end=%d): %s", matches, start, end, err)
+        return fmt.Errorf("error when exporting data for queries=%q on the time range (start=%d, end=%d): %w", matches, start, end, err)
     }
     exportDuration.UpdateDuration(startTime)
     return nil

@@ -202,7 +202,7 @@ func exportHandler(w http.ResponseWriter, matches []string, start, end int64, fo
     }
     rss, err := netstorage.ProcessSearchQuery(sq, true, deadline)
     if err != nil {
-        return fmt.Errorf("cannot fetch data for %q: %s", sq, err)
+        return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
     }

     resultsCh := make(chan *quicktemplate.ByteBuffer, runtime.GOMAXPROCS(-1))

@@ -227,7 +227,7 @@ func exportHandler(w http.ResponseWriter, matches []string, start, end int64, fo
     }
     err = <-doneCh
     if err != nil {
-        return fmt.Errorf("error during data fetching: %s", err)
+        return fmt.Errorf("error during data fetching: %w", err)
     }
     return nil
 }

@@ -237,7 +237,7 @@ func exportHandler(w http.ResponseWriter, matches []string, start, end int64, fo
 // See https://prometheus.io/docs/prometheus/latest/querying/api/#delete-series
 func DeleteHandler(startTime time.Time, r *http.Request) error {
     if err := r.ParseForm(); err != nil {
-        return fmt.Errorf("cannot parse request form values: %s", err)
+        return fmt.Errorf("cannot parse request form values: %w", err)
     }
     if r.FormValue("start") != "" || r.FormValue("end") != "" {
         return fmt.Errorf("start and end aren't supported. Remove these args from the query in order to delete all the matching metrics")
@@ -255,7 +255,7 @@ func DeleteHandler(startTime time.Time, r *http.Request) error {
     }
     deletedCount, err := netstorage.DeleteSeries(sq)
     if err != nil {
-        return fmt.Errorf("cannot delete time series matching %q: %s", matches, err)
+        return fmt.Errorf("cannot delete time series matching %q: %w", matches, err)
     }
     if deletedCount > 0 {
         promql.ResetRollupResultCache()

@@ -273,14 +273,14 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr
     deadline := getDeadlineForQuery(r)

     if err := r.ParseForm(); err != nil {
-        return fmt.Errorf("cannot parse form values: %s", err)
+        return fmt.Errorf("cannot parse form values: %w", err)
     }
     var labelValues []string
     if len(r.Form["match[]"]) == 0 && len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
         var err error
         labelValues, err = netstorage.GetLabelValues(labelName, deadline)
         if err != nil {
-            return fmt.Errorf(`cannot obtain label values for %q: %s`, labelName, err)
+            return fmt.Errorf(`cannot obtain label values for %q: %w`, labelName, err)
         }
     } else {
         // Extended functionality that allows filtering by label filters and time range

@@ -302,7 +302,7 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr
         }
         labelValues, err = labelValuesWithMatches(labelName, matches, start, end, deadline)
         if err != nil {
-            return fmt.Errorf("cannot obtain label values for %q, match[]=%q, start=%d, end=%d: %s", labelName, matches, start, end, err)
+            return fmt.Errorf("cannot obtain label values for %q, match[]=%q, start=%d, end=%d: %w", labelName, matches, start, end, err)
         }
     }

@@ -343,7 +343,7 @@ func labelValuesWithMatches(labelName string, matches []string, start, end int64
     }
     rss, err := netstorage.ProcessSearchQuery(sq, false, deadline)
     if err != nil {
-        return nil, fmt.Errorf("cannot fetch data for %q: %s", sq, err)
+        return nil, fmt.Errorf("cannot fetch data for %q: %w", sq, err)
     }

     m := make(map[string]struct{})

@@ -358,7 +358,7 @@ func labelValuesWithMatches(labelName string, matches []string, start, end int64
         mLock.Unlock()
     })
     if err != nil {
-        return nil, fmt.Errorf("error when data fetching: %s", err)
+        return nil, fmt.Errorf("error when data fetching: %w", err)
     }

     labelValues := make([]string, 0, len(m))

@@ -376,7 +376,7 @@ func LabelsCountHandler(startTime time.Time, w http.ResponseWriter, r *http.Requ
     deadline := getDeadlineForQuery(r)
     labelEntries, err := netstorage.GetLabelEntries(deadline)
     if err != nil {
-        return fmt.Errorf(`cannot obtain label entries: %s`, err)
+        return fmt.Errorf(`cannot obtain label entries: %w`, err)
     }
     w.Header().Set("Content-Type", "application/json")
     WriteLabelsCountResponse(w, labelEntries)
@@ -394,14 +394,14 @@ const secsPerDay = 3600 * 24
 func TSDBStatusHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error {
     deadline := getDeadlineForQuery(r)
     if err := r.ParseForm(); err != nil {
-        return fmt.Errorf("cannot parse form values: %s", err)
+        return fmt.Errorf("cannot parse form values: %w", err)
     }
     date := fasttime.UnixDate()
     dateStr := r.FormValue("date")
     if len(dateStr) > 0 {
         t, err := time.Parse("2006-01-02", dateStr)
         if err != nil {
-            return fmt.Errorf("cannot parse `date` arg %q: %s", dateStr, err)
+            return fmt.Errorf("cannot parse `date` arg %q: %w", dateStr, err)
         }
         date = uint64(t.Unix()) / secsPerDay
     }

@@ -410,7 +410,7 @@ func TSDBStatusHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
     if len(topNStr) > 0 {
         n, err := strconv.Atoi(topNStr)
         if err != nil {
-            return fmt.Errorf("cannot parse `topN` arg %q: %s", topNStr, err)
+            return fmt.Errorf("cannot parse `topN` arg %q: %w", topNStr, err)
         }
         if n <= 0 {
             n = 1

@@ -422,7 +422,7 @@ func TSDBStatusHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
     }
     status, err := netstorage.GetTSDBStatusForDate(deadline, date, topN)
     if err != nil {
-        return fmt.Errorf(`cannot obtain tsdb status for date=%d, topN=%d: %s`, date, topN, err)
+        return fmt.Errorf(`cannot obtain tsdb status for date=%d, topN=%d: %w`, date, topN, err)
     }
     w.Header().Set("Content-Type", "application/json")
     WriteTSDBStatusResponse(w, status)

@@ -439,14 +439,14 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
     deadline := getDeadlineForQuery(r)

     if err := r.ParseForm(); err != nil {
-        return fmt.Errorf("cannot parse form values: %s", err)
+        return fmt.Errorf("cannot parse form values: %w", err)
     }
     var labels []string
     if len(r.Form["match[]"]) == 0 && len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
         var err error
         labels, err = netstorage.GetLabels(deadline)
         if err != nil {
-            return fmt.Errorf("cannot obtain labels: %s", err)
+            return fmt.Errorf("cannot obtain labels: %w", err)
         }
     } else {
         // Extended functionality that allows filtering by label filters and time range

@@ -466,7 +466,7 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
         }
         labels, err = labelsWithMatches(matches, start, end, deadline)
         if err != nil {
-            return fmt.Errorf("cannot obtain labels for match[]=%q, start=%d, end=%d: %s", matches, start, end, err)
+            return fmt.Errorf("cannot obtain labels for match[]=%q, start=%d, end=%d: %w", matches, start, end, err)
         }
     }
@@ -494,7 +494,7 @@ func labelsWithMatches(matches []string, start, end int64, deadline netstorage.D
     }
     rss, err := netstorage.ProcessSearchQuery(sq, false, deadline)
     if err != nil {
-        return nil, fmt.Errorf("cannot fetch data for %q: %s", sq, err)
+        return nil, fmt.Errorf("cannot fetch data for %q: %w", sq, err)
     }

     m := make(map[string]struct{})

@@ -510,7 +510,7 @@ func labelsWithMatches(matches []string, start, end int64, deadline netstorage.D
         mLock.Unlock()
     })
     if err != nil {
-        return nil, fmt.Errorf("error when data fetching: %s", err)
+        return nil, fmt.Errorf("error when data fetching: %w", err)
     }

     labels := make([]string, 0, len(m))

@@ -528,7 +528,7 @@ func SeriesCountHandler(startTime time.Time, w http.ResponseWriter, r *http.Requ
     deadline := getDeadlineForQuery(r)
     n, err := netstorage.GetSeriesCount(deadline)
     if err != nil {
-        return fmt.Errorf("cannot obtain series count: %s", err)
+        return fmt.Errorf("cannot obtain series count: %w", err)
     }
     w.Header().Set("Content-Type", "application/json")
     WriteSeriesCountResponse(w, n)

@@ -545,7 +545,7 @@ func SeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
     ct := currentTime()

     if err := r.ParseForm(); err != nil {
-        return fmt.Errorf("cannot parse form values: %s", err)
+        return fmt.Errorf("cannot parse form values: %w", err)
     }
     matches := r.Form["match[]"]
     if len(matches) == 0 {

@@ -580,7 +580,7 @@ func SeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
     }
     rss, err := netstorage.ProcessSearchQuery(sq, false, deadline)
     if err != nil {
-        return fmt.Errorf("cannot fetch data for %q: %s", sq, err)
+        return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
     }

     resultsCh := make(chan *quicktemplate.ByteBuffer)

@@ -605,7 +605,7 @@ func SeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
     }
     err = <-doneCh
     if err != nil {
-        return fmt.Errorf("error during data fetching: %s", err)
+        return fmt.Errorf("error during data fetching: %w", err)
     }
     seriesDuration.UpdateDuration(startTime)
     return nil
@@ -652,17 +652,17 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
     if childQuery, windowStr, offsetStr := promql.IsMetricSelectorWithRollup(query); childQuery != "" {
         window, err := parsePositiveDuration(windowStr, step)
         if err != nil {
-            return fmt.Errorf("cannot parse window: %s", err)
+            return fmt.Errorf("cannot parse window: %w", err)
         }
         offset, err := parseDuration(offsetStr, step)
         if err != nil {
-            return fmt.Errorf("cannot parse offset: %s", err)
+            return fmt.Errorf("cannot parse offset: %w", err)
         }
         start -= offset
         end := start
         start = end - window
         if err := exportHandler(w, []string{childQuery}, start, end, "promapi", 0, deadline); err != nil {
-            return fmt.Errorf("error when exporting data for query=%q on the time range (start=%d, end=%d): %s", childQuery, start, end, err)
+            return fmt.Errorf("error when exporting data for query=%q on the time range (start=%d, end=%d): %w", childQuery, start, end, err)
         }
         queryDuration.UpdateDuration(startTime)
         return nil

@@ -670,24 +670,24 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
     if childQuery, windowStr, stepStr, offsetStr := promql.IsRollup(query); childQuery != "" {
         newStep, err := parsePositiveDuration(stepStr, step)
         if err != nil {
-            return fmt.Errorf("cannot parse step: %s", err)
+            return fmt.Errorf("cannot parse step: %w", err)
         }
         if newStep > 0 {
             step = newStep
         }
         window, err := parsePositiveDuration(windowStr, step)
         if err != nil {
-            return fmt.Errorf("cannot parse window: %s", err)
+            return fmt.Errorf("cannot parse window: %w", err)
         }
         offset, err := parseDuration(offsetStr, step)
         if err != nil {
-            return fmt.Errorf("cannot parse offset: %s", err)
+            return fmt.Errorf("cannot parse offset: %w", err)
         }
         start -= offset
         end := start
         start = end - window
         if err := queryRangeHandler(w, childQuery, start, end, step, r, ct); err != nil {
-            return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %s", childQuery, start, end, step, err)
+            return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", childQuery, start, end, step, err)
         }
         queryDuration.UpdateDuration(startTime)
         return nil

@@ -702,7 +702,7 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
     }
     result, err := promql.Exec(&ec, query, true)
     if err != nil {
-        return fmt.Errorf("error when executing query=%q for (time=%d, step=%d): %s", query, start, step, err)
+        return fmt.Errorf("error when executing query=%q for (time=%d, step=%d): %w", query, start, step, err)
     }

     w.Header().Set("Content-Type", "application/json")

@@ -750,7 +750,7 @@ func QueryRangeHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
         return err
     }
     if err := queryRangeHandler(w, query, start, end, step, r, ct); err != nil {
-        return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %s", query, start, end, step, err)
+        return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", query, start, end, step, err)
     }
     queryRangeDuration.UpdateDuration(startTime)
     return nil

@@ -788,7 +788,7 @@ func queryRangeHandler(w http.ResponseWriter, query string, start, end, step int
     }
     result, err := promql.Exec(&ec, query, false)
     if err != nil {
-        return fmt.Errorf("cannot execute query: %s", err)
+        return fmt.Errorf("cannot execute query: %w", err)
     }
     queryOffset := getLatencyOffsetMilliseconds()
     if ct-end < queryOffset {

@@ -897,7 +897,7 @@ func getTime(r *http.Request, argKey string, defaultValue int64) (int64, error)
         // Try parsing duration relative to the current time
         d, err1 := time.ParseDuration(argValue)
         if err1 != nil {
-            return 0, fmt.Errorf("cannot parse %q=%q: %s", argKey, argValue, err)
+            return 0, fmt.Errorf("cannot parse %q=%q: %w", argKey, argValue, err)
         }
         if d > 0 {
             d = -d

@@ -939,7 +939,7 @@ func getDuration(r *http.Request, argKey string, defaultValue int64) (int64, err
         // Try parsing string format
         d, err := time.ParseDuration(argValue)
         if err != nil {
-            return 0, fmt.Errorf("cannot parse %q=%q: %s", argKey, argValue, err)
+            return 0, fmt.Errorf("cannot parse %q=%q: %w", argKey, argValue, err)
         }
         secs = d.Seconds()
     }

@@ -1001,7 +1001,7 @@ func getTagFilterssFromMatches(matches []string) ([][]storage.TagFilter, error)
     for _, match := range matches {
         tagFilters, err := promql.ParseMetricSelector(match)
         if err != nil {
-            return nil, fmt.Errorf("cannot parse %q: %s", match, err)
+            return nil, fmt.Errorf("cannot parse %q: %w", match, err)
         }
         tagFilterss = append(tagFilterss, tagFilters)
     }
@@ -119,7 +119,7 @@ func testIncrementalParallelAggr(iafc *incrementalAggrFuncContext, tssSrc, tssEx
     wg.Wait()
     tssActual := iafc.finalizeTimeseries()
     if err := expectTimeseriesEqual(tssActual, tssExpected); err != nil {
-        return fmt.Errorf("%s; tssActual=%v, tssExpected=%v", err, tssActual, tssExpected)
+        return fmt.Errorf("%w; tssActual=%v, tssExpected=%v", err, tssActual, tssExpected)
     }
     return nil
 }

@@ -164,7 +164,7 @@ func expectTsEqual(actual, expected *timeseries) error {
         return fmt.Errorf("unexpected timestamps; got %v; want %v", actual.Timestamps, expected.Timestamps)
     }
     if err := compareValues(actual.Values, expected.Values); err != nil {
-        return fmt.Errorf("%s; actual %v; expected %v", err, actual.Values, expected.Values)
+        return fmt.Errorf("%w; actual %v; expected %v", err, actual.Values, expected.Values)
     }
     return nil
 }
@@ -160,14 +160,14 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
         }
         rv, err := evalRollupFunc(ec, "default_rollup", rollupDefault, e, re, nil)
         if err != nil {
-            return nil, fmt.Errorf(`cannot evaluate %q: %s`, me.AppendString(nil), err)
+            return nil, fmt.Errorf(`cannot evaluate %q: %w`, me.AppendString(nil), err)
         }
         return rv, nil
     }
     if re, ok := e.(*metricsql.RollupExpr); ok {
         rv, err := evalRollupFunc(ec, "default_rollup", rollupDefault, e, re, nil)
         if err != nil {
-            return nil, fmt.Errorf(`cannot evaluate %q: %s`, re.AppendString(nil), err)
+            return nil, fmt.Errorf(`cannot evaluate %q: %w`, re.AppendString(nil), err)
         }
         return rv, nil
     }

@@ -189,7 +189,7 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
         }
         rv, err := tf(tfa)
         if err != nil {
-            return nil, fmt.Errorf(`cannot evaluate %q: %s`, fe.AppendString(nil), err)
+            return nil, fmt.Errorf(`cannot evaluate %q: %w`, fe.AppendString(nil), err)
         }
         return rv, nil
     }

@@ -203,7 +203,7 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
         }
         rv, err := evalRollupFunc(ec, fe.Name, rf, e, re, nil)
         if err != nil {
-            return nil, fmt.Errorf(`cannot evaluate %q: %s`, fe.AppendString(nil), err)
+            return nil, fmt.Errorf(`cannot evaluate %q: %w`, fe.AppendString(nil), err)
         }
         return rv, nil
     }

@@ -240,7 +240,7 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
         }
         rv, err := af(afa)
         if err != nil {
-            return nil, fmt.Errorf(`cannot evaluate %q: %s`, ae.AppendString(nil), err)
+            return nil, fmt.Errorf(`cannot evaluate %q: %w`, ae.AppendString(nil), err)
         }
         return rv, nil
     }

@@ -264,7 +264,7 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
         }
         rv, err := bf(bfa)
         if err != nil {
-            return nil, fmt.Errorf(`cannot evaluate %q: %s`, be.AppendString(nil), err)
+            return nil, fmt.Errorf(`cannot evaluate %q: %w`, be.AppendString(nil), err)
         }
         return rv, nil
     }

@@ -375,7 +375,7 @@ func evalRollupFuncArgs(ec *EvalConfig, fe *metricsql.FuncExpr) ([]interface{},
         }
         ts, err := evalExpr(ec, arg)
         if err != nil {
-            return nil, nil, fmt.Errorf("cannot evaluate arg #%d for %q: %s", i+1, fe.AppendString(nil), err)
+            return nil, nil, fmt.Errorf("cannot evaluate arg #%d for %q: %w", i+1, fe.AppendString(nil), err)
         }
         args[i] = ts
     }
@@ -285,7 +285,7 @@ func getRollupConfigs(name string, rf rollupFunc, expr metricsql.Expr, start, en
     case "aggr_over_time":
         aggrFuncNames, err := getRollupAggrFuncNames(expr)
         if err != nil {
-            return nil, nil, fmt.Errorf("invalid args to %s: %s", expr.AppendString(nil), err)
+            return nil, nil, fmt.Errorf("invalid args to %s: %w", expr.AppendString(nil), err)
         }
         for _, aggrFuncName := range aggrFuncNames {
             if rollupFuncsRemoveCounterResets[aggrFuncName] {
@@ -286,7 +286,7 @@ var (
     var buf [8]byte
     if _, err := rand.Read(buf[:]); err != nil {
         // do not use logger.Panicf, since it isn't initialized yet.
-        panic(fmt.Errorf("FATAL: cannot read random data for rollupResultCacheKeyPrefix: %s", err))
+        panic(fmt.Errorf("FATAL: cannot read random data for rollupResultCacheKeyPrefix: %w", err))
     }
     return encoding.UnmarshalUint64(buf[:])
 }()

@@ -414,7 +414,7 @@ func (mi *rollupResultCacheMetainfo) Unmarshal(src []byte) error {
     for i := 0; i < entriesLen; i++ {
         tail, err := mi.entries[i].Unmarshal(src)
         if err != nil {
-            return fmt.Errorf("cannot unmarshal entry #%d: %s", i, err)
+            return fmt.Errorf("cannot unmarshal entry #%d: %w", i, err)
         }
         src = tail
     }
@@ -217,7 +217,7 @@ func (ts *timeseries) unmarshalFastNoTimestamps(src []byte) ([]byte, error) {

     tail, err := unmarshalMetricNameFast(&ts.MetricName, src)
     if err != nil {
-        return tail, fmt.Errorf("cannot unmarshal MetricName: %s", err)
+        return tail, fmt.Errorf("cannot unmarshal MetricName: %w", err)
     }
     src = tail

@@ -275,7 +275,7 @@ func unmarshalMetricNameFast(mn *storage.MetricName, src []byte) ([]byte, error)

     tail, metricGroup, err := unmarshalBytesFast(src)
     if err != nil {
-        return tail, fmt.Errorf("cannot unmarshal MetricGroup: %s", err)
+        return tail, fmt.Errorf("cannot unmarshal MetricGroup: %w", err)
     }
     src = tail
     mn.MetricGroup = metricGroup[:len(metricGroup):len(metricGroup)]

@@ -292,13 +292,13 @@ func unmarshalMetricNameFast(mn *storage.MetricName, src []byte) ([]byte, error)
     for i := range mn.Tags {
         tail, key, err := unmarshalBytesFast(src)
         if err != nil {
-            return tail, fmt.Errorf("cannot unmarshal key for tag[%d]: %s", i, err)
+            return tail, fmt.Errorf("cannot unmarshal key for tag[%d]: %w", i, err)
         }
         src = tail

         tail, value, err := unmarshalBytesFast(src)
         if err != nil {
-            return tail, fmt.Errorf("cannot unmarshal value for tag[%d]: %s", i, err)
+            return tail, fmt.Errorf("cannot unmarshal value for tag[%d]: %w", i, err)
         }
         src = tail
@@ -414,7 +414,7 @@ func transformHistogramShare(tfa *transformFuncArg) ([]*timeseries, error) {
     }
     les, err := getScalar(args[0], 0)
     if err != nil {
-        return nil, fmt.Errorf("cannot parse le: %s", err)
+        return nil, fmt.Errorf("cannot parse le: %w", err)
     }

     // Convert buckets with `vmrange` labels to buckets with `le` labels.

@@ -425,7 +425,7 @@ func transformHistogramShare(tfa *transformFuncArg) ([]*timeseries, error) {
     if len(args) > 2 {
         s, err := getString(args[2], 2)
         if err != nil {
-            return nil, fmt.Errorf("cannot parse boundsLabel (arg #3): %s", err)
+            return nil, fmt.Errorf("cannot parse boundsLabel (arg #3): %w", err)
         }
         boundsLabel = s
     }

@@ -513,7 +513,7 @@ func transformHistogramQuantile(tfa *transformFuncArg) ([]*timeseries, error) {
     }
     phis, err := getScalar(args[0], 0)
     if err != nil {
-        return nil, fmt.Errorf("cannot parse phi: %s", err)
+        return nil, fmt.Errorf("cannot parse phi: %w", err)
     }

     // Convert buckets with `vmrange` labels to buckets with `le` labels.

@@ -524,7 +524,7 @@ func transformHistogramQuantile(tfa *transformFuncArg) ([]*timeseries, error) {
     if len(args) > 2 {
         s, err := getString(args[2], 2)
         if err != nil {
-            return nil, fmt.Errorf("cannot parse boundsLabel (arg #3): %s", err)
+            return nil, fmt.Errorf("cannot parse boundsLabel (arg #3): %w", err)
         }
         boundsLabel = s
     }

@@ -1034,7 +1034,7 @@ func transformLabelMap(tfa *transformFuncArg) ([]*timeseries, error) {
     }
     label, err := getString(args[1], 1)
     if err != nil {
-        return nil, fmt.Errorf("cannot read label name: %s", err)
+        return nil, fmt.Errorf("cannot read label name: %w", err)
     }
     srcValues, dstValues, err := getStringPairs(args[2:])
     if err != nil {

@@ -1179,7 +1179,7 @@ func transformLabelTransform(tfa *transformFuncArg) ([]*timeseries, error) {

     r, err := metricsql.CompileRegexp(regex)
     if err != nil {
-        return nil, fmt.Errorf(`cannot compile regex %q: %s`, regex, err)
+        return nil, fmt.Errorf(`cannot compile regex %q: %w`, regex, err)
     }
     return labelReplace(args[0], label, r, label, replacement)
 }

@@ -1208,7 +1208,7 @@ func transformLabelReplace(tfa *transformFuncArg) ([]*timeseries, error) {

     r, err := metricsql.CompileRegexpAnchored(regex)
     if err != nil {
-        return nil, fmt.Errorf(`cannot compile regex %q: %s`, regex, err)
+        return nil, fmt.Errorf(`cannot compile regex %q: %w`, regex, err)
     }
     return labelReplace(args[0], srcLabel, r, dstLabel, replacement)
 }

@@ -1238,7 +1238,7 @@ func transformLabelValue(tfa *transformFuncArg) ([]*timeseries, error) {
     }
     labelName, err := getString(args[1], 1)
     if err != nil {
-        return nil, fmt.Errorf("cannot get label name: %s", err)
+        return nil, fmt.Errorf("cannot get label name: %w", err)
     }
     rvs := args[0]
     for _, ts := range rvs {

@@ -1265,15 +1265,15 @@ func transformLabelMatch(tfa *transformFuncArg) ([]*timeseries, error) {
     }
     labelName, err := getString(args[1], 1)
     if err != nil {
-        return nil, fmt.Errorf("cannot get label name: %s", err)
+        return nil, fmt.Errorf("cannot get label name: %w", err)
     }
     labelRe, err := getString(args[2], 2)
     if err != nil {
-        return nil, fmt.Errorf("cannot get regexp: %s", err)
+        return nil, fmt.Errorf("cannot get regexp: %w", err)
     }
     r, err := metricsql.CompileRegexpAnchored(labelRe)
     if err != nil {
-        return nil, fmt.Errorf(`cannot compile regexp %q: %s`, labelRe, err)
+        return nil, fmt.Errorf(`cannot compile regexp %q: %w`, labelRe, err)
     }
     tss := args[0]
     rvs := tss[:0]

@@ -1293,15 +1293,15 @@ func transformLabelMismatch(tfa *transformFuncArg) ([]*timeseries, error) {
     }
     labelName, err := getString(args[1], 1)
     if err != nil {
-        return nil, fmt.Errorf("cannot get label name: %s", err)
+        return nil, fmt.Errorf("cannot get label name: %w", err)
     }
     labelRe, err := getString(args[2], 2)
     if err != nil {
-        return nil, fmt.Errorf("cannot get regexp: %s", err)
+        return nil, fmt.Errorf("cannot get regexp: %w", err)
     }
     r, err := metricsql.CompileRegexpAnchored(labelRe)
     if err != nil {
-        return nil, fmt.Errorf(`cannot compile regexp %q: %s`, labelRe, err)
+        return nil, fmt.Errorf(`cannot compile regexp %q: %w`, labelRe, err)
     }
     tss := args[0]
     rvs := tss[:0]

@@ -1401,7 +1401,7 @@ func newTransformFuncSortByLabel(isDesc bool) transformFunc {
     }
     label, err := getString(args[1], 1)
     if err != nil {
-        return nil, fmt.Errorf("cannot parse label name for sorting: %s", err)
+        return nil, fmt.Errorf("cannot parse label name for sorting: %w", err)
     }
     rvs := args[0]
     sort.SliceStable(rvs, func(i, j int) bool {
@@ -171,7 +171,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
w.Header().Set("Content-Type", "application/json")
snapshotPath, err := Storage.CreateSnapshot()
if err != nil {
-err = fmt.Errorf("cannot create snapshot: %s", err)
+err = fmt.Errorf("cannot create snapshot: %w", err)
jsonResponseError(w, err)
return true
}
@@ -185,7 +185,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
w.Header().Set("Content-Type", "application/json")
snapshots, err := Storage.ListSnapshots()
if err != nil {
-err = fmt.Errorf("cannot list snapshots: %s", err)
+err = fmt.Errorf("cannot list snapshots: %w", err)
jsonResponseError(w, err)
return true
}
@@ -202,7 +202,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
w.Header().Set("Content-Type", "application/json")
snapshotName := r.FormValue("snapshot")
if err := Storage.DeleteSnapshot(snapshotName); err != nil {
-err = fmt.Errorf("cannot delete snapshot %q: %s", snapshotName, err)
+err = fmt.Errorf("cannot delete snapshot %q: %w", snapshotName, err)
jsonResponseError(w, err)
return true
}
@@ -212,13 +212,13 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
w.Header().Set("Content-Type", "application/json")
snapshots, err := Storage.ListSnapshots()
if err != nil {
-err = fmt.Errorf("cannot list snapshots: %s", err)
+err = fmt.Errorf("cannot list snapshots: %w", err)
jsonResponseError(w, err)
return true
}
for _, snapshotName := range snapshots {
if err := Storage.DeleteSnapshot(snapshotName); err != nil {
-err = fmt.Errorf("cannot delete snapshot %q: %s", snapshotName, err)
+err = fmt.Errorf("cannot delete snapshot %q: %w", snapshotName, err)
jsonResponseError(w, err)
return true
}
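The snapshot handlers above pass the wrapped error to jsonResponseError; with %w a typed cause now survives the wrapping. A minimal sketch of that pattern (snapshotError is hypothetical, for illustration only):

package main

import (
	"errors"
	"fmt"
)

// snapshotError is a hypothetical typed error, for illustration only.
type snapshotError struct{ Name string }

func (e *snapshotError) Error() string { return "snapshot " + e.Name + " is broken" }

func main() {
	cause := &snapshotError{Name: "20191203"}
	err := fmt.Errorf("cannot delete snapshot %q: %w", cause.Name, cause)

	var se *snapshotError
	if errors.As(err, &se) { // errors.As reaches through the %w wrapper
		fmt.Println("failed snapshot:", se.Name)
	}
}
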
@@ -55,13 +55,13 @@ func (b *Backup) Run() error {
}

if err := dst.DeleteFile(fscommon.BackupCompleteFilename); err != nil {
-return fmt.Errorf("cannot delete `backup complete` file at %s: %s", dst, err)
+return fmt.Errorf("cannot delete `backup complete` file at %s: %w", dst, err)
}
if err := runBackup(src, dst, origin, concurrency); err != nil {
return err
}
if err := dst.CreateFile(fscommon.BackupCompleteFilename, []byte("ok")); err != nil {
-return fmt.Errorf("cannot create `backup complete` file at %s: %s", dst, err)
+return fmt.Errorf("cannot create `backup complete` file at %s: %w", dst, err)
}
return nil
}
@@ -74,17 +74,17 @@ func runBackup(src *fslocal.FS, dst common.RemoteFS, origin common.OriginFS, con
logger.Infof("obtaining list of parts at %s", src)
srcParts, err := src.ListParts()
if err != nil {
-return fmt.Errorf("cannot list src parts: %s", err)
+return fmt.Errorf("cannot list src parts: %w", err)
}
logger.Infof("obtaining list of parts at %s", dst)
dstParts, err := dst.ListParts()
if err != nil {
-return fmt.Errorf("cannot list dst parts: %s", err)
+return fmt.Errorf("cannot list dst parts: %w", err)
}
logger.Infof("obtaining list of parts at %s", origin)
originParts, err := origin.ListParts()
if err != nil {
-return fmt.Errorf("cannot list origin parts: %s", err)
+return fmt.Errorf("cannot list origin parts: %w", err)
}

backupSize := getPartsSize(srcParts)
@@ -97,7 +97,7 @@ func runBackup(src *fslocal.FS, dst common.RemoteFS, origin common.OriginFS, con
err = runParallel(concurrency, partsToDelete, func(p common.Part) error {
logger.Infof("deleting %s from %s", &p, dst)
if err := dst.DeletePart(p); err != nil {
-return fmt.Errorf("cannot delete %s from %s: %s", &p, dst, err)
+return fmt.Errorf("cannot delete %s from %s: %w", &p, dst, err)
}
atomic.AddUint64(&deletedParts, 1)
return nil
@@ -109,7 +109,7 @@ func runBackup(src *fslocal.FS, dst common.RemoteFS, origin common.OriginFS, con
return err
}
if err := dst.RemoveEmptyDirs(); err != nil {
-return fmt.Errorf("cannot remove empty directories at %s: %s", dst, err)
+return fmt.Errorf("cannot remove empty directories at %s: %w", dst, err)
}
}

@@ -122,7 +122,7 @@ func runBackup(src *fslocal.FS, dst common.RemoteFS, origin common.OriginFS, con
err = runParallel(concurrency, originCopyParts, func(p common.Part) error {
logger.Infof("server-side copying %s from %s to %s", &p, origin, dst)
if err := dst.CopyPart(origin, p); err != nil {
-return fmt.Errorf("cannot copy %s from %s to %s: %s", &p, origin, dst, err)
+return fmt.Errorf("cannot copy %s from %s to %s: %w", &p, origin, dst, err)
}
atomic.AddUint64(&copiedParts, 1)
return nil
@@ -144,17 +144,17 @@ func runBackup(src *fslocal.FS, dst common.RemoteFS, origin common.OriginFS, con
logger.Infof("uploading %s from %s to %s", &p, src, dst)
rc, err := src.NewReadCloser(p)
if err != nil {
-return fmt.Errorf("cannot create reader for %s from %s: %s", &p, src, err)
+return fmt.Errorf("cannot create reader for %s from %s: %w", &p, src, err)
}
sr := &statReader{
r: rc,
bytesRead: &bytesUploaded,
}
if err := dst.UploadPart(p, sr); err != nil {
-return fmt.Errorf("cannot upload %s to %s: %s", &p, dst, err)
+return fmt.Errorf("cannot upload %s to %s: %w", &p, dst, err)
}
if err = rc.Close(); err != nil {
-return fmt.Errorf("cannot close reader for %s from %s: %s", &p, src, err)
+return fmt.Errorf("cannot close reader for %s from %s: %w", &p, src, err)
}
return nil
}, func(elapsed time.Duration) {
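runBackup wraps part-level errors, and its callers may wrap again; errors.Is walks the whole chain, so the innermost cause stays testable no matter how many %w layers accumulate. A minimal sketch under that assumption:

package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	inner := fmt.Errorf("cannot upload part to dst: %w", io.ErrUnexpectedEOF)
	outer := fmt.Errorf("cannot run backup: %w", inner)
	// errors.Is unwraps layer by layer until it finds a match.
	fmt.Println(errors.Is(outer, io.ErrUnexpectedEOF)) // true
}
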
@@ -43,11 +43,11 @@ func (r *Restore) Run() error {

// Make sure VictoriaMetrics doesn't run during the restore process.
if err := fs.MkdirAllIfNotExist(r.Dst.Dir); err != nil {
-return fmt.Errorf("cannot create dir %q: %s", r.Dst.Dir, err)
+return fmt.Errorf("cannot create dir %q: %w", r.Dst.Dir, err)
}
flockF, err := fs.CreateFlockFile(r.Dst.Dir)
if err != nil {
-return fmt.Errorf("cannot create lock file in %q; make sure VictoriaMetrics doesn't use the dir; error: %s", r.Dst.Dir, err)
+return fmt.Errorf("cannot create lock file in %q; make sure VictoriaMetrics doesn't use the dir; error: %w", r.Dst.Dir, err)
}
defer fs.MustClose(flockF)

@@ -71,12 +71,12 @@ func (r *Restore) Run() error {
logger.Infof("obtaining list of parts at %s", src)
srcParts, err := src.ListParts()
if err != nil {
-return fmt.Errorf("cannot list src parts: %s", err)
+return fmt.Errorf("cannot list src parts: %w", err)
}
logger.Infof("obtaining list of parts at %s", dst)
dstParts, err := dst.ListParts()
if err != nil {
-return fmt.Errorf("cannot list dst parts: %s", err)
+return fmt.Errorf("cannot list dst parts: %w", err)
}

backupSize := getPartsSize(srcParts)
@@ -129,7 +129,7 @@ func (r *Restore) Run() error {
logger.Infof("deleting %s from %s", path, dst)
size, err := dst.DeletePath(path)
if err != nil {
-return fmt.Errorf("cannot delete %s from %s: %s", path, dst, err)
+return fmt.Errorf("cannot delete %s from %s: %w", path, dst, err)
}
deleteSize += size
}
@@ -137,14 +137,14 @@ func (r *Restore) Run() error {
return err
}
if err := dst.RemoveEmptyDirs(); err != nil {
-return fmt.Errorf("cannot remove empty directories at %s: %s", dst, err)
+return fmt.Errorf("cannot remove empty directories at %s: %w", dst, err)
}
}

// Re-read dstParts, since additional parts may be removed on the previous step.
dstParts, err = dst.ListParts()
if err != nil {
-return fmt.Errorf("cannot list dst parts after the deletion: %s", err)
+return fmt.Errorf("cannot list dst parts after the deletion: %w", err)
}

partsToCopy := common.PartsDifference(srcParts, dstParts)
@@ -166,17 +166,17 @@ func (r *Restore) Run() error {
logger.Infof("downloading %s from %s to %s", &p, src, dst)
wc, err := dst.NewWriteCloser(p)
if err != nil {
-return fmt.Errorf("cannot create writer for %q to %s: %s", &p, dst, err)
+return fmt.Errorf("cannot create writer for %q to %s: %w", &p, dst, err)
}
sw := &statWriter{
w: wc,
bytesWritten: &bytesDownloaded,
}
if err := src.DownloadPart(p, sw); err != nil {
-return fmt.Errorf("cannot download %s to %s: %s", &p, dst, err)
+return fmt.Errorf("cannot download %s to %s: %w", &p, dst, err)
}
if err := wc.Close(); err != nil {
-return fmt.Errorf("cannot close reader from %s from %s: %s", &p, src, err)
+return fmt.Errorf("cannot close reader from %s from %s: %w", &p, src, err)
}
}
return nil
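One constraint worth remembering for changes like this one: as of Go 1.13, fmt.Errorf honors at most one %w verb per call (wrapping multiple errors only arrived in Go 1.20). A minimal sketch of the single-wrap contract:

package main

import (
	"errors"
	"fmt"
)

func main() {
	cause := errors.New("cannot list dst parts")
	err := fmt.Errorf("restore failed: %w", cause)
	// Unwrap returns exactly the operand passed to the single %w verb.
	fmt.Println(errors.Unwrap(err) == cause) // true
}
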
@@ -207,7 +207,7 @@ func NewRemoteFS(path string) (common.RemoteFS, error) {
Dir: dir,
}
if err := fs.Init(); err != nil {
-return nil, fmt.Errorf("cannot initialize connection to gcs: %s", err)
+return nil, fmt.Errorf("cannot initialize connection to gcs: %w", err)
}
return fs, nil
case "s3":
@@ -226,7 +226,7 @@ func NewRemoteFS(path string) (common.RemoteFS, error) {
Dir: dir,
}
if err := fs.Init(); err != nil {
-return nil, fmt.Errorf("cannot initialize connection to s3: %s", err)
+return nil, fmt.Errorf("cannot initialize connection to s3: %w", err)
}
return fs, nil
default:
@@ -13,11 +13,11 @@ import (
func FsyncFile(path string) error {
if err := fsync(path); err != nil {
_ = os.RemoveAll(path)
-return fmt.Errorf("cannot fsync file %q: %s", path, err)
+return fmt.Errorf("cannot fsync file %q: %w", path, err)
}
dir := filepath.Dir(path)
if err := fsync(dir); err != nil {
-return fmt.Errorf("cannot fsync dir %q: %s", dir, err)
+return fmt.Errorf("cannot fsync dir %q: %w", dir, err)
}
return nil
}
@@ -45,7 +45,7 @@ func fsync(path string) error {
func AppendFiles(dst []string, dir string) ([]string, error) {
d, err := os.Open(dir)
if err != nil {
-return nil, fmt.Errorf("cannot open %q: %s", dir, err)
+return nil, fmt.Errorf("cannot open %q: %w", dir, err)
}
dst, err = appendFilesInternal(dst, d)
if err1 := d.Close(); err1 != nil {
@@ -58,14 +58,14 @@ func appendFilesInternal(dst []string, d *os.File) ([]string, error) {
dir := d.Name()
dfi, err := d.Stat()
if err != nil {
-return nil, fmt.Errorf("cannot stat %q: %s", dir, err)
+return nil, fmt.Errorf("cannot stat %q: %w", dir, err)
}
if !dfi.IsDir() {
return nil, fmt.Errorf("%q isn't a directory", dir)
}
fis, err := d.Readdir(-1)
if err != nil {
-return nil, fmt.Errorf("cannot read directory contents in %q: %s", dir, err)
+return nil, fmt.Errorf("cannot read directory contents in %q: %w", dir, err)
}
for _, fi := range fis {
name := fi.Name()
@@ -82,7 +82,7 @@ func appendFilesInternal(dst []string, d *os.File) ([]string, error) {
// Process directory
dst, err = AppendFiles(dst, path)
if err != nil {
-return nil, fmt.Errorf("cannot list %q: %s", path, err)
+return nil, fmt.Errorf("cannot list %q: %w", path, err)
}
continue
}
@@ -100,17 +100,17 @@ func appendFilesInternal(dst []string, d *os.File) ([]string, error) {
// Skip symlink that points to nowhere.
continue
}
-return nil, fmt.Errorf("cannot resolve symlink %q: %s", pathOrig, err)
+return nil, fmt.Errorf("cannot resolve symlink %q: %w", pathOrig, err)
}
sfi, err := os.Stat(pathReal)
if err != nil {
-return nil, fmt.Errorf("cannot stat %q from symlink %q: %s", pathReal, path, err)
+return nil, fmt.Errorf("cannot stat %q from symlink %q: %w", pathReal, path, err)
}
if sfi.IsDir() {
// Symlink points to directory
dstNew, err := AppendFiles(dst, pathReal)
if err != nil {
-return nil, fmt.Errorf("cannot list files at %q from symlink %q: %s", pathReal, path, err)
+return nil, fmt.Errorf("cannot list files at %q from symlink %q: %w", pathReal, path, err)
}
pathReal += "/"
for i := len(dst); i < len(dstNew); i++ {
@@ -163,14 +163,14 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) {
dir := d.Name()
dfi, err := d.Stat()
if err != nil {
-return false, fmt.Errorf("cannot stat %q: %s", dir, err)
+return false, fmt.Errorf("cannot stat %q: %w", dir, err)
}
if !dfi.IsDir() {
return false, fmt.Errorf("%q isn't a directory", dir)
}
fis, err := d.Readdir(-1)
if err != nil {
-return false, fmt.Errorf("cannot read directory contents in %q: %s", dir, err)
+return false, fmt.Errorf("cannot read directory contents in %q: %w", dir, err)
}
dirEntries := 0
hasFlock := false
@@ -184,7 +184,7 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) {
// Process directory
ok, err := removeEmptyDirs(path)
if err != nil {
-return false, fmt.Errorf("cannot list %q: %s", path, err)
+return false, fmt.Errorf("cannot list %q: %w", path, err)
}
if !ok {
dirEntries++
@@ -209,21 +209,21 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) {
// Remove symlink that points to nowere.
logger.Infof("removing broken symlink %q", pathOrig)
if err := os.Remove(pathOrig); err != nil {
-return false, fmt.Errorf("cannot remove %q: %s", pathOrig, err)
+return false, fmt.Errorf("cannot remove %q: %w", pathOrig, err)
}
continue
}
-return false, fmt.Errorf("cannot resolve symlink %q: %s", pathOrig, err)
+return false, fmt.Errorf("cannot resolve symlink %q: %w", pathOrig, err)
}
sfi, err := os.Stat(pathReal)
if err != nil {
-return false, fmt.Errorf("cannot stat %q from symlink %q: %s", pathReal, path, err)
+return false, fmt.Errorf("cannot stat %q from symlink %q: %w", pathReal, path, err)
}
if sfi.IsDir() {
// Symlink points to directory
ok, err := removeEmptyDirs(pathReal)
if err != nil {
-return false, fmt.Errorf("cannot list files at %q from symlink %q: %s", pathReal, path, err)
+return false, fmt.Errorf("cannot list files at %q from symlink %q: %w", pathReal, path, err)
}
if !ok {
dirEntries++
@@ -231,7 +231,7 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) {
// Remove the symlink
logger.Infof("removing symlink that points to empty dir %q", pathOrig)
if err := os.Remove(pathOrig); err != nil {
-return false, fmt.Errorf("cannot remove %q: %s", pathOrig, err)
+return false, fmt.Errorf("cannot remove %q: %w", pathOrig, err)
}
}
continue
@@ -252,11 +252,11 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) {
if hasFlock {
flockFilepath := dir + "/flock.lock"
if err := os.Remove(flockFilepath); err != nil {
-return false, fmt.Errorf("cannot remove %q: %s", flockFilepath, err)
+return false, fmt.Errorf("cannot remove %q: %w", flockFilepath, err)
}
}
if err := os.Remove(dir); err != nil {
-return false, fmt.Errorf("cannot remove %q: %s", dir, err)
+return false, fmt.Errorf("cannot remove %q: %w", dir, err)
}
return true, nil
}
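A caveat that matters once errors are wrapped: os.IsNotExist inspects the error directly and does not unwrap, while errors.Is follows the %w chain. A minimal sketch:

package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	_, err := os.Open("/no/such/file")
	wrapped := fmt.Errorf("cannot open %q: %w", "/no/such/file", err)

	fmt.Println(os.IsNotExist(wrapped))             // false: no unwrapping
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true: Is follows %w
}
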
@@ -64,7 +64,7 @@ func (fs *FS) ListParts() ([]common.Part, error) {
}
fi, err := os.Stat(file)
if err != nil {
-return nil, fmt.Errorf("cannot stat %q: %s", file, err)
+return nil, fmt.Errorf("cannot stat %q: %w", file, err)
}
path := file[len(dir):]
size := uint64(fi.Size())
@@ -100,7 +100,7 @@ func (fs *FS) NewReadCloser(p common.Part) (io.ReadCloser, error) {
path := fs.path(p)
r, err := filestream.OpenReaderAt(path, int64(p.Offset), true)
if err != nil {
-return nil, fmt.Errorf("cannot open %q at %q: %s", p.Path, fs.Dir, err)
+return nil, fmt.Errorf("cannot open %q at %q: %w", p.Path, fs.Dir, err)
}
lrc := &limitedReadCloser{
r: r,
@@ -121,7 +121,7 @@ func (fs *FS) NewWriteCloser(p common.Part) (io.WriteCloser, error) {
}
w, err := filestream.OpenWriterAt(path, int64(p.Offset), true)
if err != nil {
-return nil, fmt.Errorf("cannot open writer for %q at offset %d: %s", path, p.Offset, err)
+return nil, fmt.Errorf("cannot open writer for %q at offset %d: %w", path, p.Offset, err)
}
wc := &writeCloser{
w: w,
@@ -148,16 +148,16 @@ func (fs *FS) DeletePath(path string) (uint64, error) {
// The file could be deleted earlier via symlink.
return 0, nil
}
-return 0, fmt.Errorf("cannot open %q at %q: %s", path, fullPath, err)
+return 0, fmt.Errorf("cannot open %q at %q: %w", path, fullPath, err)
}
fi, err := f.Stat()
_ = f.Close()
if err != nil {
-return 0, fmt.Errorf("cannot stat %q at %q: %s", path, fullPath, err)
+return 0, fmt.Errorf("cannot stat %q at %q: %w", path, fullPath, err)
}
size := uint64(fi.Size())
if err := os.Remove(fullPath); err != nil {
-return 0, fmt.Errorf("cannot remove %q: %s", fullPath, err)
+return 0, fmt.Errorf("cannot remove %q: %w", fullPath, err)
}
return size, nil
}
@@ -170,7 +170,7 @@ func (fs *FS) RemoveEmptyDirs() error {
func (fs *FS) mkdirAll(filePath string) error {
dir := filepath.Dir(filePath)
if err := os.MkdirAll(dir, 0700); err != nil {
-return fmt.Errorf("cannot create directory %q: %s", dir, err)
+return fmt.Errorf("cannot create directory %q: %w", dir, err)
}
return nil
}
@@ -60,7 +60,7 @@ func (fs *FS) ListParts() ([]common.Part, error) {
// Check for correct part size.
fi, err := os.Stat(file)
if err != nil {
-return nil, fmt.Errorf("cannot stat file %q for part %q: %s", file, p.Path, err)
+return nil, fmt.Errorf("cannot stat file %q for part %q: %w", file, p.Path, err)
}
p.ActualSize = uint64(fi.Size())
parts = append(parts, p)
@@ -72,7 +72,7 @@ func (fs *FS) ListParts() ([]common.Part, error) {
func (fs *FS) DeletePart(p common.Part) error {
path := fs.path(p)
if err := os.Remove(path); err != nil {
-return fmt.Errorf("cannot remove %q: %s", path, err)
+return fmt.Errorf("cannot remove %q: %w", path, err)
}
return nil
}
@@ -103,12 +103,12 @@ func (fs *FS) CopyPart(srcFS common.OriginFS, p common.Part) error {
// Cannot create hardlink. Just copy file contents
srcFile, err := os.Open(srcPath)
if err != nil {
-return fmt.Errorf("cannot open file %q: %s", srcPath, err)
+return fmt.Errorf("cannot open file %q: %w", srcPath, err)
}
dstFile, err := os.Create(dstPath)
if err != nil {
_ = srcFile.Close()
-return fmt.Errorf("cannot create file %q: %s", dstPath, err)
+return fmt.Errorf("cannot create file %q: %w", dstPath, err)
}
n, err := io.Copy(dstFile, srcFile)
if err1 := dstFile.Close(); err1 != nil {
@@ -137,14 +137,14 @@ func (fs *FS) DownloadPart(p common.Part, w io.Writer) error {
path := fs.path(p)
r, err := os.Open(path)
if err != nil {
-return fmt.Errorf("cannot open %q: %s", path, err)
+return fmt.Errorf("cannot open %q: %w", path, err)
}
n, err := io.Copy(w, r)
if err1 := r.Close(); err1 != nil && err == nil {
err = err1
}
if err != nil {
-return fmt.Errorf("cannot download data from %q: %s", path, err)
+return fmt.Errorf("cannot download data from %q: %w", path, err)
}
if uint64(n) != p.Size {
return fmt.Errorf("wrong data size downloaded from %q; got %d bytes; want %d bytes", path, n, p.Size)
@@ -160,7 +160,7 @@ func (fs *FS) UploadPart(p common.Part, r io.Reader) error {
}
w, err := os.Create(path)
if err != nil {
-return fmt.Errorf("cannot create file %q: %s", path, err)
+return fmt.Errorf("cannot create file %q: %w", path, err)
}
n, err := io.Copy(w, r)
if err1 := w.Close(); err1 != nil && err == nil {
@@ -168,7 +168,7 @@ func (fs *FS) UploadPart(p common.Part, r io.Reader) error {
}
if err != nil {
_ = os.RemoveAll(path)
-return fmt.Errorf("cannot upload data to %q: %s", path, err)
+return fmt.Errorf("cannot upload data to %q: %w", path, err)
}
if uint64(n) != p.Size {
_ = os.RemoveAll(path)
@@ -184,7 +184,7 @@ func (fs *FS) UploadPart(p common.Part, r io.Reader) error {
func (fs *FS) mkdirAll(filePath string) error {
dir := filepath.Dir(filePath)
if err := os.MkdirAll(dir, 0700); err != nil {
-return fmt.Errorf("cannot create directory %q: %s", dir, err)
+return fmt.Errorf("cannot create directory %q: %w", dir, err)
}
return nil
}
@@ -200,7 +200,7 @@ func (fs *FS) DeleteFile(filePath string) error {
path := filepath.Join(fs.Dir, filePath)
err := os.Remove(path)
if err != nil && !os.IsNotExist(err) {
-return fmt.Errorf("cannot remove %q: %s", path, err)
+return fmt.Errorf("cannot remove %q: %w", path, err)
}
return nil
}
@@ -214,7 +214,7 @@ func (fs *FS) CreateFile(filePath string, data []byte) error {
return err
}
if err := ioutil.WriteFile(path, data, 0600); err != nil {
-return fmt.Errorf("cannot write %d bytes to %q: %s", len(data), path, err)
+return fmt.Errorf("cannot write %d bytes to %q: %w", len(data), path, err)
}
return nil
}
@@ -227,7 +227,7 @@ func (fs *FS) HasFile(filePath string) (bool, error) {
if os.IsNotExist(err) {
return false, nil
}
-return false, fmt.Errorf("cannot stat %q: %s", path, err)
+return false, fmt.Errorf("cannot stat %q: %w", path, err)
}
if fi.IsDir() {
return false, fmt.Errorf("%q is directory, while file is needed", path)
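The rendered messages do not change in this commit: for an error operand, %s and %w produce identical text; only the programmatic wrapping differs. A minimal sketch:

package main

import (
	"errors"
	"fmt"
)

func main() {
	cause := errors.New("disk full")
	a := fmt.Errorf("cannot remove %q: %s", "/tmp/x", cause)
	b := fmt.Errorf("cannot remove %q: %w", "/tmp/x", cause)

	fmt.Println(a.Error() == b.Error())             // true: same text
	fmt.Println(errors.Unwrap(a), errors.Unwrap(b)) // <nil> disk full
}
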
@@ -49,13 +49,13 @@ func (fs *FS) Init() error {
creds := option.WithCredentialsFile(fs.CredsFilePath)
c, err := storage.NewClient(ctx, creds)
if err != nil {
-return fmt.Errorf("cannot create gcs client with credsFile %q: %s", fs.CredsFilePath, err)
+return fmt.Errorf("cannot create gcs client with credsFile %q: %w", fs.CredsFilePath, err)
}
client = c
} else {
c, err := storage.NewClient(ctx)
if err != nil {
-return fmt.Errorf("cannot create default gcs client: %q", err)
+return fmt.Errorf("cannot create default gcs client: %w", err)
}
client = c
}
@@ -82,7 +82,7 @@ func (fs *FS) ListParts() ([]common.Part, error) {
Prefix: dir,
}
if err := q.SetAttrSelection(selectAttrs); err != nil {
-return nil, fmt.Errorf("error in SetAttrSelection: %s", err)
+return nil, fmt.Errorf("error in SetAttrSelection: %w", err)
}
it := fs.bkt.Objects(ctx, q)
var parts []common.Part
@@ -92,7 +92,7 @@ func (fs *FS) ListParts() ([]common.Part, error) {
return parts, nil
}
if err != nil {
-return nil, fmt.Errorf("error when iterating objects at %q: %s", dir, err)
+return nil, fmt.Errorf("error when iterating objects at %q: %w", dir, err)
}
file := attr.Name
if !strings.HasPrefix(file, dir) {
@@ -116,7 +116,7 @@ func (fs *FS) DeletePart(p common.Part) error {
o := fs.object(p)
ctx := context.Background()
if err := o.Delete(ctx); err != nil {
-return fmt.Errorf("cannot delete %q at %s (remote path %q): %s", p.Path, fs, o.ObjectName(), err)
+return fmt.Errorf("cannot delete %q at %s (remote path %q): %w", p.Path, fs, o.ObjectName(), err)
}
return nil
}
@@ -140,7 +140,7 @@ func (fs *FS) CopyPart(srcFS common.OriginFS, p common.Part) error {
ctx := context.Background()
attr, err := copier.Run(ctx)
if err != nil {
-return fmt.Errorf("cannot copy %q from %s to %s: %s", p.Path, src, fs, err)
+return fmt.Errorf("cannot copy %q from %s to %s: %w", p.Path, src, fs, err)
}
if uint64(attr.Size) != p.Size {
return fmt.Errorf("unexpected %q size after copying from %s to %s; got %d bytes; want %d bytes", p.Path, src, fs, attr.Size, p.Size)
@@ -154,14 +154,14 @@ func (fs *FS) DownloadPart(p common.Part, w io.Writer) error {
ctx := context.Background()
r, err := o.NewReader(ctx)
if err != nil {
-return fmt.Errorf("cannot open reader for %q at %s (remote path %q): %s", p.Path, fs, o.ObjectName(), err)
+return fmt.Errorf("cannot open reader for %q at %s (remote path %q): %w", p.Path, fs, o.ObjectName(), err)
}
n, err := io.Copy(w, r)
if err1 := r.Close(); err1 != nil && err == nil {
err = err1
}
if err != nil {
-return fmt.Errorf("cannot download %q from at %s (remote path %q): %s", p.Path, fs, o.ObjectName(), err)
+return fmt.Errorf("cannot download %q from at %s (remote path %q): %w", p.Path, fs, o.ObjectName(), err)
}
if uint64(n) != p.Size {
return fmt.Errorf("wrong data size downloaded from %q at %s; got %d bytes; want %d bytes", p.Path, fs, n, p.Size)
@@ -179,7 +179,7 @@ func (fs *FS) UploadPart(p common.Part, r io.Reader) error {
err = err1
}
if err != nil {
-return fmt.Errorf("cannot upload data to %q at %s (remote path %q): %s", p.Path, fs, o.ObjectName(), err)
+return fmt.Errorf("cannot upload data to %q at %s (remote path %q): %w", p.Path, fs, o.ObjectName(), err)
}
if uint64(n) != p.Size {
return fmt.Errorf("wrong data size uploaded to %q at %s; got %d bytes; want %d bytes", p.Path, fs, n, p.Size)
@@ -201,7 +201,7 @@ func (fs *FS) DeleteFile(filePath string) error {
ctx := context.Background()
if err := o.Delete(ctx); err != nil {
if err != storage.ErrObjectNotExist {
-return fmt.Errorf("cannot delete %q at %s (remote path %q): %s", filePath, fs, o.ObjectName(), err)
+return fmt.Errorf("cannot delete %q at %s (remote path %q): %w", filePath, fs, o.ObjectName(), err)
}
}
return nil
@@ -218,14 +218,14 @@ func (fs *FS) CreateFile(filePath string, data []byte) error {
n, err := w.Write(data)
if err != nil {
_ = w.Close()
-return fmt.Errorf("cannot upload %d bytes to %q at %s (remote path %q): %s", len(data), filePath, fs, o.ObjectName(), err)
+return fmt.Errorf("cannot upload %d bytes to %q at %s (remote path %q): %w", len(data), filePath, fs, o.ObjectName(), err)
}
if n != len(data) {
_ = w.Close()
return fmt.Errorf("wrong data size uploaded to %q at %s (remote path %q); got %d bytes; want %d bytes", filePath, fs, o.ObjectName(), n, len(data))
}
if err := w.Close(); err != nil {
-return fmt.Errorf("cannot close %q at %s (remote path %q): %s", filePath, fs, o.ObjectName(), err)
+return fmt.Errorf("cannot close %q at %s (remote path %q): %w", filePath, fs, o.ObjectName(), err)
}
return nil
}
@@ -240,7 +240,7 @@ func (fs *FS) HasFile(filePath string) (bool, error) {
if err == storage.ErrObjectNotExist {
return false, nil
}
-return false, fmt.Errorf("unexpected error when obtaining attributes for %q at %s (remote path %q): %s", filePath, fs, o.ObjectName(), err)
+return false, fmt.Errorf("unexpected error when obtaining attributes for %q at %s (remote path %q): %w", filePath, fs, o.ObjectName(), err)
}
return true, nil
}
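HasFile above compares err == storage.ErrObjectNotExist before any wrapping happens, which still works; had the comparison been made after wrapping, errors.Is would be required. A minimal sketch, assuming the cloud.google.com/go/storage sentinel:

package main

import (
	"errors"
	"fmt"

	"cloud.google.com/go/storage"
)

func main() {
	err := fmt.Errorf("cannot read object attrs: %w", storage.ErrObjectNotExist)

	fmt.Println(err == storage.ErrObjectNotExist)          // false: wrapped
	fmt.Println(errors.Is(err, storage.ErrObjectNotExist)) // true: Is unwraps
}
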
@@ -66,7 +66,7 @@ func (fs *FS) Init() error {
}
sess, err := session.NewSessionWithOptions(opts)
if err != nil {
-return fmt.Errorf("cannot create S3 session: %s", err)
+return fmt.Errorf("cannot create S3 session: %w", err)
}

if len(fs.CustomEndpoint) > 0 {
@@ -81,7 +81,7 @@ func (fs *FS) Init() error {
ctx := context.Background()
region, err := s3manager.GetBucketRegion(ctx, sess, fs.Bucket, "")
if err != nil {
-return fmt.Errorf("cannot determine region for bucket %q: %s", fs.Bucket, err)
+return fmt.Errorf("cannot determine region for bucket %q: %w", fs.Bucket, err)
}
sess.Config.WithRegion(region)
logger.Infof("bucket %q is stored at region %q; switching to this region", fs.Bucket, region)
@@ -133,7 +133,7 @@ func (fs *FS) ListParts() ([]common.Part, error) {
err = errOuter
}
if err != nil {
-return nil, fmt.Errorf("error when listing s3 objects inside dir %q: %s", dir, err)
+return nil, fmt.Errorf("error when listing s3 objects inside dir %q: %w", dir, err)
}
return parts, nil
}
@@ -147,7 +147,7 @@ func (fs *FS) DeletePart(p common.Part) error {
}
_, err := fs.s3.DeleteObject(input)
if err != nil {
-return fmt.Errorf("cannot delete %q at %s (remote path %q): %s", p.Path, fs, path, err)
+return fmt.Errorf("cannot delete %q at %s (remote path %q): %w", p.Path, fs, path, err)
}
return nil
}
@@ -175,7 +175,7 @@ func (fs *FS) CopyPart(srcFS common.OriginFS, p common.Part) error {
}
_, err := fs.s3.CopyObject(input)
if err != nil {
-return fmt.Errorf("cannot copy %q from %s to %s (copySource %q): %s", p.Path, src, fs, copySource, err)
+return fmt.Errorf("cannot copy %q from %s to %s (copySource %q): %w", p.Path, src, fs, copySource, err)
}
return nil
}
@@ -189,7 +189,7 @@ func (fs *FS) DownloadPart(p common.Part, w io.Writer) error {
}
o, err := fs.s3.GetObject(input)
if err != nil {
-return fmt.Errorf("cannot open %q at %s (remote path %q): %s", p.Path, fs, path, err)
+return fmt.Errorf("cannot open %q at %s (remote path %q): %w", p.Path, fs, path, err)
}
r := o.Body
n, err := io.Copy(w, r)
@@ -197,7 +197,7 @@ func (fs *FS) DownloadPart(p common.Part, w io.Writer) error {
err = err1
}
if err != nil {
-return fmt.Errorf("cannot download %q from at %s (remote path %q): %s", p.Path, fs, path, err)
+return fmt.Errorf("cannot download %q from at %s (remote path %q): %w", p.Path, fs, path, err)
}
if uint64(n) != p.Size {
return fmt.Errorf("wrong data size downloaded from %q at %s; got %d bytes; want %d bytes", p.Path, fs, n, p.Size)
@@ -218,7 +218,7 @@ func (fs *FS) UploadPart(p common.Part, r io.Reader) error {
}
_, err := fs.uploader.Upload(input)
if err != nil {
-return fmt.Errorf("cannot upoad data to %q at %s (remote path %q): %s", p.Path, fs, path, err)
+return fmt.Errorf("cannot upoad data to %q at %s (remote path %q): %w", p.Path, fs, path, err)
}
if uint64(sr.size) != p.Size {
return fmt.Errorf("wrong data size uploaded to %q at %s; got %d bytes; want %d bytes", p.Path, fs, sr.size, p.Size)
@@ -249,7 +249,7 @@ func (fs *FS) DeleteFile(filePath string) error {
Key: aws.String(path),
}
if _, err := fs.s3.DeleteObject(input); err != nil {
-return fmt.Errorf("cannot delete %q at %s (remote path %q): %s", filePath, fs, path, err)
+return fmt.Errorf("cannot delete %q at %s (remote path %q): %w", filePath, fs, path, err)
}
return nil
}
@@ -269,7 +269,7 @@ func (fs *FS) CreateFile(filePath string, data []byte) error {
}
_, err := fs.uploader.Upload(input)
if err != nil {
-return fmt.Errorf("cannot upoad data to %q at %s (remote path %q): %s", filePath, fs, path, err)
+return fmt.Errorf("cannot upoad data to %q at %s (remote path %q): %w", filePath, fs, path, err)
}
l := int64(len(data))
if sr.size != l {
@@ -290,10 +290,10 @@ func (fs *FS) HasFile(filePath string) (bool, error) {
if ae, ok := err.(awserr.Error); ok && ae.Code() == s3.ErrCodeNoSuchKey {
return false, nil
}
-return false, fmt.Errorf("cannot open %q at %s (remote path %q): %s", filePath, fs, path, err)
+return false, fmt.Errorf("cannot open %q at %s (remote path %q): %w", filePath, fs, path, err)
}
if err := o.Body.Close(); err != nil {
-return false, fmt.Errorf("cannot close %q at %s (remote path %q): %s", filePath, fs, path, err)
+return false, fmt.Errorf("cannot close %q at %s (remote path %q): %w", filePath, fs, path, err)
}
return true, nil
}
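HasFile above still uses a direct type assertion err.(awserr.Error), which is fine because the error has not been wrapped yet at that point; once callers receive the %w-wrapped error, errors.As is the equivalent that sees through the wrapper. A minimal sketch, assuming the aws-sdk-go awserr and s3 packages:

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	cause := awserr.New(s3.ErrCodeNoSuchKey, "no such key", nil)
	err := fmt.Errorf("cannot open %q: %w", "foo/bar", cause)

	var ae awserr.Error
	if errors.As(err, &ae) && ae.Code() == s3.ErrCodeNoSuchKey { // works through %w
		fmt.Println("object is missing:", ae.Code())
	}
}
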
@@ -76,7 +76,7 @@ func MarshalTimestamps(dst []byte, timestamps []int64, precisionBits uint8) (res
func UnmarshalTimestamps(dst []int64, src []byte, mt MarshalType, firstTimestamp int64, itemsCount int) ([]int64, error) {
dst, err := unmarshalInt64Array(dst, src, mt, firstTimestamp, itemsCount)
if err != nil {
-return nil, fmt.Errorf("cannot unmarshal %d timestamps from len(src)=%d bytes: %s", itemsCount, len(src), err)
+return nil, fmt.Errorf("cannot unmarshal %d timestamps from len(src)=%d bytes: %w", itemsCount, len(src), err)
}
return dst, nil
}
@@ -97,7 +97,7 @@ func MarshalValues(dst []byte, values []int64, precisionBits uint8) (result []by
func UnmarshalValues(dst []int64, src []byte, mt MarshalType, firstValue int64, itemsCount int) ([]int64, error) {
dst, err := unmarshalInt64Array(dst, src, mt, firstValue, itemsCount)
if err != nil {
-return nil, fmt.Errorf("cannot unmarshal %d values from len(src)=%d bytes: %s", itemsCount, len(src), err)
+return nil, fmt.Errorf("cannot unmarshal %d values from len(src)=%d bytes: %w", itemsCount, len(src), err)
}
return dst, nil
}
@@ -166,36 +166,36 @@ func unmarshalInt64Array(dst []int64, src []byte, mt MarshalType, firstValue int
bb := bbPool.Get()
bb.B, err = DecompressZSTD(bb.B[:0], src)
if err != nil {
-return nil, fmt.Errorf("cannot decompress zstd data of size %d: %s; src_zstd=%X", len(src), err, src)
+return nil, fmt.Errorf("cannot decompress zstd data of size %d: %w; src_zstd=%X", len(src), err, src)
}
dst, err = unmarshalInt64NearestDelta(dst, bb.B, firstValue, itemsCount)
bbPool.Put(bb)
if err != nil {
-return nil, fmt.Errorf("cannot unmarshal nearest delta data after zstd decompression: %s; src_zstd=%X", err, src)
+return nil, fmt.Errorf("cannot unmarshal nearest delta data after zstd decompression: %w; src_zstd=%X", err, src)
}
return dst, nil
case MarshalTypeZSTDNearestDelta2:
bb := bbPool.Get()
bb.B, err = DecompressZSTD(bb.B[:0], src)
if err != nil {
-return nil, fmt.Errorf("cannot decompress zstd data of size %d: %s; src_zstd=%X", len(src), err, src)
+return nil, fmt.Errorf("cannot decompress zstd data of size %d: %w; src_zstd=%X", len(src), err, src)
}
dst, err = unmarshalInt64NearestDelta2(dst, bb.B, firstValue, itemsCount)
bbPool.Put(bb)
if err != nil {
-return nil, fmt.Errorf("cannot unmarshal nearest delta2 data after zstd decompression: %s; src_zstd=%X", err, src)
+return nil, fmt.Errorf("cannot unmarshal nearest delta2 data after zstd decompression: %w; src_zstd=%X", err, src)
}
return dst, nil
case MarshalTypeNearestDelta:
dst, err = unmarshalInt64NearestDelta(dst, src, firstValue, itemsCount)
if err != nil {
-return nil, fmt.Errorf("cannot unmarshal nearest delta data: %s", err)
+return nil, fmt.Errorf("cannot unmarshal nearest delta data: %w", err)
}
return dst, nil
case MarshalTypeNearestDelta2:
dst, err = unmarshalInt64NearestDelta2(dst, src, firstValue, itemsCount)
if err != nil {
-return nil, fmt.Errorf("cannot unmarshal nearest delta2 data: %s", err)
+return nil, fmt.Errorf("cannot unmarshal nearest delta2 data: %w", err)
}
return dst, nil
case MarshalTypeConst:
@@ -219,7 +219,7 @@ func unmarshalInt64Array(dst []int64, src []byte, mt MarshalType, firstValue int
v := firstValue
tail, d, err := UnmarshalVarInt64(src)
if err != nil {
-return nil, fmt.Errorf("cannot unmarshal delta value for delta const: %s", err)
+return nil, fmt.Errorf("cannot unmarshal delta value for delta const: %w", err)
}
if len(tail) > 0 {
return nil, fmt.Errorf("unexpected trailing data after delta const (d=%d): %d bytes", d, len(tail))
@@ -34,7 +34,7 @@ func BenchmarkUnmarshalGaugeArray(b *testing.B) {
for pb.Next() {
dst, err = unmarshalInt64Array(dst[:0], benchMarshaledGaugeArray, MarshalTypeZSTDNearestDelta, benchGaugeArray[0], len(benchGaugeArray))
if err != nil {
-panic(fmt.Errorf("cannot unmarshal gauge array: %s", err))
+panic(fmt.Errorf("cannot unmarshal gauge array: %w", err))
}
atomic.AddUint64(&Sink, uint64(len(dst)))
}
@@ -81,7 +81,7 @@ func BenchmarkUnmarshalDeltaConstArray(b *testing.B) {
for pb.Next() {
dst, err = unmarshalInt64Array(dst[:0], benchMarshaledDeltaConstArray, MarshalTypeDeltaConst, benchDeltaConstArray[0], len(benchDeltaConstArray))
if err != nil {
-panic(fmt.Errorf("cannot unmarshal delta const array: %s", err))
+panic(fmt.Errorf("cannot unmarshal delta const array: %w", err))
}
atomic.AddUint64(&Sink, uint64(len(dst)))
}
@@ -128,7 +128,7 @@ func BenchmarkUnmarshalConstArray(b *testing.B) {
for pb.Next() {
dst, err = unmarshalInt64Array(dst[:0], benchMarshaledConstArray, MarshalTypeConst, benchConstArray[0], len(benchConstArray))
if err != nil {
-panic(fmt.Errorf("cannot unmarshal const array: %s", err))
+panic(fmt.Errorf("cannot unmarshal const array: %w", err))
}
atomic.AddUint64(&Sink, uint64(len(dst)))
}
@@ -173,7 +173,7 @@ func BenchmarkUnmarshalZeroConstArray(b *testing.B) {
for pb.Next() {
dst, err = unmarshalInt64Array(dst[:0], benchMarshaledZeroConstArray, MarshalTypeConst, benchZeroConstArray[0], len(benchZeroConstArray))
if err != nil {
-panic(fmt.Errorf("cannot unmarshal zero const array: %s", err))
+panic(fmt.Errorf("cannot unmarshal zero const array: %w", err))
}
atomic.AddUint64(&Sink, uint64(len(dst)))
}
@@ -212,7 +212,7 @@ func BenchmarkUnmarshalInt64Array(b *testing.B) {
for pb.Next() {
dst, err = unmarshalInt64Array(dst[:0], benchMarshaledInt64Array, benchMarshalType, benchInt64Array[0], len(benchInt64Array))
if err != nil {
-panic(fmt.Errorf("cannot unmarshal int64 array: %s", err))
+panic(fmt.Errorf("cannot unmarshal int64 array: %w", err))
}
atomic.AddUint64(&Sink, uint64(len(dst)))
}
@@ -229,7 +229,7 @@ func MarshalBytes(dst, b []byte) []byte {
func UnmarshalBytes(src []byte) ([]byte, []byte, error) {
tail, n, err := UnmarshalVarUint64(src)
if err != nil {
-return nil, nil, fmt.Errorf("cannot unmarshal string size: %d", err)
+return nil, nil, fmt.Errorf("cannot unmarshal string size: %w", err)
}
src = tail
if uint64(len(src)) < n {
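The UnmarshalBytes hunk fixes more than wrapping: the old format string applied %d to an error value, which prints reflection noise instead of the message. A minimal sketch of the difference:

package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("unexpected end of stream")
	// %d is a bad verb for an error and garbles the output.
	fmt.Println(fmt.Sprintf("cannot unmarshal string size: %d", err))
	// %w (or %s) renders the message and, with %w, keeps the cause wrapped.
	fmt.Println(fmt.Errorf("cannot unmarshal string size: %w", err))
}
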
@@ -135,7 +135,7 @@ func benchmarkUnmarshalVarInt64s(b *testing.B, maxValue int64) {
for pb.Next() {
tail, err := UnmarshalVarInt64s(dst, data)
if err != nil {
-panic(fmt.Errorf("unexpected error: %s", err))
+panic(fmt.Errorf("unexpected error: %w", err))
}
if len(tail) > 0 {
panic(fmt.Errorf("unexpected non-empty tail with len=%d: %X", len(tail), tail))
@@ -60,7 +60,7 @@ func unmarshalInt64NearestDelta(dst []int64, src []byte, firstValue int64, items

tail, err := UnmarshalVarInt64s(is.A, src)
if err != nil {
-return nil, fmt.Errorf("cannot unmarshal nearest delta from %d bytes; src=%X: %s", len(src), src, err)
+return nil, fmt.Errorf("cannot unmarshal nearest delta from %d bytes; src=%X: %w", len(src), src, err)
}
if len(tail) > 0 {
return nil, fmt.Errorf("unexpected tail left after unmarshaling %d items from %d bytes; tail size=%d; src=%X; tail=%X", itemsCount, len(src), len(tail), src, tail)
@@ -63,7 +63,7 @@ func unmarshalInt64NearestDelta2(dst []int64, src []byte, firstValue int64, item

tail, err := UnmarshalVarInt64s(is.A, src)
if err != nil {
-return nil, fmt.Errorf("cannot unmarshal nearest delta from %d bytes; src=%X: %s", len(src), src, err)
+return nil, fmt.Errorf("cannot unmarshal nearest delta from %d bytes; src=%X: %w", len(src), src, err)
}
if len(tail) > 0 {
return nil, fmt.Errorf("unexpected tail left after unmarshaling %d items from %d bytes; tail size=%d; src=%X; tail=%X", itemsCount, len(src), len(tail), src, tail)
@@ -35,7 +35,7 @@ func BenchmarkUnmarshalInt64NearestDelta2(b *testing.B) {
for pb.Next() {
dst, err = unmarshalInt64NearestDelta2(dst[:0], benchInt64NearestDelta2Data, 0, len(benchInt64Array))
if err != nil {
-panic(fmt.Errorf("unexpected error: %s", err))
+panic(fmt.Errorf("unexpected error: %w", err))
}
atomic.AddUint64(&Sink, uint64(len(dst)))
}
@@ -35,7 +35,7 @@ func BenchmarkUnmarshalInt64NearestDelta(b *testing.B) {
for pb.Next() {
dst, err = unmarshalInt64NearestDelta(dst[:0], benchInt64NearestDeltaData, 0, len(benchInt64Array))
if err != nil {
-panic(fmt.Errorf("unexpected error: %s", err))
+panic(fmt.Errorf("unexpected error: %w", err))
}
atomic.AddUint64(&Sink, uint64(len(dst)))
}
@@ -63,7 +63,7 @@ func OpenReaderAt(path string, offset int64, nocache bool) (*Reader, error) {
n, err := r.f.Seek(offset, io.SeekStart)
if err != nil {
r.MustClose()
-return nil, fmt.Errorf("cannot seek to offset=%d for %q: %s", offset, path, err)
+return nil, fmt.Errorf("cannot seek to offset=%d for %q: %w", offset, path, err)
}
if n != offset {
r.MustClose()
@@ -78,7 +78,7 @@ func OpenReaderAt(path string, offset int64, nocache bool) (*Reader, error) {
func Open(path string, nocache bool) (*Reader, error) {
f, err := os.Open(path)
if err != nil {
-return nil, fmt.Errorf("cannot open file %q: %s", path, err)
+return nil, fmt.Errorf("cannot open file %q: %w", path, err)
}
r := &Reader{
f: f,
@@ -124,7 +124,7 @@ func (r *Reader) Read(p []byte) (int, error) {
return n, err
}
if err := r.st.adviseDontNeed(n, false); err != nil {
-return n, fmt.Errorf("advise error for %q: %s", r.f.Name(), err)
+return n, fmt.Errorf("advise error for %q: %w", r.f.Name(), err)
}
return n, nil
}
@@ -172,12 +172,12 @@ type Writer struct {
func OpenWriterAt(path string, offset int64, nocache bool) (*Writer, error) {
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0600)
if err != nil {
-return nil, fmt.Errorf("cannot open %q: %s", path, err)
+return nil, fmt.Errorf("cannot open %q: %w", path, err)
}
n, err := f.Seek(offset, io.SeekStart)
if err != nil {
_ = f.Close()
-return nil, fmt.Errorf("cannot seek to offset=%d in %q: %s", offset, path, err)
+return nil, fmt.Errorf("cannot seek to offset=%d in %q: %w", offset, path, err)
}
if n != offset {
_ = f.Close()
@@ -192,7 +192,7 @@ func OpenWriterAt(path string, offset int64, nocache bool) (*Writer, error) {
func Create(path string, nocache bool) (*Writer, error) {
f, err := os.Create(path)
if err != nil {
-return nil, fmt.Errorf("cannot create file %q: %s", path, err)
+return nil, fmt.Errorf("cannot create file %q: %w", path, err)
}
return newWriter(f, nocache), nil
}
@@ -248,7 +248,7 @@ func (w *Writer) Write(p []byte) (int, error) {
return n, err
}
if err := w.st.adviseDontNeed(n, true); err != nil {
-return n, fmt.Errorf("advise error for %q: %s", w.f.Name(), err)
+return n, fmt.Errorf("advise error for %q: %w", w.f.Name(), err)
}
return n, nil
}
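Open above wraps the os.Open failure; with %w, the underlying *os.PathError remains extractable, so callers can still get at the operation and path. A minimal sketch:

package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	_, err := os.Open("missing.bin")
	wrapped := fmt.Errorf("cannot open file %q: %w", "missing.bin", err)

	var pe *os.PathError
	if errors.As(wrapped, &pe) { // the typed cause survives the %w wrapper
		fmt.Println(pe.Op, pe.Path) // open missing.bin
	}
}
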
@@ -18,11 +18,11 @@ func (st *streamTracker) adviseDontNeed(n int, fdatasync bool) error {
blockSize := st.length - (st.length % dontNeedBlockSize)
if fdatasync {
if err := unixFdatasync(int(st.fd)); err != nil {
-return fmt.Errorf("unix.Fdatasync error: %s", err)
+return fmt.Errorf("unix.Fdatasync error: %w", err)
}
}
if err := unix.Fadvise(int(st.fd), int64(st.offset), int64(blockSize), unix.FADV_DONTNEED); err != nil {
-return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, %d, %d) error: %s", st.offset, blockSize, err)
+return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, %d, %d) error: %w", st.offset, blockSize, err)
}
st.offset += blockSize
st.length -= blockSize
@@ -35,7 +35,7 @@ func (st *streamTracker) close() error {
}
// Advise the whole file as it shouldn't be cached.
if err := unix.Fadvise(int(st.fd), 0, 0, unix.FADV_DONTNEED); err != nil {
-return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, 0, 0) error: %s", err)
+return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, 0, 0) error: %w", err)
}
return nil
}
@@ -16,11 +16,11 @@ func (st *streamTracker) adviseDontNeed(n int, fdatasync bool) error {
blockSize := st.length - (st.length % dontNeedBlockSize)
if fdatasync {
if err := unix.Fdatasync(int(st.fd)); err != nil {
-return fmt.Errorf("unix.Fdatasync error: %s", err)
+return fmt.Errorf("unix.Fdatasync error: %w", err)
}
}
if err := unix.Fadvise(int(st.fd), int64(st.offset), int64(blockSize), unix.FADV_DONTNEED); err != nil {
-return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, %d, %d) error: %s", st.offset, blockSize, err)
+return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, %d, %d) error: %w", st.offset, blockSize, err)
}
st.offset += blockSize
st.length -= blockSize
@@ -33,7 +33,7 @@ func (st *streamTracker) close() error {
}
// Advise the whole file as it shouldn't be cached.
if err := unix.Fadvise(int(st.fd), 0, 0, unix.FADV_DONTNEED); err != nil {
-return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, 0, 0) error: %s", err)
+return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, 0, 0) error: %w", err)
}
return nil
}
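The fadvise paths wrap raw syscall errors; since syscall.Errno values are comparable sentinels, errors.Is can still detect a specific errno through the wrapper. A minimal sketch:

package main

import (
	"errors"
	"fmt"
	"syscall"
)

func main() {
	err := fmt.Errorf("unix.Fdatasync error: %w", syscall.EBADF)
	fmt.Println(errors.Is(err, syscall.EBADF)) // true: Is unwraps down to the errno
}
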
lib/fs/fs.go (22 additions and 22 deletions)
@@ -48,12 +48,12 @@ func WriteFileAtomically(path string, data []byte) error {
tmpPath := fmt.Sprintf("%s.tmp.%d", path, n)
f, err := filestream.Create(tmpPath, false)
if err != nil {
-return fmt.Errorf("cannot create file %q: %s", tmpPath, err)
+return fmt.Errorf("cannot create file %q: %w", tmpPath, err)
}
if _, err := f.Write(data); err != nil {
f.MustClose()
MustRemoveAll(tmpPath)
-return fmt.Errorf("cannot write %d bytes to file %q: %s", len(data), tmpPath, err)
+return fmt.Errorf("cannot write %d bytes to file %q: %w", len(data), tmpPath, err)
}

// Sync and close the file.
@@ -63,14 +63,14 @@ func WriteFileAtomically(path string, data []byte) error {
if err := os.Rename(tmpPath, path); err != nil {
// do not call MustRemoveAll(tmpPath) here, so the user could inspect
// the file contents during investigating the issue.
-return fmt.Errorf("cannot move %q to %q: %s", tmpPath, path, err)
+return fmt.Errorf("cannot move %q to %q: %w", tmpPath, path, err)
}

// Sync the containing directory, so the file is guaranteed to appear in the directory.
// See https://www.quora.com/When-should-you-fsync-the-containing-directory-in-addition-to-the-file-itself
absPath, err := filepath.Abs(path)
if err != nil {
-return fmt.Errorf("cannot obtain absolute path to %q: %s", path, err)
+return fmt.Errorf("cannot obtain absolute path to %q: %w", path, err)
}
parentDirPath := filepath.Dir(absPath)
MustSyncPath(parentDirPath)
@@ -204,12 +204,12 @@ func MustRemoveAllWithDoneCallback(path string, done func()) {
// HardLinkFiles makes hard links for all the files from srcDir in dstDir.
func HardLinkFiles(srcDir, dstDir string) error {
if err := mkdirSync(dstDir); err != nil {
-return fmt.Errorf("cannot create dstDir=%q: %s", dstDir, err)
+return fmt.Errorf("cannot create dstDir=%q: %w", dstDir, err)
}

d, err := os.Open(srcDir)
if err != nil {
-return fmt.Errorf("cannot open srcDir=%q: %s", srcDir, err)
+return fmt.Errorf("cannot open srcDir=%q: %w", srcDir, err)
}
defer func() {
if err := d.Close(); err != nil {
@@ -219,7 +219,7 @@ func HardLinkFiles(srcDir, dstDir string) error {

fis, err := d.Readdir(-1)
if err != nil {
-return fmt.Errorf("cannot read files in scrDir=%q: %s", srcDir, err)
+return fmt.Errorf("cannot read files in scrDir=%q: %w", srcDir, err)
}
for _, fi := range fis {
if IsDirOrSymlink(fi) {
@@ -248,7 +248,7 @@ func SymlinkRelative(srcPath, dstPath string) error {
baseDir := filepath.Dir(dstPath)
srcPathRel, err := filepath.Rel(baseDir, srcPath)
if err != nil {
-return fmt.Errorf("cannot make relative path for srcPath=%q: %s", srcPath, err)
+return fmt.Errorf("cannot make relative path for srcPath=%q: %w", srcPath, err)
}
return os.Symlink(srcPathRel, dstPath)
}
@@ -260,7 +260,7 @@ func ReadFullData(r io.Reader, data []byte) error {
if err == io.EOF {
return io.EOF
}
-return fmt.Errorf("cannot read %d bytes; read only %d bytes; error: %s", len(data), n, err)
+return fmt.Errorf("cannot read %d bytes; read only %d bytes; error: %w", len(data), n, err)
}
if n != len(data) {
logger.Panicf("BUG: io.ReadFull read only %d bytes; must read %d bytes", n, len(data))
@@ -288,10 +288,10 @@ func CreateFlockFile(dir string) (*os.File, error) {
flockFile := dir + "/flock.lock"
flockF, err := os.Create(flockFile)
if err != nil {
-return nil, fmt.Errorf("cannot create lock file %q: %s", flockFile, err)
+return nil, fmt.Errorf("cannot create lock file %q: %w", flockFile, err)
}
if err := unix.Flock(int(flockF.Fd()), unix.LOCK_EX|unix.LOCK_NB); err != nil {
-return nil, fmt.Errorf("cannot acquire lock on file %q: %s", flockFile, err)
+return nil, fmt.Errorf("cannot acquire lock on file %q: %w", flockFile, err)
}
return flockF, nil
}
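Because every layer in this file now wraps with %w, printing the top-level error still yields the familiar colon-separated chain, while each cause stays individually reachable. A minimal sketch of how the chain composes:

package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("permission denied")
	err = fmt.Errorf("cannot create lock file %q: %w", "/data/flock.lock", err)
	err = fmt.Errorf("cannot create dir %q: %w", "/data", err)

	fmt.Println(err)
	// cannot create dir "/data": cannot create lock file "/data/flock.lock": permission denied
}
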
@@ -154,7 +154,7 @@ func (r *ReaderAt) MustClose() {
func OpenReaderAt(path string) (*ReaderAt, error) {
f, err := os.Open(path)
if err != nil {
-return nil, fmt.Errorf("cannot open file %q for reader: %s", path, err)
+return nil, fmt.Errorf("cannot open file %q for reader: %w", path, err)
}
var r ReaderAt
r.f = f
@@ -162,7 +162,7 @@ func OpenReaderAt(path string) (*ReaderAt, error) {
if !*disableMmap {
fi, err := f.Stat()
if err != nil {
-return nil, fmt.Errorf("error in stat: %s", err)
+return nil, fmt.Errorf("error in stat: %w", err)
}
size := fi.Size()
bm := &pageCacheBitmap{
@@ -178,7 +178,7 @@ func OpenReaderAt(path string) (*ReaderAt, error) {
data, err := mmapFile(f, size)
if err != nil {
MustClose(f)
-return nil, fmt.Errorf("cannot init reader for %q: %s", path, err)
+return nil, fmt.Errorf("cannot init reader for %q: %w", path, err)
}
r.mmapData = data
}
@@ -228,7 +228,7 @@ func mmapFile(f *os.File, size int64) ([]byte, error) {
}
data, err := unix.Mmap(int(f.Fd()), 0, int(size), unix.PROT_READ, unix.MAP_SHARED)
if err != nil {
-return nil, fmt.Errorf("cannot mmap file with size %d: %s", size, err)
+return nil, fmt.Errorf("cannot mmap file with size %d: %w", size, err)
}
return data[:sizeOrig], nil
}
@@ -61,7 +61,7 @@ func (bh *blockHeader) Unmarshal(src []byte) ([]byte, error) {
// Unmarshal commonPrefix
tail, cp, err := encoding.UnmarshalBytes(src)
if err != nil {
-return tail, fmt.Errorf("cannot unmarshal commonPrefix: %s", err)
+return tail, fmt.Errorf("cannot unmarshal commonPrefix: %w", err)
}
bh.commonPrefix = append(bh.commonPrefix[:0], cp...)
src = tail
@@ -69,7 +69,7 @@ func (bh *blockHeader) Unmarshal(src []byte) ([]byte, error) {
// Unmarshal firstItem
tail, fi, err := encoding.UnmarshalBytes(src)
if err != nil {
-return tail, fmt.Errorf("cannot unmarshal firstItem: %s", err)
+return tail, fmt.Errorf("cannot unmarshal firstItem: %w", err)
}
bh.firstItem = append(bh.firstItem[:0], fi...)
src = tail
@@ -81,7 +81,7 @@ func (bh *blockHeader) Unmarshal(src []byte) ([]byte, error) {
bh.marshalType = marshalType(src[0])
src = src[1:]
if err := checkMarshalType(bh.marshalType); err != nil {
-return src, fmt.Errorf("unexpected marshalType: %s", err)
+return src, fmt.Errorf("unexpected marshalType: %w", err)
}

// Unmarshal itemsCount
@@ -148,7 +148,7 @@ func unmarshalBlockHeaders(dst []blockHeader, src []byte, blockHeadersCount int)
for i := 0; i < blockHeadersCount; i++ {
tail, err := dst[dstLen+i].Unmarshal(src)
if err != nil {
-return dst, fmt.Errorf("cannot unmarshal block header: %s", err)
+return dst, fmt.Errorf("cannot unmarshal block header: %w", err)
}
src = tail
}
@@ -131,31 +131,31 @@ func (bsr *blockStreamReader) InitFromFilePart(path string) error {
path = filepath.Clean(path)

if err := bsr.ph.ParseFromPath(path); err != nil {
-return fmt.Errorf("cannot parse partHeader data from %q: %s", path, err)
+return fmt.Errorf("cannot parse partHeader data from %q: %w", path, err)
}

metaindexPath := path + "/metaindex.bin"
metaindexFile, err := filestream.Open(metaindexPath, true)
if err != nil {
-return fmt.Errorf("cannot open metaindex file in stream mode: %s", err)
+return fmt.Errorf("cannot open metaindex file in stream mode: %w", err)
}
bsr.mrs, err = unmarshalMetaindexRows(bsr.mrs[:0], metaindexFile)
metaindexFile.MustClose()
if err != nil {
-return fmt.Errorf("cannot unmarshal metaindex rows from file %q: %s", metaindexPath, err)
+return fmt.Errorf("cannot unmarshal metaindex rows from file %q: %w", metaindexPath, err)
}

indexPath := path + "/index.bin"
indexFile, err := filestream.Open(indexPath, true)
if err != nil {
-return fmt.Errorf("cannot open index file in stream mode: %s", err)
+return fmt.Errorf("cannot open index file in stream mode: %w", err)
}

itemsPath := path + "/items.bin"
itemsFile, err := filestream.Open(itemsPath, true)
if err != nil {
indexFile.MustClose()
-return fmt.Errorf("cannot open items file in stream mode: %s", err)
+return fmt.Errorf("cannot open items file in stream mode: %w", err)
}

lensPath := path + "/lens.bin"
@@ -163,7 +163,7 @@ func (bsr *blockStreamReader) InitFromFilePart(path string) error {
if err != nil {
indexFile.MustClose()
itemsFile.MustClose()
-return fmt.Errorf("cannot open lens file in stream mode: %s", err)
+return fmt.Errorf("cannot open lens file in stream mode: %w", err)
}

bsr.path = path
@@ -200,7 +200,7 @@ func (bsr *blockStreamReader) Next() bool {
err = fmt.Errorf("unexpected last item; got %X; want %X", lastItem, bsr.ph.lastItem)
}
} else {
-err = fmt.Errorf("cannot read the next index block: %s", err)
+err = fmt.Errorf("cannot read the next index block: %w", err)
}
bsr.err = err
return false
@@ -212,18 +212,18 @@ func (bsr *blockStreamReader) Next() bool {

bsr.sb.itemsData = bytesutil.Resize(bsr.sb.itemsData, int(bsr.bh.itemsBlockSize))
if err := fs.ReadFullData(bsr.itemsReader, bsr.sb.itemsData); err != nil {
-bsr.err = fmt.Errorf("cannot read compressed items block with size %d: %s", bsr.bh.itemsBlockSize, err)
+bsr.err = fmt.Errorf("cannot read compressed items block with size %d: %w", bsr.bh.itemsBlockSize, err)
return false
}

bsr.sb.lensData = bytesutil.Resize(bsr.sb.lensData, int(bsr.bh.lensBlockSize))
if err := fs.ReadFullData(bsr.lensReader, bsr.sb.lensData); err != nil {
-bsr.err = fmt.Errorf("cannot read compressed lens block with size %d: %s", bsr.bh.lensBlockSize, err)
+bsr.err = fmt.Errorf("cannot read compressed lens block with size %d: %w", bsr.bh.lensBlockSize, err)
return false
}

if err := bsr.Block.UnmarshalData(&bsr.sb, bsr.bh.firstItem, bsr.bh.commonPrefix, bsr.bh.itemsCount, bsr.bh.marshalType); err != nil {
-bsr.err = fmt.Errorf("cannot unmarshal inmemoryBlock from storageBlock with firstItem=%X, commonPrefix=%X, itemsCount=%d, marshalType=%d: %s",
+bsr.err = fmt.Errorf("cannot unmarshal inmemoryBlock from storageBlock with firstItem=%X, commonPrefix=%X, itemsCount=%d, marshalType=%d: %w",
bsr.bh.firstItem, bsr.bh.commonPrefix, bsr.bh.itemsCount, bsr.bh.marshalType, err)
return false
}
@@ -260,14 +260,14 @@ func (bsr *blockStreamReader) readNextBHS() error {
// Read compressed index block.
bsr.packedBuf = bytesutil.Resize(bsr.packedBuf, int(mr.indexBlockSize))
if err := fs.ReadFullData(bsr.indexReader, bsr.packedBuf); err != nil {
-return fmt.Errorf("cannot read compressed index block with size %d: %s", mr.indexBlockSize, err)
+return fmt.Errorf("cannot read compressed index block with size %d: %w", mr.indexBlockSize, err)
}

// Unpack the compressed index block.
var err error
bsr.unpackedBuf, err = encoding.DecompressZSTD(bsr.unpackedBuf[:0], bsr.packedBuf)
if err != nil {
-return fmt.Errorf("cannot decompress index block with size %d: %s", mr.indexBlockSize, err)
+return fmt.Errorf("cannot decompress index block with size %d: %w", mr.indexBlockSize, err)
}

// Unmarshal the unpacked index block into bsr.bhs.
@@ -280,7 +280,7 @@ func (bsr *blockStreamReader) readNextBHS() error {
for i := 0; i < int(mr.blockHeadersCount); i++ {
tail, err := bsr.bhs[i].Unmarshal(b)
if err != nil {
-return fmt.Errorf("cannot unmarshal blockHeader #%d in the index block #%d: %s", len(bsr.bhs), bsr.mrIdx, err)
+return fmt.Errorf("cannot unmarshal blockHeader #%d in the index block #%d: %w", len(bsr.bhs), bsr.mrIdx, err)
}
b = tail
}

@ -84,7 +84,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre

	// Create the directory
	if err := fs.MkdirAllFailIfExist(path); err != nil {
		return fmt.Errorf("cannot create directory %q: %s", path, err)
		return fmt.Errorf("cannot create directory %q: %w", path, err)
	}

	// Create part files in the directory.
@ -95,7 +95,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre
	metaindexFile, err := filestream.Create(metaindexPath, false)
	if err != nil {
		fs.MustRemoveAll(path)
		return fmt.Errorf("cannot create metaindex file: %s", err)
		return fmt.Errorf("cannot create metaindex file: %w", err)
	}

	indexPath := path + "/index.bin"
@ -103,7 +103,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre
	if err != nil {
		metaindexFile.MustClose()
		fs.MustRemoveAll(path)
		return fmt.Errorf("cannot create index file: %s", err)
		return fmt.Errorf("cannot create index file: %w", err)
	}

	itemsPath := path + "/items.bin"
@ -112,7 +112,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre
		metaindexFile.MustClose()
		indexFile.MustClose()
		fs.MustRemoveAll(path)
		return fmt.Errorf("cannot create items file: %s", err)
		return fmt.Errorf("cannot create items file: %w", err)
	}

	lensPath := path + "/lens.bin"
@ -122,7 +122,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre
		indexFile.MustClose()
		itemsFile.MustClose()
		fs.MustRemoveAll(path)
		return fmt.Errorf("cannot create lens file: %s", err)
		return fmt.Errorf("cannot create lens file: %w", err)
	}

	bsw.reset()

@ -267,7 +267,7 @@ func (ib *inmemoryBlock) UnmarshalData(sb *storageBlock, firstItem, commonPrefix
	switch mt {
	case marshalTypePlain:
		if err := ib.unmarshalDataPlain(sb, firstItem, itemsCount); err != nil {
			return fmt.Errorf("cannot unmarshal plain data: %s", err)
			return fmt.Errorf("cannot unmarshal plain data: %w", err)
		}
		if !ib.isSorted() {
			return fmt.Errorf("plain data block contains unsorted items; items:\n%s", ib.debugItemsString())
@ -289,7 +289,7 @@ func (ib *inmemoryBlock) UnmarshalData(sb *storageBlock, firstItem, commonPrefix
	// Unmarshal lens data.
	bb.B, err = encoding.DecompressZSTD(bb.B[:0], sb.lensData)
	if err != nil {
		return fmt.Errorf("cannot decompress lensData: %s", err)
		return fmt.Errorf("cannot decompress lensData: %w", err)
	}

	lb := getLensBuffer(int(2 * itemsCount))
@ -304,7 +304,7 @@ func (ib *inmemoryBlock) UnmarshalData(sb *storageBlock, firstItem, commonPrefix
	// Unmarshal prefixLens
	tail, err := encoding.UnmarshalVarUint64s(is.A, bb.B)
	if err != nil {
		return fmt.Errorf("cannot unmarshal prefixLens from lensData: %s", err)
		return fmt.Errorf("cannot unmarshal prefixLens from lensData: %w", err)
	}
	prefixLens[0] = 0
	for i, xLen := range is.A {
@ -314,7 +314,7 @@ func (ib *inmemoryBlock) UnmarshalData(sb *storageBlock, firstItem, commonPrefix
	// Unmarshal lens
	tail, err = encoding.UnmarshalVarUint64s(is.A, tail)
	if err != nil {
		return fmt.Errorf("cannot unmarshal lens from lensData: %s", err)
		return fmt.Errorf("cannot unmarshal lens from lensData: %w", err)
	}
	if len(tail) > 0 {
		return fmt.Errorf("unexpected tail left unmarshaling %d lens; tail size=%d; contents=%X", itemsCount, len(tail), tail)
@ -331,7 +331,7 @@ func (ib *inmemoryBlock) UnmarshalData(sb *storageBlock, firstItem, commonPrefix
	// Unmarshal items data.
	bb.B, err = encoding.DecompressZSTD(bb.B[:0], sb.itemsData)
	if err != nil {
		return fmt.Errorf("cannot decompress itemsData: %s", err)
		return fmt.Errorf("cannot decompress itemsData: %w", err)
	}
	data := bytesutil.Resize(ib.data, maxInmemoryBlockSize)
	if n := int(itemsCount) - cap(ib.items); n > 0 {

@ -30,7 +30,7 @@ type PrepareBlockCallback func(data []byte, items [][]byte) ([]byte, [][]byte)
func mergeBlockStreams(ph *partHeader, bsw *blockStreamWriter, bsrs []*blockStreamReader, prepareBlock PrepareBlockCallback, stopCh <-chan struct{}, itemsMerged *uint64) error {
	bsm := bsmPool.Get().(*blockStreamMerger)
	if err := bsm.Init(bsrs, prepareBlock); err != nil {
		return fmt.Errorf("cannot initialize blockStreamMerger: %s", err)
		return fmt.Errorf("cannot initialize blockStreamMerger: %w", err)
	}
	err := bsm.Merge(bsw, ph, stopCh, itemsMerged)
	bsm.reset()
@ -42,7 +42,7 @@ func mergeBlockStreams(ph *partHeader, bsw *blockStreamWriter, bsrs []*blockStre
		if err == errForciblyStopped {
			return err
		}
		return fmt.Errorf("cannot merge %d block streams: %s: %s", len(bsrs), bsrs, err)
		return fmt.Errorf("cannot merge %d block streams: %s: %w", len(bsrs), bsrs, err)
	}

var bsmPool = &sync.Pool{
@ -88,7 +88,7 @@ func (bsm *blockStreamMerger) Init(bsrs []*blockStreamReader, prepareBlock Prepa
		}

		if err := bsr.Error(); err != nil {
			return fmt.Errorf("cannot obtain the next block from blockStreamReader %q: %s", bsr.path, err)
			return fmt.Errorf("cannot obtain the next block from blockStreamReader %q: %w", bsr.path, err)
		}
	}
	heap.Init(&bsm.bsrHeap)
@ -143,7 +143,7 @@ again:
			goto again
		}
		if err := bsr.Error(); err != nil {
			return fmt.Errorf("cannot read storageBlock: %s", err)
			return fmt.Errorf("cannot read storageBlock: %w", err)
		}
		goto again
	}
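
Note the asymmetry in the mergeBlockStreams hunk above: fmt.Errorf accepts at most one %w verb per call in Go 1.13, so only the trailing err becomes wrapped while bsrs keeps its plain %s verb. A small runnable sketch of that shape, with invented values standing in for the real readers and errors:

package main

import (
	"errors"
	"fmt"
)

var errForciblyStopped = errors.New("forcibly stopped")

func main() {
	names := []string{"bsr1", "bsr2"}
	// Only one %w verb is allowed per fmt.Errorf call in Go 1.13, so the
	// slice keeps %s and only the trailing error is wrapped.
	err := fmt.Errorf("cannot merge %d block streams: %s: %w", len(names), names, errForciblyStopped)
	fmt.Println(errors.Is(err, errForciblyStopped)) // true
}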

@ -121,7 +121,7 @@ func testMergeBlockStreamsSerial(blocksToMerge, maxItemsPerBlock int) error {
	var bsw blockStreamWriter
	bsw.InitFromInmemoryPart(&dstIP)
	if err := mergeBlockStreams(&dstIP.ph, &bsw, bsrs, nil, nil, &itemsMerged); err != nil {
		return fmt.Errorf("cannot merge block streams: %s", err)
		return fmt.Errorf("cannot merge block streams: %w", err)
	}
	if itemsMerged != uint64(len(items)) {
		return fmt.Errorf("unexpected itemsMerged; got %d; want %d", itemsMerged, len(items))
@ -130,7 +130,7 @@ func testMergeBlockStreamsSerial(blocksToMerge, maxItemsPerBlock int) error {
	// Verify the resulting part (dstIP) contains all the items
	// in the correct order.
	if err := testCheckItems(&dstIP, items); err != nil {
		return fmt.Errorf("error checking items: %s", err)
		return fmt.Errorf("error checking items: %w", err)
	}
	return nil
}
@ -164,7 +164,7 @@ func testCheckItems(dstIP *inmemoryPart, items []string) error {
		}
	}
	if err := dstBsr.Error(); err != nil {
		return fmt.Errorf("unexpected error in dstBsr: %s", err)
		return fmt.Errorf("unexpected error in dstBsr: %w", err)
	}
	if !reflect.DeepEqual(items, dstItems) {
		return fmt.Errorf("unequal items\ngot\n%q\nwant\n%q", dstItems, items)

@ -44,7 +44,7 @@ func (mr *metaindexRow) Unmarshal(src []byte) ([]byte, error) {
	// Unmarshal firstItem
	tail, fi, err := encoding.UnmarshalBytes(src)
	if err != nil {
		return tail, fmt.Errorf("cannot unmarshal firstItem: %s", err)
		return tail, fmt.Errorf("cannot unmarshal firstItem: %w", err)
	}
	mr.firstItem = append(mr.firstItem[:0], fi...)
	src = tail
@ -85,11 +85,11 @@ func unmarshalMetaindexRows(dst []metaindexRow, r io.Reader) ([]metaindexRow, er
	// since it is quite small.
	compressedData, err := ioutil.ReadAll(r)
	if err != nil {
		return dst, fmt.Errorf("cannot read metaindex data: %s", err)
		return dst, fmt.Errorf("cannot read metaindex data: %w", err)
	}
	data, err := encoding.DecompressZSTD(nil, compressedData)
	if err != nil {
		return dst, fmt.Errorf("cannot decompress metaindex data with size %d bytes: %s", len(compressedData), err)
		return dst, fmt.Errorf("cannot decompress metaindex data with size %d bytes: %w", len(compressedData), err)
	}

	dstLen := len(dst)
@ -102,7 +102,7 @@ func unmarshalMetaindexRows(dst []metaindexRow, r io.Reader) ([]metaindexRow, er
		mr := &dst[len(dst)-1]
		tail, err := mr.Unmarshal(data)
		if err != nil {
			return dst, fmt.Errorf("cannot unmarshal metaindexRow #%d from metaindex data: %s", len(dst)-dstLen, err)
			return dst, fmt.Errorf("cannot unmarshal metaindexRow #%d from metaindex data: %w", len(dst)-dstLen, err)
		}
		data = tail
	}

@ -67,13 +67,13 @@ func openFilePart(path string) (*part, error) {

	var ph partHeader
	if err := ph.ParseFromPath(path); err != nil {
		return nil, fmt.Errorf("cannot parse path to part: %s", err)
		return nil, fmt.Errorf("cannot parse path to part: %w", err)
	}

	metaindexPath := path + "/metaindex.bin"
	metaindexFile, err := filestream.Open(metaindexPath, true)
	if err != nil {
		return nil, fmt.Errorf("cannot open %q: %s", metaindexPath, err)
		return nil, fmt.Errorf("cannot open %q: %w", metaindexPath, err)
	}
	metaindexSize := fs.MustFileSize(metaindexPath)

@ -81,7 +81,7 @@ func openFilePart(path string) (*part, error) {
	indexFile, err := fs.OpenReaderAt(indexPath)
	if err != nil {
		metaindexFile.MustClose()
		return nil, fmt.Errorf("cannot open %q: %s", indexPath, err)
		return nil, fmt.Errorf("cannot open %q: %w", indexPath, err)
	}
	indexSize := fs.MustFileSize(indexPath)

@ -90,7 +90,7 @@ func openFilePart(path string) (*part, error) {
	if err != nil {
		metaindexFile.MustClose()
		indexFile.MustClose()
		return nil, fmt.Errorf("cannot open %q: %s", itemsPath, err)
		return nil, fmt.Errorf("cannot open %q: %w", itemsPath, err)
	}
	itemsSize := fs.MustFileSize(itemsPath)

@ -100,7 +100,7 @@ func openFilePart(path string) (*part, error) {
		metaindexFile.MustClose()
		indexFile.MustClose()
		itemsFile.MustClose()
		return nil, fmt.Errorf("cannot open %q: %s", lensPath, err)
		return nil, fmt.Errorf("cannot open %q: %w", lensPath, err)
	}
	lensSize := fs.MustFileSize(lensPath)

@ -112,7 +112,7 @@ func newPart(ph *partHeader, path string, size uint64, metaindexReader filestrea
	var errors []error
	mrs, err := unmarshalMetaindexRows(nil, metaindexReader)
	if err != nil {
		errors = append(errors, fmt.Errorf("cannot unmarshal metaindexRows: %s", err))
		errors = append(errors, fmt.Errorf("cannot unmarshal metaindexRows: %w", err))
	}
	metaindexReader.MustClose()

@ -131,7 +131,7 @@ func newPart(ph *partHeader, path string, size uint64, metaindexReader filestrea

	if len(errors) > 0 {
		// Return only the first error, since it makes no sense to return all of them.
		err := fmt.Errorf("error opening part %s: %s", p.path, errors[0])
		err := fmt.Errorf("error opening part %s: %w", p.path, errors[0])
		p.MustClose()
		return nil, err
	}
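
Beyond sentinel matching, %w also lets callers recover typed errors from the chain with errors.As. A hedged sketch; partError is a hypothetical type invented for illustration, not one from this codebase:

package main

import (
	"errors"
	"fmt"
)

// partError is a hypothetical typed error used only for illustration.
type partError struct {
	path string
}

func (e *partError) Error() string {
	return "cannot open part " + e.path
}

func main() {
	err := fmt.Errorf("error opening part: %w", &partError{path: "/data/part"})
	var pe *partError
	// errors.As walks the %w chain and fills pe on a type match.
	if errors.As(err, &pe) {
		fmt.Println(pe.path) // /data/part
	}
}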

@ -54,7 +54,7 @@ func (hs *hexString) UnmarshalJSON(data []byte) error {
	data = data[1 : len(data)-1]
	b, err := hex.DecodeString(string(data))
	if err != nil {
		return fmt.Errorf("cannot hex-decode %q: %s", data, err)
		return fmt.Errorf("cannot hex-decode %q: %w", data, err)
	}
	*hs = b
	return nil
@ -101,7 +101,7 @@ func (ph *partHeader) ParseFromPath(partPath string) error {
	// Read itemsCount from partName.
	itemsCount, err := strconv.ParseUint(a[0], 10, 64)
	if err != nil {
		return fmt.Errorf("cannot parse itemsCount from partName %q: %s", partName, err)
		return fmt.Errorf("cannot parse itemsCount from partName %q: %w", partName, err)
	}
	ph.itemsCount = itemsCount
	if ph.itemsCount <= 0 {
@ -111,7 +111,7 @@ func (ph *partHeader) ParseFromPath(partPath string) error {
	// Read blocksCount from partName.
	blocksCount, err := strconv.ParseUint(a[1], 10, 64)
	if err != nil {
		return fmt.Errorf("cannot parse blocksCount from partName %q: %s", partName, err)
		return fmt.Errorf("cannot parse blocksCount from partName %q: %w", partName, err)
	}
	ph.blocksCount = blocksCount
	if ph.blocksCount <= 0 {
@ -126,12 +126,12 @@ func (ph *partHeader) ParseFromPath(partPath string) error {
	metadataPath := partPath + "/metadata.json"
	metadata, err := ioutil.ReadFile(metadataPath)
	if err != nil {
		return fmt.Errorf("cannot read %q: %s", metadataPath, err)
		return fmt.Errorf("cannot read %q: %w", metadataPath, err)
	}

	var phj partHeaderJSON
	if err := json.Unmarshal(metadata, &phj); err != nil {
		return fmt.Errorf("cannot parse %q: %s", metadataPath, err)
		return fmt.Errorf("cannot parse %q: %w", metadataPath, err)
	}
	if ph.itemsCount != phj.ItemsCount {
		return fmt.Errorf("invalid ItemsCount in %q; got %d; want %d", metadataPath, phj.ItemsCount, ph.itemsCount)
@ -161,11 +161,11 @@ func (ph *partHeader) WriteMetadata(partPath string) error {
	}
	metadata, err := json.MarshalIndent(&phj, "", "\t")
	if err != nil {
		return fmt.Errorf("cannot marshal metadata: %s", err)
		return fmt.Errorf("cannot marshal metadata: %w", err)
	}
	metadataPath := partPath + "/metadata.json"
	if err := fs.WriteFileAtomically(metadataPath, metadata); err != nil {
		return fmt.Errorf("cannot create %q: %s", metadataPath, err)
		return fmt.Errorf("cannot create %q: %w", metadataPath, err)
	}
	return nil
}

@ -279,7 +279,7 @@ func (ps *partSearch) nextBHS() error {
		var err error
		idxb, err = ps.readIndexBlock(mr)
		if err != nil {
			return fmt.Errorf("cannot read index block: %s", err)
			return fmt.Errorf("cannot read index block: %w", err)
		}
		ps.idxbCache.Put(idxbKey, idxb)
	}
@ -294,12 +294,12 @@ func (ps *partSearch) readIndexBlock(mr *metaindexRow) (*indexBlock, error) {
	var err error
	ps.indexBuf, err = encoding.DecompressZSTD(ps.indexBuf[:0], ps.compressedIndexBuf)
	if err != nil {
		return nil, fmt.Errorf("cannot decompress index block with size %d bytes: %s", len(ps.compressedIndexBuf), err)
		return nil, fmt.Errorf("cannot decompress index block with size %d bytes: %w", len(ps.compressedIndexBuf), err)
	}
	idxb := getIndexBlock()
	idxb.bhs, err = unmarshalBlockHeaders(idxb.bhs[:0], ps.indexBuf, int(mr.blockHeadersCount))
	if err != nil {
		return nil, fmt.Errorf("cannot unmarshal block headers from index block (offset=%d, size=%d): %s", mr.indexBlockOffset, mr.indexBlockSize, err)
		return nil, fmt.Errorf("cannot unmarshal block headers from index block (offset=%d, size=%d): %w", mr.indexBlockOffset, mr.indexBlockSize, err)
	}
	return idxb, nil
}
@ -340,7 +340,7 @@ func (ps *partSearch) readInmemoryBlock(bh *blockHeader) (*inmemoryBlock, error)

	ib := getInmemoryBlock()
	if err := ib.UnmarshalData(&ps.sb, bh.firstItem, bh.commonPrefix, bh.itemsCount, bh.marshalType); err != nil {
		return nil, fmt.Errorf("cannot unmarshal storage block with %d items: %s", bh.itemsCount, err)
		return nil, fmt.Errorf("cannot unmarshal storage block with %d items: %w", bh.itemsCount, err)
	}

	return ib, nil

@ -72,7 +72,7 @@ func testPartSearchSerial(p *part, items []string) error {
		return fmt.Errorf("unexpected item found past the end of all the items: %X", ps.Item)
	}
	if err := ps.Error(); err != nil {
		return fmt.Errorf("unexpected error: %s", err)
		return fmt.Errorf("unexpected error: %w", err)
	}

	// Search for the item bigger than the items[len(items)-1]
@ -83,7 +83,7 @@ func testPartSearchSerial(p *part, items []string) error {
		return fmt.Errorf("unexpected item found: %X; want nothing", ps.Item)
	}
	if err := ps.Error(); err != nil {
		return fmt.Errorf("unexpected error when searching past the last item: %s", err)
		return fmt.Errorf("unexpected error when searching past the last item: %w", err)
	}

	// Search for inner items
@ -107,7 +107,7 @@ func testPartSearchSerial(p *part, items []string) error {
			return fmt.Errorf("unexpected item found past the end of all the items for idx %d out of %d items; loop %d: got %X", n, len(items), loop, ps.Item)
		}
		if err := ps.Error(); err != nil {
			return fmt.Errorf("unexpected error on loop %d: %s", loop, err)
			return fmt.Errorf("unexpected error on loop %d: %w", loop, err)
		}
	}

@ -121,7 +121,7 @@ func testPartSearchSerial(p *part, items []string) error {
			return fmt.Errorf("unexpected item found at position %d: got %X; want %X", i, ps.Item, item)
		}
		if err := ps.Error(); err != nil {
			return fmt.Errorf("unexpected error when searching for items[%d]=%X: %s", i, item, err)
			return fmt.Errorf("unexpected error when searching for items[%d]=%X: %w", i, item, err)
		}
	}

@ -136,7 +136,7 @@ func testPartSearchSerial(p *part, items []string) error {
			return fmt.Errorf("unexpected item found at position %d: got %X; want %X", i, ps.Item, item)
		}
		if err := ps.Error(); err != nil {
			return fmt.Errorf("unexpected error when searching for items[%d]=%X: %s", i, item, err)
			return fmt.Errorf("unexpected error when searching for items[%d]=%X: %w", i, item, err)
		}
	}

@ -151,7 +151,7 @@ func newTestPart(blocksCount, maxItemsPerBlock int) (*part, []string, error) {
	var bsw blockStreamWriter
	bsw.InitFromInmemoryPart(&ip)
	if err := mergeBlockStreams(&ip.ph, &bsw, bsrs, nil, nil, &itemsMerged); err != nil {
		return nil, nil, fmt.Errorf("cannot merge blocks: %s", err)
		return nil, nil, fmt.Errorf("cannot merge blocks: %w", err)
	}
	if itemsMerged != uint64(len(items)) {
		return nil, nil, fmt.Errorf("unexpected itemsMerged; got %d; want %d", itemsMerged, len(items))
@ -159,7 +159,7 @@ func newTestPart(blocksCount, maxItemsPerBlock int) (*part, []string, error) {
	size := ip.size()
	p, err := newPart(&ip.ph, "partName", size, ip.metaindexData.NewReader(), &ip.indexData, &ip.itemsData, &ip.lensData)
	if err != nil {
		return nil, nil, fmt.Errorf("cannot create part: %s", err)
		return nil, nil, fmt.Errorf("cannot create part: %w", err)
	}
	return p, items, nil
}

@ -169,7 +169,7 @@ func OpenTable(path string, flushCallback func(), prepareBlock PrepareBlockCallb

	// Create a directory for the table if it doesn't exist yet.
	if err := fs.MkdirAllIfNotExist(path); err != nil {
		return nil, fmt.Errorf("cannot create directory %q: %s", path, err)
		return nil, fmt.Errorf("cannot create directory %q: %w", path, err)
	}

	// Protect from concurrent opens.
@ -181,7 +181,7 @@ func OpenTable(path string, flushCallback func(), prepareBlock PrepareBlockCallb
	// Open table parts.
	pws, err := openParts(path)
	if err != nil {
		return nil, fmt.Errorf("cannot open table parts at %q: %s", path, err)
		return nil, fmt.Errorf("cannot open table parts at %q: %w", path, err)
	}

	tb := &Table{
@ -481,13 +481,13 @@ func (tb *Table) convertToV1280() {
func (tb *Table) mergePartsOptimal(pws []*partWrapper, stopCh <-chan struct{}) error {
	for len(pws) > defaultPartsToMerge {
		if err := tb.mergeParts(pws[:defaultPartsToMerge], stopCh, false); err != nil {
			return fmt.Errorf("cannot merge %d parts: %s", defaultPartsToMerge, err)
			return fmt.Errorf("cannot merge %d parts: %w", defaultPartsToMerge, err)
		}
		pws = pws[defaultPartsToMerge:]
	}
	if len(pws) > 0 {
		if err := tb.mergeParts(pws, stopCh, false); err != nil {
			return fmt.Errorf("cannot merge %d parts: %s", len(pws), err)
			return fmt.Errorf("cannot merge %d parts: %w", len(pws), err)
		}
	}
	return nil
@ -761,7 +761,7 @@ func (tb *Table) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isOuterP
			bsr.InitFromInmemoryPart(pw.mp)
		} else {
			if err := bsr.InitFromFilePart(pw.p.path); err != nil {
				return fmt.Errorf("cannot open source part for merging: %s", err)
				return fmt.Errorf("cannot open source part for merging: %w", err)
			}
		}
		bsrs = append(bsrs, bsr)
@ -786,7 +786,7 @@ func (tb *Table) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isOuterP
	bsw := getBlockStreamWriter()
	compressLevel := getCompressLevelForPartItems(outItemsCount, outBlocksCount)
	if err := bsw.InitFromFilePart(tmpPartPath, nocache, compressLevel); err != nil {
		return fmt.Errorf("cannot create destination part %q: %s", tmpPartPath, err)
		return fmt.Errorf("cannot create destination part %q: %w", tmpPartPath, err)
	}

	// Merge parts into a temporary location.
@ -797,10 +797,10 @@ func (tb *Table) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isOuterP
		if err == errForciblyStopped {
			return err
		}
		return fmt.Errorf("error when merging parts to %q: %s", tmpPartPath, err)
		return fmt.Errorf("error when merging parts to %q: %w", tmpPartPath, err)
	}
	if err := ph.WriteMetadata(tmpPartPath); err != nil {
		return fmt.Errorf("cannot write metadata to destination part %q: %s", tmpPartPath, err)
		return fmt.Errorf("cannot write metadata to destination part %q: %w", tmpPartPath, err)
	}

	// Close bsrs (aka source parts).
@ -821,18 +821,18 @@ func (tb *Table) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isOuterP
	fmt.Fprintf(&bb, "%s -> %s\n", tmpPartPath, dstPartPath)
	txnPath := fmt.Sprintf("%s/txn/%016X", tb.path, mergeIdx)
	if err := fs.WriteFileAtomically(txnPath, bb.B); err != nil {
		return fmt.Errorf("cannot create transaction file %q: %s", txnPath, err)
		return fmt.Errorf("cannot create transaction file %q: %w", txnPath, err)
	}

	// Run the created transaction.
	if err := runTransaction(&tb.snapshotLock, tb.path, txnPath); err != nil {
		return fmt.Errorf("cannot execute transaction %q: %s", txnPath, err)
		return fmt.Errorf("cannot execute transaction %q: %w", txnPath, err)
	}

	// Open the merged part.
	newP, err := openFilePart(dstPartPath)
	if err != nil {
		return fmt.Errorf("cannot open merged part %q: %s", dstPartPath, err)
		return fmt.Errorf("cannot open merged part %q: %w", dstPartPath, err)
	}
	newPSize := newP.size
	newPW := &partWrapper{
@ -950,7 +950,7 @@ func openParts(path string) ([]*partWrapper, error) {
	}
	d, err := os.Open(path)
	if err != nil {
		return nil, fmt.Errorf("cannot open directory: %s", err)
		return nil, fmt.Errorf("cannot open directory: %w", err)
	}
	defer fs.MustClose(d)

@ -958,19 +958,19 @@ func openParts(path string) ([]*partWrapper, error) {
	// Snapshots cannot be created yet, so use fakeSnapshotLock.
	var fakeSnapshotLock sync.RWMutex
	if err := runTransactions(&fakeSnapshotLock, path); err != nil {
		return nil, fmt.Errorf("cannot run transactions: %s", err)
		return nil, fmt.Errorf("cannot run transactions: %w", err)
	}

	txnDir := path + "/txn"
	fs.MustRemoveAll(txnDir)
	if err := fs.MkdirAllFailIfExist(txnDir); err != nil {
		return nil, fmt.Errorf("cannot create %q: %s", txnDir, err)
		return nil, fmt.Errorf("cannot create %q: %w", txnDir, err)
	}

	tmpDir := path + "/tmp"
	fs.MustRemoveAll(tmpDir)
	if err := fs.MkdirAllFailIfExist(tmpDir); err != nil {
		return nil, fmt.Errorf("cannot create %q: %s", tmpDir, err)
		return nil, fmt.Errorf("cannot create %q: %w", tmpDir, err)
	}

	fs.MustSyncPath(path)
@ -978,7 +978,7 @@ func openParts(path string) ([]*partWrapper, error) {
	// Open parts.
	fis, err := d.Readdir(-1)
	if err != nil {
		return nil, fmt.Errorf("cannot read directory: %s", err)
		return nil, fmt.Errorf("cannot read directory: %w", err)
	}
	var pws []*partWrapper
	for _, fi := range fis {
@ -995,7 +995,7 @@ func openParts(path string) ([]*partWrapper, error) {
		p, err := openFilePart(partPath)
		if err != nil {
			mustCloseParts(pws)
			return nil, fmt.Errorf("cannot open part %q: %s", partPath, err)
			return nil, fmt.Errorf("cannot open part %q: %w", partPath, err)
		}
		pw := &partWrapper{
			p: p,
@ -1028,11 +1028,11 @@ func (tb *Table) CreateSnapshotAt(dstDir string) error {
	srcDir := tb.path
	srcDir, err = filepath.Abs(srcDir)
	if err != nil {
		return fmt.Errorf("cannot obtain absolute dir for %q: %s", srcDir, err)
		return fmt.Errorf("cannot obtain absolute dir for %q: %w", srcDir, err)
	}
	dstDir, err = filepath.Abs(dstDir)
	if err != nil {
		return fmt.Errorf("cannot obtain absolute dir for %q: %s", dstDir, err)
		return fmt.Errorf("cannot obtain absolute dir for %q: %w", dstDir, err)
	}
	if strings.HasPrefix(dstDir, srcDir+"/") {
		return fmt.Errorf("cannot create snapshot %q inside the data dir %q", dstDir, srcDir)
@ -1047,18 +1047,18 @@ func (tb *Table) CreateSnapshotAt(dstDir string) error {
	defer tb.snapshotLock.Unlock()

	if err := fs.MkdirAllFailIfExist(dstDir); err != nil {
		return fmt.Errorf("cannot create snapshot dir %q: %s", dstDir, err)
		return fmt.Errorf("cannot create snapshot dir %q: %w", dstDir, err)
	}

	d, err := os.Open(srcDir)
	if err != nil {
		return fmt.Errorf("cannot open directory: %s", err)
		return fmt.Errorf("cannot open directory: %w", err)
	}
	defer fs.MustClose(d)

	fis, err := d.Readdir(-1)
	if err != nil {
		return fmt.Errorf("cannot read directory: %s", err)
		return fmt.Errorf("cannot read directory: %w", err)
	}
	for _, fi := range fis {
		fn := fi.Name()
@ -1068,7 +1068,7 @@ func (tb *Table) CreateSnapshotAt(dstDir string) error {
			srcPath := srcDir + "/" + fn
			dstPath := dstDir + "/" + fn
			if err := os.Link(srcPath, dstPath); err != nil {
				return fmt.Errorf("cannot hard link from %q to %q: %s", srcPath, dstPath, err)
				return fmt.Errorf("cannot hard link from %q to %q: %w", srcPath, dstPath, err)
			}
		default:
			// Skip other non-directories.
@ -1082,7 +1082,7 @@ func (tb *Table) CreateSnapshotAt(dstDir string) error {
		srcPartPath := srcDir + "/" + fn
		dstPartPath := dstDir + "/" + fn
		if err := fs.HardLinkFiles(srcPartPath, dstPartPath); err != nil {
			return fmt.Errorf("cannot create hard links from %q to %q: %s", srcPartPath, dstPartPath, err)
			return fmt.Errorf("cannot create hard links from %q to %q: %w", srcPartPath, dstPartPath, err)
		}
	}

@ -1107,13 +1107,13 @@ func runTransactions(txnLock *sync.RWMutex, path string) error {
		if os.IsNotExist(err) {
			return nil
		}
		return fmt.Errorf("cannot open %q: %s", txnDir, err)
		return fmt.Errorf("cannot open %q: %w", txnDir, err)
	}
	defer fs.MustClose(d)

	fis, err := d.Readdir(-1)
	if err != nil {
		return fmt.Errorf("cannot read directory %q: %s", d.Name(), err)
		return fmt.Errorf("cannot read directory %q: %w", d.Name(), err)
	}

	// Sort transaction files by id, since transactions must be ordered.
@ -1129,7 +1129,7 @@ func runTransactions(txnLock *sync.RWMutex, path string) error {
		}
		txnPath := txnDir + "/" + fn
		if err := runTransaction(txnLock, path, txnPath); err != nil {
			return fmt.Errorf("cannot run transaction from %q: %s", txnPath, err)
			return fmt.Errorf("cannot run transaction from %q: %w", txnPath, err)
		}
	}
	return nil
@ -1143,7 +1143,7 @@ func runTransaction(txnLock *sync.RWMutex, pathPrefix, txnPath string) error {

	data, err := ioutil.ReadFile(txnPath)
	if err != nil {
		return fmt.Errorf("cannot read transaction file: %s", err)
		return fmt.Errorf("cannot read transaction file: %w", err)
	}
	if len(data) > 0 && data[len(data)-1] == '\n' {
		data = data[:len(data)-1]
@ -1164,7 +1164,7 @@ func runTransaction(txnLock *sync.RWMutex, pathPrefix, txnPath string) error {
	for _, path := range rmPaths {
		path, err := validatePath(pathPrefix, path)
		if err != nil {
			return fmt.Errorf("invalid path to remove: %s", err)
			return fmt.Errorf("invalid path to remove: %w", err)
		}
		removeWG.Add(1)
		fs.MustRemoveAllWithDoneCallback(path, removeWG.Done)
@ -1175,15 +1175,15 @@ func runTransaction(txnLock *sync.RWMutex, pathPrefix, txnPath string) error {
	dstPath := mvPaths[1]
	srcPath, err = validatePath(pathPrefix, srcPath)
	if err != nil {
		return fmt.Errorf("invalid source path to rename: %s", err)
		return fmt.Errorf("invalid source path to rename: %w", err)
	}
	dstPath, err = validatePath(pathPrefix, dstPath)
	if err != nil {
		return fmt.Errorf("invalid destination path to rename: %s", err)
		return fmt.Errorf("invalid destination path to rename: %w", err)
	}
	if fs.IsPathExist(srcPath) {
		if err := os.Rename(srcPath, dstPath); err != nil {
			return fmt.Errorf("cannot rename %q to %q: %s", srcPath, dstPath, err)
			return fmt.Errorf("cannot rename %q to %q: %w", srcPath, dstPath, err)
		}
	} else if !fs.IsPathExist(dstPath) {
		// Emit info message for the expected condition after unclean shutdown on NFS disk.
@ -1217,12 +1217,12 @@ func validatePath(pathPrefix, path string) (string, error) {

	pathPrefix, err = filepath.Abs(pathPrefix)
	if err != nil {
		return path, fmt.Errorf("cannot determine absolute path for pathPrefix=%q: %s", pathPrefix, err)
		return path, fmt.Errorf("cannot determine absolute path for pathPrefix=%q: %w", pathPrefix, err)
	}

	path, err = filepath.Abs(path)
	if err != nil {
		return path, fmt.Errorf("cannot determine absolute path for %q: %s", path, err)
		return path, fmt.Errorf("cannot determine absolute path for %q: %w", path, err)
	}
	if !strings.HasPrefix(path, pathPrefix+"/") {
		return path, fmt.Errorf("invalid path %q; must start with %q", path, pathPrefix+"/")
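
The hunks above deliberately keep direct comparisons such as err == errForciblyStopped, which remain valid only while the sentinel travels unwrapped. Once an error has been wrapped with %w somewhere along the way, errors.Is is the wrap-safe check, as this sketch (with a stand-in sentinel and path) shows:

package main

import (
	"errors"
	"fmt"
)

var errForciblyStopped = errors.New("forcibly stopped")

func main() {
	wrapped := fmt.Errorf("error when merging parts to %q: %w", "/tmp/part", errForciblyStopped)
	fmt.Println(wrapped == errForciblyStopped)          // false: == sees only the wrapper
	fmt.Println(errors.Is(wrapped, errForciblyStopped)) // true: Is unwraps the %w chain
}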

@ -104,7 +104,7 @@ func (ts *TableSearch) Seek(k []byte) {
	}
	if len(errors) > 0 {
		// Return only the first error, since it makes no sense to return all of them.
		ts.err = fmt.Errorf("cannot seek %q: %s", k, errors[0])
		ts.err = fmt.Errorf("cannot seek %q: %w", k, errors[0])
		return
	}
	if len(ts.psHeap) == 0 {
@ -149,7 +149,7 @@ func (ts *TableSearch) NextItem() bool {
	ts.err = ts.nextBlock()
	if ts.err != nil {
		if ts.err != io.EOF {
			ts.err = fmt.Errorf("cannot obtain the next block to search in the table: %s", ts.err)
			ts.err = fmt.Errorf("cannot obtain the next block to search in the table: %w", ts.err)
		}
		return false
	}

@ -98,7 +98,7 @@ func testTableSearchConcurrent(tb *Table, items []string) error {
	select {
	case err := <-ch:
		if err != nil {
			return fmt.Errorf("unexpected error: %s", err)
			return fmt.Errorf("unexpected error: %w", err)
		}
	case <-time.After(time.Second * 5):
		return fmt.Errorf("timeout")
@ -139,7 +139,7 @@ func testTableSearchSerial(tb *Table, items []string) error {
			return fmt.Errorf("superfluous item found at position %d when searching for %q: %q", n, key, ts.Item)
		}
		if err := ts.Error(); err != nil {
			return fmt.Errorf("unexpected error when searching for %q: %s", key, err)
			return fmt.Errorf("unexpected error when searching for %q: %w", key, err)
		}
	}
	ts.MustClose()
@ -153,13 +153,13 @@ func newTestTable(path string, itemsCount int) (*Table, []string, error) {
	}
	tb, err := OpenTable(path, flushCallback, nil)
	if err != nil {
		return nil, nil, fmt.Errorf("cannot open table: %s", err)
		return nil, nil, fmt.Errorf("cannot open table: %w", err)
	}
	items := make([]string, itemsCount)
	for i := 0; i < itemsCount; i++ {
		item := fmt.Sprintf("%d:%d", rand.Intn(1e9), i)
		if err := tb.AddItems([][]byte{[]byte(item)}); err != nil {
			return nil, nil, fmt.Errorf("cannot add item: %s", err)
			return nil, nil, fmt.Errorf("cannot add item: %w", err)
		}
		items[i] = item
	}

@ -27,7 +27,7 @@ func benchmarkTableSearch(b *testing.B, itemsCount int) {

	tb, items, err := newTestTable(path, itemsCount)
	if err != nil {
		panic(fmt.Errorf("cannot create test table at %q with %d items: %s", path, itemsCount, err))
		panic(fmt.Errorf("cannot create test table at %q with %d items: %w", path, itemsCount, err))
	}

	// Force finishing pending merges
@ -106,7 +106,7 @@ func benchmarkTableSearchKeysExt(b *testing.B, tb *Table, keys [][]byte, stripSu
			}
		}
		if err := ts.Error(); err != nil {
			panic(fmt.Errorf("BUG: unexpected error for searchKeys[%d]=%q: %s", i, searchKey, err))
			panic(fmt.Errorf("BUG: unexpected error for searchKeys[%d]=%q: %w", i, searchKey, err))
		}
	}
}

@ -177,7 +177,7 @@ func tryOpeningQueue(path, name string, chunkFileSize, maxBlockSize, maxPendingB
	}

	if err := fs.MkdirAllIfNotExist(path); err != nil {
		return nil, fmt.Errorf("cannot create directory %q: %s", path, err)
		return nil, fmt.Errorf("cannot create directory %q: %w", path, err)
	}

	// Read metainfo.
@ -193,13 +193,13 @@ func tryOpeningQueue(path, name string, chunkFileSize, maxBlockSize, maxPendingB
		mi.Reset()
		mi.Name = q.name
		if err := mi.WriteToFile(metainfoPath); err != nil {
			return nil, fmt.Errorf("cannot create %q: %s", metainfoPath, err)
			return nil, fmt.Errorf("cannot create %q: %w", metainfoPath, err)
		}

		// Create initial chunk file.
		filepath := q.chunkFilePath(0)
		if err := fs.WriteFileAtomically(filepath, nil); err != nil {
			return nil, fmt.Errorf("cannot create %q: %s", filepath, err)
			return nil, fmt.Errorf("cannot create %q: %w", filepath, err)
		}
	}
	if mi.Name != q.name {
@ -209,7 +209,7 @@ func tryOpeningQueue(path, name string, chunkFileSize, maxBlockSize, maxPendingB
	// Locate reader and writer chunks in the path.
	fis, err := ioutil.ReadDir(path)
	if err != nil {
		return nil, fmt.Errorf("cannot read contents of the directory %q: %s", path, err)
		return nil, fmt.Errorf("cannot read contents of the directory %q: %w", path, err)
	}
	for _, fi := range fis {
		fname := fi.Name()
@ -406,11 +406,11 @@ func (q *Queue) writeBlockLocked(block []byte) error {
		q.writerPath = q.chunkFilePath(q.writerOffset)
		w, err := filestream.Create(q.writerPath, false)
		if err != nil {
			return fmt.Errorf("cannot create chunk file %q: %s", q.writerPath, err)
			return fmt.Errorf("cannot create chunk file %q: %w", q.writerPath, err)
		}
		q.writer = w
		if err := q.flushMetainfo(); err != nil {
			return fmt.Errorf("cannot flush metainfo: %s", err)
			return fmt.Errorf("cannot flush metainfo: %w", err)
		}
	}

@ -421,12 +421,12 @@ func (q *Queue) writeBlockLocked(block []byte) error {
	err := q.write(header.B)
	headerBufPool.Put(header)
	if err != nil {
		return fmt.Errorf("cannot write header with size 8 bytes to %q: %s", q.writerPath, err)
		return fmt.Errorf("cannot write header with size 8 bytes to %q: %w", q.writerPath, err)
	}

	// Write block contents.
	if err := q.write(block); err != nil {
		return fmt.Errorf("cannot write block contents with size %d bytes to %q: %s", len(block), q.writerPath, err)
		return fmt.Errorf("cannot write block contents with size %d bytes to %q: %w", len(block), q.writerPath, err)
	}
	q.blocksWritten.Inc()
	q.bytesWritten.Add(len(block))
@ -474,11 +474,11 @@ func (q *Queue) readBlockLocked(dst []byte) ([]byte, error) {
		q.readerPath = q.chunkFilePath(q.readerOffset)
		r, err := filestream.Open(q.readerPath, true)
		if err != nil {
			return dst, fmt.Errorf("cannot open chunk file %q: %s", q.readerPath, err)
			return dst, fmt.Errorf("cannot open chunk file %q: %w", q.readerPath, err)
		}
		q.reader = r
		if err := q.flushMetainfo(); err != nil {
			return dst, fmt.Errorf("cannot flush metainfo: %s", err)
			return dst, fmt.Errorf("cannot flush metainfo: %w", err)
		}
	}

@ -489,7 +489,7 @@ func (q *Queue) readBlockLocked(dst []byte) ([]byte, error) {
	blockLen := encoding.UnmarshalUint64(header.B)
	headerBufPool.Put(header)
	if err != nil {
		return dst, fmt.Errorf("cannot read header with size 8 bytes from %q: %s", q.readerPath, err)
		return dst, fmt.Errorf("cannot read header with size 8 bytes from %q: %w", q.readerPath, err)
	}
	if blockLen > q.maxBlockSize {
		return dst, fmt.Errorf("too big block size read from %q: %d bytes; cannot exceed %d bytes", q.readerPath, blockLen, q.maxBlockSize)
@ -499,7 +499,7 @@ func (q *Queue) readBlockLocked(dst []byte) ([]byte, error) {
	dstLen := len(dst)
	dst = bytesutil.Resize(dst, dstLen+int(blockLen))
	if err := q.readFull(dst[dstLen:]); err != nil {
		return dst, fmt.Errorf("cannot read block contents with size %d bytes from %q: %s", blockLen, q.readerPath, err)
		return dst, fmt.Errorf("cannot read block contents with size %d bytes from %q: %w", blockLen, q.readerPath, err)
	}
	q.blocksRead.Inc()
	q.bytesRead.Add(int(blockLen))
@ -546,7 +546,7 @@ func (q *Queue) flushMetainfo() error {
	}
	metainfoPath := q.metainfoPath()
	if err := mi.WriteToFile(metainfoPath); err != nil {
		return fmt.Errorf("cannot write metainfo to %q: %s", metainfoPath, err)
		return fmt.Errorf("cannot write metainfo to %q: %w", metainfoPath, err)
	}
	return nil
}
@ -567,10 +567,10 @@ func (mi *metainfo) Reset() {
func (mi *metainfo) WriteToFile(path string) error {
	data, err := json.Marshal(mi)
	if err != nil {
		return fmt.Errorf("cannot marshal persistent queue metainfo %#v: %s", mi, err)
		return fmt.Errorf("cannot marshal persistent queue metainfo %#v: %w", mi, err)
	}
	if err := ioutil.WriteFile(path, data, 0600); err != nil {
		return fmt.Errorf("cannot write persistent queue metainfo to %q: %s", path, err)
		return fmt.Errorf("cannot write persistent queue metainfo to %q: %w", path, err)
	}
	return nil
}
@ -582,10 +582,10 @@ func (mi *metainfo) ReadFromFile(path string) error {
		if os.IsNotExist(err) {
			return err
		}
		return fmt.Errorf("cannot read %q: %s", path, err)
		return fmt.Errorf("cannot read %q: %w", path, err)
	}
	if err := json.Unmarshal(data, mi); err != nil {
		return fmt.Errorf("cannot unmarshal persistent queue metainfo from %q: %s", path, err)
		return fmt.Errorf("cannot unmarshal persistent queue metainfo from %q: %w", path, err)
	}
	if mi.ReaderOffset > mi.WriterOffset {
		return fmt.Errorf("invalid data read from %q: readerOffset=%d cannot exceed writerOffset=%d", path, mi.ReaderOffset, mi.WriterOffset)
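
The metainfo.ReadFromFile hunk above still returns the raw error when os.IsNotExist(err) holds, because os.IsNotExist does not look through fmt.Errorf wrappers. With %w in place, errors.Is(err, os.ErrNotExist) is the wrap-aware equivalent. A minimal sketch of the difference (the path here is invented):

package main

import (
	"errors"
	"fmt"
	"io/ioutil"
	"os"
)

func readMetainfo(path string) error {
	_, err := ioutil.ReadFile(path)
	if err != nil {
		return fmt.Errorf("cannot read %q: %w", path, err)
	}
	return nil
}

func main() {
	err := readMetainfo("/no/such/metainfo.json")
	// errors.Is sees through the %w wrapper, unlike os.IsNotExist.
	fmt.Println(errors.Is(err, os.ErrNotExist)) // true
	fmt.Println(os.IsNotExist(err))             // false: it does not unwrap
}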

@ -495,20 +495,20 @@ func TestQueueLimitedSize(t *testing.T) {

func mustCreateFile(path, contents string) {
	if err := ioutil.WriteFile(path, []byte(contents), 0600); err != nil {
		panic(fmt.Errorf("cannot create file %q with %d bytes contents: %s", path, len(contents), err))
		panic(fmt.Errorf("cannot create file %q with %d bytes contents: %w", path, len(contents), err))
	}
}

func mustCreateDir(path string) {
	mustDeleteDir(path)
	if err := os.MkdirAll(path, 0700); err != nil {
		panic(fmt.Errorf("cannot create dir %q: %s", path, err))
		panic(fmt.Errorf("cannot create dir %q: %w", path, err))
	}
}

func mustDeleteDir(path string) {
	if err := os.RemoveAll(path); err != nil {
		panic(fmt.Errorf("cannot remove dir %q: %s", path, err))
		panic(fmt.Errorf("cannot remove dir %q: %w", path, err))
	}
}

@ -516,6 +516,6 @@ func mustCreateEmptyMetainfo(path, name string) {
	var mi metainfo
	mi.Name = name
	if err := mi.WriteToFile(path + "/metainfo.json"); err != nil {
		panic(fmt.Errorf("cannot create metainfo: %s", err))
		panic(fmt.Errorf("cannot create metainfo: %w", err))
	}
}

@ -93,7 +93,7 @@ func NewConfig(baseDir string, basicAuth *BasicAuthConfig, bearerToken, bearerTo
		path := getFilepath(baseDir, basicAuth.PasswordFile)
		pass, err := readPasswordFromFile(path)
		if err != nil {
			return nil, fmt.Errorf("cannot read password from `password_file`=%q set in `basic_auth` section: %s", basicAuth.PasswordFile, err)
			return nil, fmt.Errorf("cannot read password from `password_file`=%q set in `basic_auth` section: %w", basicAuth.PasswordFile, err)
		}
		password = pass
	}
@ -109,7 +109,7 @@ func NewConfig(baseDir string, basicAuth *BasicAuthConfig, bearerToken, bearerTo
		path := getFilepath(baseDir, bearerTokenFile)
		token, err := readPasswordFromFile(path)
		if err != nil {
			return nil, fmt.Errorf("cannot read bearer token from `bearer_token_file`=%q: %s", bearerTokenFile, err)
			return nil, fmt.Errorf("cannot read bearer token from `bearer_token_file`=%q: %w", bearerTokenFile, err)
		}
		bearerToken = token
	}
@ -131,7 +131,7 @@ func NewConfig(baseDir string, basicAuth *BasicAuthConfig, bearerToken, bearerTo
			keyPath := getFilepath(baseDir, tlsConfig.KeyFile)
			cert, err := tls.LoadX509KeyPair(certPath, keyPath)
			if err != nil {
				return nil, fmt.Errorf("cannot load TLS certificate from `cert_file`=%q, `key_file`=%q: %s", tlsConfig.CertFile, tlsConfig.KeyFile, err)
				return nil, fmt.Errorf("cannot load TLS certificate from `cert_file`=%q, `key_file`=%q: %w", tlsConfig.CertFile, tlsConfig.KeyFile, err)
			}
			tlsCertificate = &cert
		}
@ -139,7 +139,7 @@ func NewConfig(baseDir string, basicAuth *BasicAuthConfig, bearerToken, bearerTo
			path := getFilepath(baseDir, tlsConfig.CAFile)
			data, err := ioutil.ReadFile(path)
			if err != nil {
				return nil, fmt.Errorf("cannot read `ca_file` %q: %s", tlsConfig.CAFile, err)
				return nil, fmt.Errorf("cannot read `ca_file` %q: %w", tlsConfig.CAFile, err)
			}
			tlsRootCA = x509.NewCertPool()
			if !tlsRootCA.AppendCertsFromPEM(data) {

@ -14,7 +14,7 @@ func MarshalWriteRequest(dst []byte, wr *WriteRequest) []byte {
	dst = dst[:dstLen+size]
	n, err := wr.MarshalToSizedBuffer(dst[dstLen:])
	if err != nil {
		panic(fmt.Errorf("BUG: unexpected error when marshaling WriteRequest: %s", err))
		panic(fmt.Errorf("BUG: unexpected error when marshaling WriteRequest: %w", err))
	}
	return dst[:dstLen+n]
}

@ -26,11 +26,11 @@ type RelabelConfig struct {
func LoadRelabelConfigs(path string) ([]ParsedRelabelConfig, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("cannot read `relabel_configs` from %q: %s", path, err)
		return nil, fmt.Errorf("cannot read `relabel_configs` from %q: %w", path, err)
	}
	var rcs []RelabelConfig
	if err := yaml.UnmarshalStrict(data, &rcs); err != nil {
		return nil, fmt.Errorf("cannot unmarshal `relabel_configs` from %q: %s", path, err)
		return nil, fmt.Errorf("cannot unmarshal `relabel_configs` from %q: %w", path, err)
	}
	return ParseRelabelConfigs(nil, rcs)
}
@ -44,7 +44,7 @@ func ParseRelabelConfigs(dst []ParsedRelabelConfig, rcs []RelabelConfig) ([]Pars
		var err error
		dst, err = parseRelabelConfig(dst, &rcs[i])
		if err != nil {
			return dst, fmt.Errorf("error when parsing `relabel_config` #%d: %s", i+1, err)
			return dst, fmt.Errorf("error when parsing `relabel_config` #%d: %w", i+1, err)
		}
	}
	return dst, nil
@ -67,7 +67,7 @@ func parseRelabelConfig(dst []ParsedRelabelConfig, rc *RelabelConfig) ([]ParsedR
		}
		re, err := regexp.Compile(regex)
		if err != nil {
			return dst, fmt.Errorf("cannot parse `regex` %q: %s", regex, err)
			return dst, fmt.Errorf("cannot parse `regex` %q: %w", regex, err)
		}
		regexCompiled = re
	}

@ -94,13 +94,13 @@ func (c *client) ReadData(dst []byte) ([]byte, error) {
		fasthttp.ReleaseResponse(resp)
		if err == fasthttp.ErrTimeout {
			scrapesTimedout.Inc()
			return dst, fmt.Errorf("error when scraping %q with timeout %s: %s", c.scrapeURL, c.hc.ReadTimeout, err)
			return dst, fmt.Errorf("error when scraping %q with timeout %s: %w", c.scrapeURL, c.hc.ReadTimeout, err)
		}
		if err == fasthttp.ErrBodyTooLarge {
			return dst, fmt.Errorf("the response from %q exceeds -promscrape.maxScrapeSize=%d; "+
				"either reduce the response size for the target or increase -promscrape.maxScrapeSize", c.scrapeURL, *maxScrapeSize)
		}
		return dst, fmt.Errorf("error when scraping %q: %s", c.scrapeURL, err)
		return dst, fmt.Errorf("error when scraping %q: %w", c.scrapeURL, err)
	}
	dstLen := len(dst)
	if ce := resp.Header.Peek("Content-Encoding"); string(ce) == "gzip" {
@ -109,7 +109,7 @@ func (c *client) ReadData(dst []byte) ([]byte, error) {
		if err != nil {
			fasthttp.ReleaseResponse(resp)
			scrapesGunzipFailed.Inc()
			return dst, fmt.Errorf("cannot ungzip response from %q: %s", c.scrapeURL, err)
			return dst, fmt.Errorf("cannot ungzip response from %q: %w", c.scrapeURL, err)
		}
		scrapesGunzipped.Inc()
	} else {
@ -146,7 +146,7 @@ again:
		// Retry request if the server closed the keep-alive connection during the first attempt.
		attempts++
		if attempts > 3 {
			return fmt.Errorf("the server closed 3 subsequent connections: %s", err)
			return fmt.Errorf("the server closed 3 subsequent connections: %w", err)
		}
		goto again
	}

@ -99,11 +99,11 @@ type StaticConfig struct {
func loadStaticConfigs(path string) ([]StaticConfig, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("cannot read `static_configs` from %q: %s", path, err)
		return nil, fmt.Errorf("cannot read `static_configs` from %q: %w", path, err)
	}
	var stcs []StaticConfig
	if err := yaml.UnmarshalStrict(data, &stcs); err != nil {
		return nil, fmt.Errorf("cannot unmarshal `static_configs` from %q: %s", path, err)
		return nil, fmt.Errorf("cannot unmarshal `static_configs` from %q: %w", path, err)
	}
	return stcs, nil
}
@ -112,11 +112,11 @@ func loadStaticConfigs(path string) ([]StaticConfig, error) {
func loadConfig(path string) (cfg *Config, data []byte, err error) {
	data, err = ioutil.ReadFile(path)
	if err != nil {
		return nil, nil, fmt.Errorf("cannot read Prometheus config from %q: %s", path, err)
		return nil, nil, fmt.Errorf("cannot read Prometheus config from %q: %w", path, err)
	}
	var cfgObj Config
	if err := cfgObj.parse(data, path); err != nil {
		return nil, nil, fmt.Errorf("cannot parse Prometheus config from %q: %s", path, err)
		return nil, nil, fmt.Errorf("cannot parse Prometheus config from %q: %w", path, err)
	}
	if *dryRun {
		// This is a dirty hack for checking Prometheus config only.
@ -130,18 +130,18 @@ func loadConfig(path string) (cfg *Config, data []byte, err error) {

func (cfg *Config) parse(data []byte, path string) error {
	if err := unmarshalMaybeStrict(data, cfg); err != nil {
		return fmt.Errorf("cannot unmarshal data: %s", err)
		return fmt.Errorf("cannot unmarshal data: %w", err)
	}
	absPath, err := filepath.Abs(path)
	if err != nil {
		return fmt.Errorf("cannot obtain abs path for %q: %s", path, err)
		return fmt.Errorf("cannot obtain abs path for %q: %w", path, err)
	}
	cfg.baseDir = filepath.Dir(absPath)
	for i := range cfg.ScrapeConfigs {
		sc := &cfg.ScrapeConfigs[i]
		swc, err := getScrapeWorkConfig(sc, cfg.baseDir, &cfg.Global)
		if err != nil {
			return fmt.Errorf("cannot parse `scrape_config` #%d: %s", i+1, err)
			return fmt.Errorf("cannot parse `scrape_config` #%d: %w", i+1, err)
		}
		sc.swc = swc
	}
@ -378,17 +378,17 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf
	params := sc.Params
	ac, err := promauth.NewConfig(baseDir, sc.BasicAuth, sc.BearerToken, sc.BearerTokenFile, sc.TLSConfig)
	if err != nil {
		return nil, fmt.Errorf("cannot parse auth config for `job_name` %q: %s", jobName, err)
		return nil, fmt.Errorf("cannot parse auth config for `job_name` %q: %w", jobName, err)
	}
	var relabelConfigs []promrelabel.ParsedRelabelConfig
	relabelConfigs, err = promrelabel.ParseRelabelConfigs(relabelConfigs[:0], sc.RelabelConfigs)
	if err != nil {
		return nil, fmt.Errorf("cannot parse `relabel_configs` for `job_name` %q: %s", jobName, err)
		return nil, fmt.Errorf("cannot parse `relabel_configs` for `job_name` %q: %w", jobName, err)
	}
	var metricRelabelConfigs []promrelabel.ParsedRelabelConfig
	metricRelabelConfigs, err = promrelabel.ParseRelabelConfigs(metricRelabelConfigs[:0], sc.MetricRelabelConfigs)
	if err != nil {
		return nil, fmt.Errorf("cannot parse `metric_relabel_configs` for `job_name` %q: %s", jobName, err)
		return nil, fmt.Errorf("cannot parse `metric_relabel_configs` for `job_name` %q: %w", jobName, err)
	}
	swc := &scrapeWorkConfig{
		scrapeInterval: scrapeInterval,
@ -580,7 +580,7 @@ func appendScrapeWork(dst []ScrapeWork, swc *scrapeWorkConfig, target string, ex
	paramsStr := url.Values(paramsRelabeled).Encode()
	scrapeURL := fmt.Sprintf("%s://%s%s%s%s", schemeRelabeled, addressRelabeled, metricsPathRelabeled, optionalQuestion, paramsStr)
	if _, err := url.Parse(scrapeURL); err != nil {
		return dst, fmt.Errorf("invalid url %q for scheme=%q (%q), target=%q (%q), metrics_path=%q (%q) for `job_name` %q: %s",
		return dst, fmt.Errorf("invalid url %q for scheme=%q (%q), target=%q (%q), metrics_path=%q (%q) for `job_name` %q: %w",
			scrapeURL, swc.scheme, schemeRelabeled, target, addressRelabeled, swc.metricsPath, metricsPathRelabeled, swc.jobName, err)
	}
	// Set missing "instance" label according to https://www.robustperception.io/life-of-a-label

@ -135,7 +135,7 @@ scrape_configs:
func getFileSDScrapeWork(data []byte, path string) ([]ScrapeWork, error) {
	var cfg Config
	if err := cfg.parse(data, path); err != nil {
		return nil, fmt.Errorf("cannot parse data: %s", err)
		return nil, fmt.Errorf("cannot parse data: %w", err)
	}
	return cfg.getFileSDScrapeWork(nil), nil
}
@ -143,7 +143,7 @@ func getFileSDScrapeWork(data []byte, path string) ([]ScrapeWork, error) {
func getStaticScrapeWork(data []byte, path string) ([]ScrapeWork, error) {
	var cfg Config
	if err := cfg.parse(data, path); err != nil {
		return nil, fmt.Errorf("cannot parse data: %s", err)
		return nil, fmt.Errorf("cannot parse data: %w", err)
	}
	return cfg.getStaticScrapeWork(), nil
}

@ -22,7 +22,7 @@ type AgentConfig struct {
func parseAgent(data []byte) (*Agent, error) {
	var a Agent
	if err := json.Unmarshal(data, &a); err != nil {
		return nil, fmt.Errorf("cannot unmarshal agent info from %q: %s", data, err)
		return nil, fmt.Errorf("cannot unmarshal agent info from %q: %w", data, err)
	}
	return &a, nil
}
@ -47,7 +47,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
|
|||
}
|
||||
ac, err := promauth.NewConfig(baseDir, ba, token, "", sdc.TLSConfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot parse auth config: %s", err)
|
||||
return nil, fmt.Errorf("cannot parse auth config: %w", err)
|
||||
}
|
||||
apiServer := sdc.Server
|
||||
if apiServer == "" {
|
||||
|
@ -62,7 +62,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
|
|||
}
|
||||
client, err := discoveryutils.NewClient(apiServer, ac)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot create HTTP client for %q: %s", apiServer, err)
|
||||
return nil, fmt.Errorf("cannot create HTTP client for %q: %w", apiServer, err)
|
||||
}
|
||||
tagSeparator := ","
|
||||
if sdc.TagSeparator != nil {
|
||||
|
@ -92,7 +92,7 @@ func getToken(token *string) (string, error) {
|
|||
if tokenFile := os.Getenv("CONSUL_HTTP_TOKEN_FILE"); tokenFile != "" {
|
||||
data, err := ioutil.ReadFile(tokenFile)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("cannot read consul token file %q; probably, `token` arg is missing in `consul_sd_config`? error: %s", tokenFile, err)
|
||||
return "", fmt.Errorf("cannot read consul token file %q; probably, `token` arg is missing in `consul_sd_config`? error: %w", tokenFile, err)
|
||||
}
|
||||
return string(data), nil
|
||||
}
|
||||
|
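One concrete payoff of the getToken hunk above: the *os.PathError returned by ioutil.ReadFile now stays reachable through the wrapper, so a caller can detect a missing token file specifically. A hypothetical caller-side sketch (readToken is a stand-in, not the actual function from this diff):

package main

import (
	"errors"
	"fmt"
	"io/ioutil"
	"os"
)

func readToken(tokenFile string) (string, error) {
	data, err := ioutil.ReadFile(tokenFile)
	if err != nil {
		// %w keeps the *os.PathError reachable for errors.Is below.
		return "", fmt.Errorf("cannot read consul token file %q: %w", tokenFile, err)
	}
	return string(data), nil
}

func main() {
	_, err := readToken("/nonexistent/consul-token")
	// errors.Is walks the wrap chain down to the underlying file error.
	if errors.Is(err, os.ErrNotExist) {
		fmt.Println("token file does not exist:", err)
	}
}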
@@ -108,7 +108,7 @@ func getDatacenter(client *discoveryutils.Client, dc string) (string, error) {
// See https://www.consul.io/api/agent.html#read-configuration
data, err := client.GetAPIResponse("/v1/agent/self")
if err != nil {
return "", fmt.Errorf("cannot query consul agent info: %s", err)
return "", fmt.Errorf("cannot query consul agent info: %w", err)
}
a, err := parseAgent(data)
if err != nil {

@@ -30,11 +30,11 @@ type SDConfig struct {
func GetLabels(sdc *SDConfig, baseDir string) ([]map[string]string, error) {
cfg, err := getAPIConfig(sdc, baseDir)
if err != nil {
return nil, fmt.Errorf("cannot get API config: %s", err)
return nil, fmt.Errorf("cannot get API config: %w", err)
}
ms, err := getServiceNodesLabels(cfg)
if err != nil {
return nil, fmt.Errorf("error when fetching service nodes data from Consul: %s", err)
return nil, fmt.Errorf("error when fetching service nodes data from Consul: %w", err)
}
return ms, nil
}

@@ -28,11 +28,11 @@ func getAllServiceNodes(cfg *apiConfig) ([]ServiceNode, error) {
// See https://www.consul.io/api/catalog.html#list-services
data, err := getAPIResponse(cfg, "/v1/catalog/services")
if err != nil {
return nil, fmt.Errorf("cannot obtain services: %s", err)
return nil, fmt.Errorf("cannot obtain services: %w", err)
}
var m map[string][]string
if err := json.Unmarshal(data, &m); err != nil {
return nil, fmt.Errorf("cannot parse services response %q: %s", data, err)
return nil, fmt.Errorf("cannot parse services response %q: %w", data, err)
}
serviceNames := make(map[string]bool)
for serviceName, tags := range m {

@@ -125,7 +125,7 @@ func getServiceNodes(cfg *apiConfig, serviceName string) ([]ServiceNode, error)
}
data, err := getAPIResponse(cfg, path)
if err != nil {
return nil, fmt.Errorf("cannot obtain instances for serviceName=%q: %s", serviceName, err)
return nil, fmt.Errorf("cannot obtain instances for serviceName=%q: %w", serviceName, err)
}
return parseServiceNodes(data)
}

@@ -173,7 +173,7 @@ type Check struct {
func parseServiceNodes(data []byte) ([]ServiceNode, error) {
var sns []ServiceNode
if err := json.Unmarshal(data, &sns); err != nil {
return nil, fmt.Errorf("cannot unmarshal ServiceNodes from %q: %s", data, err)
return nil, fmt.Errorf("cannot unmarshal ServiceNodes from %q: %w", data, err)
}
return sns, nil
}
@@ -36,7 +36,7 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) {
if len(region) == 0 {
r, err := getDefaultRegion()
if err != nil {
return nil, fmt.Errorf("cannot determine default ec2 region; probably, `region` param in `ec2_sd_configs` is missing; the error: %s", err)
return nil, fmt.Errorf("cannot determine default ec2 region; probably, `region` param in `ec2_sd_configs` is missing; the error: %w", err)
}
region = r
}

@@ -88,7 +88,7 @@ func getDefaultRegion() (string, error) {
}
var id IdentityDocument
if err := json.Unmarshal(data, &id); err != nil {
return "", fmt.Errorf("cannot parse identity document: %s", err)
return "", fmt.Errorf("cannot parse identity document: %w", err)
}
return id.Region, nil
}

@@ -109,28 +109,28 @@ func getMetadataByPath(apiPath string) ([]byte, error) {
sessionTokenURL := "http://169.254.169.254/latest/api/token"
req, err := http.NewRequest("PUT", sessionTokenURL, nil)
if err != nil {
return nil, fmt.Errorf("cannot create request for IMDSv2 session token at url %q: %s", sessionTokenURL, err)
return nil, fmt.Errorf("cannot create request for IMDSv2 session token at url %q: %w", sessionTokenURL, err)
}
req.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", "60")
resp, err := client.Do(req)
if err != nil {
return nil, fmt.Errorf("cannot obtain IMDSv2 session token from %q; probably, `region` is missing in `ec2_sd_config`; error: %s", sessionTokenURL, err)
return nil, fmt.Errorf("cannot obtain IMDSv2 session token from %q; probably, `region` is missing in `ec2_sd_config`; error: %w", sessionTokenURL, err)
}
token, err := readResponseBody(resp, sessionTokenURL)
if err != nil {
return nil, fmt.Errorf("cannot read IMDSv2 session token from %q; probably, `region` is missing in `ec2_sd_config`; error: %s", sessionTokenURL, err)
return nil, fmt.Errorf("cannot read IMDSv2 session token from %q; probably, `region` is missing in `ec2_sd_config`; error: %w", sessionTokenURL, err)
}

// Use session token in the request.
apiURL := "http://169.254.169.254/latest/" + apiPath
req, err = http.NewRequest("GET", apiURL, nil)
if err != nil {
return nil, fmt.Errorf("cannot create request to %q: %s", apiURL, err)
return nil, fmt.Errorf("cannot create request to %q: %w", apiURL, err)
}
req.Header.Set("X-aws-ec2-metadata-token", string(token))
resp, err = client.Do(req)
if err != nil {
return nil, fmt.Errorf("cannot obtain response for %q: %s", apiURL, err)
return nil, fmt.Errorf("cannot obtain response for %q: %w", apiURL, err)
}
return readResponseBody(resp, apiURL)
}

@@ -158,11 +158,11 @@ func getAPIResponse(cfg *apiConfig, action, nextPageToken string) ([]byte, error
apiURL += "&Version=2013-10-15"
req, err := newSignedRequest(apiURL, "ec2", cfg.region, cfg.accessKey, cfg.secretKey)
if err != nil {
return nil, fmt.Errorf("cannot create signed request: %s", err)
return nil, fmt.Errorf("cannot create signed request: %w", err)
}
resp, err := discoveryutils.GetHTTPClient().Do(req)
if err != nil {
return nil, fmt.Errorf("cannot perform http request to %q: %s", apiURL, err)
return nil, fmt.Errorf("cannot perform http request to %q: %w", apiURL, err)
}
return readResponseBody(resp, apiURL)
}

@@ -171,7 +171,7 @@ func readResponseBody(resp *http.Response, apiURL string) ([]byte, error) {
data, err := ioutil.ReadAll(resp.Body)
_ = resp.Body.Close()
if err != nil {
return nil, fmt.Errorf("cannot read response from %q: %s", apiURL, err)
return nil, fmt.Errorf("cannot read response from %q: %w", apiURL, err)
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("unexpected status code for %q; got %d; want %d; response body: %q",
@@ -34,11 +34,11 @@ type Filter struct {
func GetLabels(sdc *SDConfig) ([]map[string]string, error) {
cfg, err := getAPIConfig(sdc)
if err != nil {
return nil, fmt.Errorf("cannot get API config: %s", err)
return nil, fmt.Errorf("cannot get API config: %w", err)
}
ms, err := getInstancesLabels(cfg)
if err != nil {
return nil, fmt.Errorf("error when fetching instances data from EC2: %s", err)
return nil, fmt.Errorf("error when fetching instances data from EC2: %w", err)
}
return ms, nil
}

@@ -31,11 +31,11 @@ func getReservations(cfg *apiConfig) ([]Reservation, error) {
for {
data, err := getAPIResponse(cfg, action, pageToken)
if err != nil {
return nil, fmt.Errorf("cannot obtain instances: %s", err)
return nil, fmt.Errorf("cannot obtain instances: %w", err)
}
ir, err := parseInstancesResponse(data)
if err != nil {
return nil, fmt.Errorf("cannot parse instance list: %s", err)
return nil, fmt.Errorf("cannot parse instance list: %w", err)
}
rs = append(rs, ir.ReservationSet.Items...)
if len(ir.NextPageToken) == 0 {

@@ -121,7 +121,7 @@ type Tag struct {
func parseInstancesResponse(data []byte) (*InstancesResponse, error) {
var v InstancesResponse
if err := xml.Unmarshal(data, &v); err != nil {
return nil, fmt.Errorf("cannot unmarshal InstancesResponse from %q: %s", data, err)
return nil, fmt.Errorf("cannot unmarshal InstancesResponse from %q: %w", data, err)
}
return &v, nil
}

@@ -24,7 +24,7 @@ func newSignedRequest(apiURL, service, region, accessKey, secretKey string) (*ht
func newSignedRequestWithTime(apiURL, service, region, accessKey, secretKey string, t time.Time) (*http.Request, error) {
uri, err := url.Parse(apiURL)
if err != nil {
return nil, fmt.Errorf("cannot parse %q: %s", apiURL, err)
return nil, fmt.Errorf("cannot parse %q: %w", apiURL, err)
}

// Create canonicalRequest

@@ -65,7 +65,7 @@ func newSignedRequestWithTime(apiURL, service, region, accessKey, secretKey stri

req, err := http.NewRequest("GET", apiURL, nil)
if err != nil {
return nil, fmt.Errorf("cannot create request from %q: %s", apiURL, err)
return nil, fmt.Errorf("cannot create request from %q: %w", apiURL, err)
}
req.Header.Set("x-amz-date", amzdate)
req.Header.Set("Authorization", authHeader)
@@ -36,13 +36,13 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) {
ctx := context.Background()
client, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/compute.readonly")
if err != nil {
return nil, fmt.Errorf("cannot create oauth2 client for gce: %s", err)
return nil, fmt.Errorf("cannot create oauth2 client for gce: %w", err)
}
project := sdc.Project
if len(project) == 0 {
proj, err := getCurrentProject()
if err != nil {
return nil, fmt.Errorf("cannot determine the current project; make sure `vmagent` runs inside GCE; error: %s", err)
return nil, fmt.Errorf("cannot determine the current project; make sure `vmagent` runs inside GCE; error: %w", err)
}
project = proj
logger.Infof("autodetected the current GCE project: %q", project)

@@ -52,7 +52,7 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) {
// Autodetect the current zone.
zone, err := getCurrentZone()
if err != nil {
return nil, fmt.Errorf("cannot determine the current zone; make sure `vmagent` runs inside GCE; error: %s", err)
return nil, fmt.Errorf("cannot determine the current zone; make sure `vmagent` runs inside GCE; error: %w", err)
}
zones = append(zones, zone)
logger.Infof("autodetected the current GCE zone: %q", zone)

@@ -60,7 +60,7 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) {
// Autodetect zones for project.
zs, err := getZonesForProject(client, project, sdc.Filter)
if err != nil {
return nil, fmt.Errorf("cannot obtain zones for project %q: %s", project, err)
return nil, fmt.Errorf("cannot obtain zones for project %q: %w", project, err)
}
zones = zs
logger.Infof("autodetected all the zones for the GCE project %q: %q", project, zones)

@@ -88,7 +88,7 @@ func getAPIResponse(client *http.Client, apiURL, filter, pageToken string) ([]by
apiURL = appendNonEmptyQueryArg(apiURL, "pageToken", pageToken)
resp, err := client.Get(apiURL)
if err != nil {
return nil, fmt.Errorf("cannot query %q: %s", apiURL, err)
return nil, fmt.Errorf("cannot query %q: %w", apiURL, err)
}
return readResponseBody(resp, apiURL)
}

@@ -97,7 +97,7 @@ func readResponseBody(resp *http.Response, apiURL string) ([]byte, error) {
data, err := ioutil.ReadAll(resp.Body)
_ = resp.Body.Close()
if err != nil {
return nil, fmt.Errorf("cannot read response from %q: %s", apiURL, err)
return nil, fmt.Errorf("cannot read response from %q: %w", apiURL, err)
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("unexpected status code for %q; got %d; want %d; response body: %q",

@@ -144,12 +144,12 @@ func getGCEMetadata(path string) ([]byte, error) {
metadataURL := "http://metadata.google.internal/computeMetadata/v1/" + path
req, err := http.NewRequest("GET", metadataURL, nil)
if err != nil {
return nil, fmt.Errorf("cannot create http request for %q: %s", metadataURL, err)
return nil, fmt.Errorf("cannot create http request for %q: %w", metadataURL, err)
}
req.Header.Set("Metadata-Flavor", "Google")
resp, err := discoveryutils.GetHTTPClient().Do(req)
if err != nil {
return nil, fmt.Errorf("cannot obtain response to %q: %s", metadataURL, err)
return nil, fmt.Errorf("cannot obtain response to %q: %w", metadataURL, err)
}
return readResponseBody(resp, metadataURL)
}
@@ -51,7 +51,7 @@ func (z *ZoneYAML) UnmarshalYAML(unmarshal func(interface{}) error) error {
func GetLabels(sdc *SDConfig) ([]map[string]string, error) {
cfg, err := getAPIConfig(sdc)
if err != nil {
return nil, fmt.Errorf("cannot get API config: %s", err)
return nil, fmt.Errorf("cannot get API config: %w", err)
}
ms := getInstancesLabels(cfg)
return ms, nil

@@ -58,11 +58,11 @@ func getInstancesForProjectAndZone(client *http.Client, project, zone, filter st
for {
data, err := getAPIResponse(client, instsURL, filter, pageToken)
if err != nil {
return nil, fmt.Errorf("cannot obtain instances: %s", err)
return nil, fmt.Errorf("cannot obtain instances: %w", err)
}
il, err := parseInstanceList(data)
if err != nil {
return nil, fmt.Errorf("cannot parse instance list from %q: %s", instsURL, err)
return nil, fmt.Errorf("cannot parse instance list from %q: %w", instsURL, err)
}
insts = append(insts, il.Items...)
if len(il.NextPageToken) == 0 {

@@ -125,7 +125,7 @@ type MetadataEntry struct {
func parseInstanceList(data []byte) (*InstanceList, error) {
var il InstanceList
if err := json.Unmarshal(data, &il); err != nil {
return nil, fmt.Errorf("cannot unmarshal InstanceList from %q: %s", data, err)
return nil, fmt.Errorf("cannot unmarshal InstanceList from %q: %w", data, err)
}
return &il, nil
}

@@ -14,11 +14,11 @@ func getZonesForProject(client *http.Client, project, filter string) ([]string,
for {
data, err := getAPIResponse(client, zonesURL, filter, pageToken)
if err != nil {
return nil, fmt.Errorf("cannot obtain zones: %s", err)
return nil, fmt.Errorf("cannot obtain zones: %w", err)
}
zl, err := parseZoneList(data)
if err != nil {
return nil, fmt.Errorf("cannot parse zone list from %q: %s", zonesURL, err)
return nil, fmt.Errorf("cannot parse zone list from %q: %w", zonesURL, err)
}
for _, z := range zl.Items {
zones = append(zones, z.Name)

@@ -45,7 +45,7 @@ type Zone struct {
func parseZoneList(data []byte) (*ZoneList, error) {
var zl ZoneList
if err := json.Unmarshal(data, &zl); err != nil {
return nil, fmt.Errorf("cannot unmarshal ZoneList from %q: %s", data, err)
return nil, fmt.Errorf("cannot unmarshal ZoneList from %q: %w", data, err)
}
return &zl, nil
}
@@ -29,7 +29,7 @@ func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
ac, err := promauth.NewConfig(baseDir, sdc.BasicAuth, sdc.BearerToken, sdc.BearerTokenFile, sdc.TLSConfig)
if err != nil {
return nil, fmt.Errorf("cannot parse auth config: %s", err)
return nil, fmt.Errorf("cannot parse auth config: %w", err)
}
apiServer := sdc.APIServer
if len(apiServer) == 0 {

@@ -52,13 +52,13 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
}
acNew, err := promauth.NewConfig(".", nil, "", "/var/run/secrets/kubernetes.io/serviceaccount/token", &tlsConfig)
if err != nil {
return nil, fmt.Errorf("cannot initialize service account auth: %s; probably, `kubernetes_sd_config->api_server` is missing in Prometheus configs?", err)
return nil, fmt.Errorf("cannot initialize service account auth: %w; probably, `kubernetes_sd_config->api_server` is missing in Prometheus configs?", err)
}
ac = acNew
}
client, err := discoveryutils.NewClient(apiServer, ac)
if err != nil {
return nil, fmt.Errorf("cannot create HTTP client for %q: %s", apiServer, err)
return nil, fmt.Errorf("cannot create HTTP client for %q: %w", apiServer, err)
}
cfg := &apiConfig{
client: client,

@@ -53,11 +53,11 @@ func getEndpoints(cfg *apiConfig) ([]Endpoints, error) {
func getEndpointsByPath(cfg *apiConfig, path string) ([]Endpoints, error) {
data, err := getAPIResponse(cfg, "endpoints", path)
if err != nil {
return nil, fmt.Errorf("cannot obtain endpoints data from API server: %s", err)
return nil, fmt.Errorf("cannot obtain endpoints data from API server: %w", err)
}
epl, err := parseEndpointsList(data)
if err != nil {
return nil, fmt.Errorf("cannot parse endpoints response from API server: %s", err)
return nil, fmt.Errorf("cannot parse endpoints response from API server: %w", err)
}
return epl.Items, nil
}

@@ -119,7 +119,7 @@ type EndpointPort struct {
func parseEndpointsList(data []byte) (*EndpointsList, error) {
var esl EndpointsList
if err := json.Unmarshal(data, &esl); err != nil {
return nil, fmt.Errorf("cannot unmarshal EndpointsList from %q: %s", data, err)
return nil, fmt.Errorf("cannot unmarshal EndpointsList from %q: %w", data, err)
}
return &esl, nil
}

@@ -43,11 +43,11 @@ func getIngresses(cfg *apiConfig) ([]Ingress, error) {
func getIngressesByPath(cfg *apiConfig, path string) ([]Ingress, error) {
data, err := getAPIResponse(cfg, "ingress", path)
if err != nil {
return nil, fmt.Errorf("cannot obtain ingresses data from API server: %s", err)
return nil, fmt.Errorf("cannot obtain ingresses data from API server: %w", err)
}
igl, err := parseIngressList(data)
if err != nil {
return nil, fmt.Errorf("cannot parse ingresses response from API server: %s", err)
return nil, fmt.Errorf("cannot parse ingresses response from API server: %w", err)
}
return igl.Items, nil
}

@@ -108,7 +108,7 @@ type HTTPIngressPath struct {
func parseIngressList(data []byte) (*IngressList, error) {
var il IngressList
if err := json.Unmarshal(data, &il); err != nil {
return nil, fmt.Errorf("cannot unmarshal IngressList from %q: %s", data, err)
return nil, fmt.Errorf("cannot unmarshal IngressList from %q: %w", data, err)
}
return &il, nil
}
@@ -39,7 +39,7 @@ type Selector struct {
func GetLabels(sdc *SDConfig, baseDir string) ([]map[string]string, error) {
cfg, err := getAPIConfig(sdc, baseDir)
if err != nil {
return nil, fmt.Errorf("cannot create API config: %s", err)
return nil, fmt.Errorf("cannot create API config: %w", err)
}
switch sdc.Role {
case "node":

@@ -11,11 +11,11 @@ import (
func getNodesLabels(cfg *apiConfig) ([]map[string]string, error) {
data, err := getAPIResponse(cfg, "node", "/api/v1/nodes")
if err != nil {
return nil, fmt.Errorf("cannot obtain nodes data from API server: %s", err)
return nil, fmt.Errorf("cannot obtain nodes data from API server: %w", err)
}
nl, err := parseNodeList(data)
if err != nil {
return nil, fmt.Errorf("cannot parse nodes response from API server: %s", err)
return nil, fmt.Errorf("cannot parse nodes response from API server: %w", err)
}
var ms []map[string]string
for _, n := range nl.Items {

@@ -67,7 +67,7 @@ type NodeDaemonEndpoints struct {
func parseNodeList(data []byte) (*NodeList, error) {
var nl NodeList
if err := json.Unmarshal(data, &nl); err != nil {
return nil, fmt.Errorf("cannot unmarshal NodeList from %q: %s", data, err)
return nil, fmt.Errorf("cannot unmarshal NodeList from %q: %w", data, err)
}
return &nl, nil
}

@@ -47,11 +47,11 @@ func getPods(cfg *apiConfig) ([]Pod, error) {
func getPodsByPath(cfg *apiConfig, path string) ([]Pod, error) {
data, err := getAPIResponse(cfg, "pod", path)
if err != nil {
return nil, fmt.Errorf("cannot obtain pods data from API server: %s", err)
return nil, fmt.Errorf("cannot obtain pods data from API server: %w", err)
}
pl, err := parsePodList(data)
if err != nil {
return nil, fmt.Errorf("cannot parse pods response from API server: %s", err)
return nil, fmt.Errorf("cannot parse pods response from API server: %w", err)
}
return pl.Items, nil
}

@@ -118,7 +118,7 @@ type PodCondition struct {
func parsePodList(data []byte) (*PodList, error) {
var pl PodList
if err := json.Unmarshal(data, &pl); err != nil {
return nil, fmt.Errorf("cannot unmarshal PodList from %q: %s", data, err)
return nil, fmt.Errorf("cannot unmarshal PodList from %q: %w", data, err)
}
return &pl, nil
}

@@ -45,11 +45,11 @@ func getServices(cfg *apiConfig) ([]Service, error) {
func getServicesByPath(cfg *apiConfig, path string) ([]Service, error) {
data, err := getAPIResponse(cfg, "service", path)
if err != nil {
return nil, fmt.Errorf("cannot obtain services data from API server: %s", err)
return nil, fmt.Errorf("cannot obtain services data from API server: %w", err)
}
sl, err := parseServiceList(data)
if err != nil {
return nil, fmt.Errorf("cannot parse services response from API server: %s", err)
return nil, fmt.Errorf("cannot parse services response from API server: %w", err)
}
return sl.Items, nil
}

@@ -92,7 +92,7 @@ type ServicePort struct {
func parseServiceList(data []byte) (*ServiceList, error) {
var sl ServiceList
if err := json.Unmarshal(data, &sl); err != nil {
return nil, fmt.Errorf("cannot unmarshal ServiceList from %q: %s", data, err)
return nil, fmt.Errorf("cannot unmarshal ServiceList from %q: %w", data, err)
}
return &sl, nil
}
@@ -112,13 +112,13 @@ func (c *Client) GetAPIResponse(path string) ([]byte, error) {
var resp fasthttp.Response
// There is no need in calling DoTimeout, since the timeout is already set in c.hc.ReadTimeout above.
if err := c.hc.Do(&req, &resp); err != nil {
return nil, fmt.Errorf("cannot fetch %q: %s", requestURL, err)
return nil, fmt.Errorf("cannot fetch %q: %w", requestURL, err)
}
var data []byte
if ce := resp.Header.Peek("Content-Encoding"); string(ce) == "gzip" {
dst, err := fasthttp.AppendGunzipBytes(nil, resp.Body())
if err != nil {
return nil, fmt.Errorf("cannot ungzip response from %q: %s", requestURL, err)
return nil, fmt.Errorf("cannot ungzip response from %q: %w", requestURL, err)
}
data = dst
} else {

@@ -32,7 +32,7 @@ func TestScrapeWorkScrapeInternalFailure(t *testing.T) {
var pushDataErr error
sw.PushData = func(wr *prompbmarshal.WriteRequest) {
if err := expectEqualTimeseries(wr.Timeseries, timeseriesExpected); err != nil {
pushDataErr = fmt.Errorf("unexpected data pushed: %s\ngot\n%#v\nwant\n%#v", err, wr.Timeseries, timeseriesExpected)
pushDataErr = fmt.Errorf("unexpected data pushed: %w\ngot\n%#v\nwant\n%#v", err, wr.Timeseries, timeseriesExpected)
}
pushDataCalls++
}

@@ -72,7 +72,7 @@ func TestScrapeWorkScrapeInternalSuccess(t *testing.T) {
var pushDataErr error
sw.PushData = func(wr *prompbmarshal.WriteRequest) {
if err := expectEqualTimeseries(wr.Timeseries, timeseriesExpected); err != nil {
pushDataErr = fmt.Errorf("unexpected data pushed: %s\ngot\n%#v\nwant\n%#v", err, wr.Timeseries, timeseriesExpected)
pushDataErr = fmt.Errorf("unexpected data pushed: %w\ngot\n%#v\nwant\n%#v", err, wr.Timeseries, timeseriesExpected)
}
pushDataCalls++
}

@@ -336,11 +336,11 @@ func parseData(data string) []prompbmarshal.TimeSeries {
func expectEqualTimeseries(tss, tssExpected []prompbmarshal.TimeSeries) error {
m, err := timeseriesToMap(tss)
if err != nil {
return fmt.Errorf("invalid generated timeseries: %s", err)
return fmt.Errorf("invalid generated timeseries: %w", err)
}
mExpected, err := timeseriesToMap(tssExpected)
if err != nil {
return fmt.Errorf("invalid expected timeseries: %s", err)
return fmt.Errorf("invalid expected timeseries: %w", err)
}
if len(m) != len(mExpected) {
return fmt.Errorf("unexpected time series len; got %d; want %d", len(m), len(mExpected))

@@ -42,7 +42,7 @@ vm_tcplistener_write_calls_total{name="https", addr=":443"} 132356
timestamp := int64(0)
for pb.Next() {
if err := sw.scrapeInternal(timestamp); err != nil {
panic(fmt.Errorf("unexpected error: %s", err))
panic(fmt.Errorf("unexpected error: %w", err))
}
timestamp++
}

@@ -63,7 +63,7 @@ func ParseColumnDescriptors(s string) ([]ColumnDescriptor, error) {
}
pos, err := strconv.Atoi(a[0])
if err != nil {
return nil, fmt.Errorf("cannot parse <column_pos> part from the entry #%d %q: %s", i+1, col, err)
return nil, fmt.Errorf("cannot parse <column_pos> part from the entry #%d %q: %w", i+1, col, err)
}
if pos <= 0 {
return nil, fmt.Errorf("<column_pos> cannot be smaller than 1; got %d for entry #%d %q", pos, i+1, col)

@@ -82,7 +82,7 @@ func ParseColumnDescriptors(s string) ([]ColumnDescriptor, error) {
}
parseTimestamp, err := parseTimeFormat(a[2])
if err != nil {
return nil, fmt.Errorf("cannot parse time format from the entry #%d %q: %s", i+1, col, err)
return nil, fmt.Errorf("cannot parse time format from the entry #%d %q: %w", i+1, col, err)
}
cd.ParseTimestamp = parseTimestamp
hasTimeCol = true

@@ -156,7 +156,7 @@ func parseUnixTimestampNanoseconds(s string) (int64, error) {
func parseRFC3339(s string) (int64, error) {
t, err := time.Parse(time.RFC3339, s)
if err != nil {
return 0, fmt.Errorf("cannot parse time in RFC3339 from %q: %s", s, err)
return 0, fmt.Errorf("cannot parse time in RFC3339 from %q: %w", s, err)
}
return t.UnixNano() / 1e6, nil
}

@@ -165,7 +165,7 @@ func newParseCustomTimeFunc(format string) func(s string) (int64, error) {
return func(s string) (int64, error) {
t, err := time.Parse(format, s)
if err != nil {
return 0, fmt.Errorf("cannot parse time in custom format %q from %q: %s", format, s, err)
return 0, fmt.Errorf("cannot parse time in custom format %q from %q: %w", format, s, err)
}
return t.UnixNano() / 1e6, nil
}

@@ -91,7 +91,7 @@ func parseRows(sc *scanner, dst []Row, tags []Tag, metrics []metric, cds []Colum
if parseTimestamp := cd.ParseTimestamp; parseTimestamp != nil {
timestamp, err := parseTimestamp(sc.Column)
if err != nil {
sc.Error = fmt.Errorf("cannot parse timestamp from %q: %s", sc.Column, err)
sc.Error = fmt.Errorf("cannot parse timestamp from %q: %w", sc.Column, err)
break
}
r.Timestamp = timestamp

@@ -30,13 +30,13 @@ func ParseStream(req *http.Request, callback func(rows []Row) error) error {
format := q.Get("format")
cds, err := ParseColumnDescriptors(format)
if err != nil {
return fmt.Errorf("cannot parse the provided csv format: %s", err)
return fmt.Errorf("cannot parse the provided csv format: %w", err)
}
r := req.Body
if req.Header.Get("Content-Encoding") == "gzip" {
zr, err := common.GetGzipReader(r)
if err != nil {
return fmt.Errorf("cannot read gzipped csv data: %s", err)
return fmt.Errorf("cannot read gzipped csv data: %w", err)
}
defer common.PutGzipReader(zr)
r = zr

@@ -60,7 +60,7 @@ func (ctx *streamContext) Read(r io.Reader, cds []ColumnDescriptor) bool {
if ctx.err != nil {
if ctx.err != io.EOF {
readErrors.Inc()
ctx.err = fmt.Errorf("cannot read csv data: %s", ctx.err)
ctx.err = fmt.Errorf("cannot read csv data: %w", ctx.err)
}
return false
}

@@ -75,7 +75,7 @@ func (r *Row) unmarshal(s string, tagsPool []Tag) ([]Tag, error) {
var err error
tagsPool, err = unmarshalTags(tagsPool, metricAndTags[n+1:])
if err != nil {
return tagsPool, fmt.Errorf("cannot umarshal tags: %s", err)
return tagsPool, fmt.Errorf("cannot umarshal tags: %w", err)
}
tags := tagsPool[tagsStart:]
r.Tags = tags[:len(tags):len(tags)]
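Beyond sentinel matching, %w also preserves concrete error types for errors.As. A final illustrative sketch; the StatusError type and fetch function are invented for this example and are not part of the diff:

package main

import (
	"errors"
	"fmt"
)

// StatusError is a hypothetical typed error carrying an HTTP status code.
type StatusError struct {
	Code int
	Msg  string
}

func (e *StatusError) Error() string {
	return fmt.Sprintf("status %d: %s", e.Code, e.Msg)
}

func fetch() error {
	// Wrap the typed error with %w so its type survives the wrapping.
	return fmt.Errorf("cannot fetch target: %w", &StatusError{Code: 503, Msg: "service unavailable"})
}

func main() {
	err := fetch()
	var se *StatusError
	// errors.As finds the *StatusError anywhere on the wrap chain.
	if errors.As(err, &se) {
		fmt.Println("got status code:", se.Code) // 503
	}
}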
Some files were not shown because too many files have changed in this diff.