mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2024-11-21 14:44:00 +00:00
all: use %w instead of %s for wrapping errors in fmt.Errorf
This will simplify examining the returned errors such as httpserver.ErrorWithStatusCode . See https://blog.golang.org/go1.13-errors for details.
This commit is contained in:
parent
586c5be404
commit
d5dddb0953
146 changed files with 826 additions and 826 deletions
|
@ -162,7 +162,7 @@ func getTLSConfig(argIdx int) (*tls.Config, error) {
|
||||||
}
|
}
|
||||||
cfg, err := promauth.NewConfig(".", nil, "", "", tlsConfig)
|
cfg, err := promauth.NewConfig(".", nil, "", "", tlsConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot populate TLS config: %s", err)
|
return nil, fmt.Errorf("cannot populate TLS config: %w", err)
|
||||||
}
|
}
|
||||||
tlsCfg := cfg.NewTLSConfig()
|
tlsCfg := cfg.NewTLSConfig()
|
||||||
return tlsCfg, nil
|
return tlsCfg, nil
|
||||||
|
|
|
@ -33,7 +33,7 @@ func loadRelabelConfigs() (*relabelConfigs, error) {
|
||||||
if *relabelConfigPathGlobal != "" {
|
if *relabelConfigPathGlobal != "" {
|
||||||
global, err := promrelabel.LoadRelabelConfigs(*relabelConfigPathGlobal)
|
global, err := promrelabel.LoadRelabelConfigs(*relabelConfigPathGlobal)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot load -remoteWrite.relabelConfig=%q: %s", *relabelConfigPathGlobal, err)
|
return nil, fmt.Errorf("cannot load -remoteWrite.relabelConfig=%q: %w", *relabelConfigPathGlobal, err)
|
||||||
}
|
}
|
||||||
rcs.global = global
|
rcs.global = global
|
||||||
}
|
}
|
||||||
|
@ -45,7 +45,7 @@ func loadRelabelConfigs() (*relabelConfigs, error) {
|
||||||
for i, path := range *relabelConfigPaths {
|
for i, path := range *relabelConfigPaths {
|
||||||
prc, err := promrelabel.LoadRelabelConfigs(path)
|
prc, err := promrelabel.LoadRelabelConfigs(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot load relabel configs from -remoteWrite.urlRelabelConfig=%q: %s", path, err)
|
return nil, fmt.Errorf("cannot load relabel configs from -remoteWrite.urlRelabelConfig=%q: %w", path, err)
|
||||||
}
|
}
|
||||||
rcs.perURL[i] = prc
|
rcs.perURL[i] = prc
|
||||||
}
|
}
|
||||||
|
|
|
@ -72,7 +72,7 @@ func (ar *AlertingRule) Exec(ctx context.Context, q datasource.Querier, series b
|
||||||
ar.lastExecError = err
|
ar.lastExecError = err
|
||||||
ar.lastExecTime = time.Now()
|
ar.lastExecTime = time.Now()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to execute query %q: %s", ar.Expr, err)
|
return nil, fmt.Errorf("failed to execute query %q: %w", ar.Expr, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
for h, a := range ar.alerts {
|
for h, a := range ar.alerts {
|
||||||
|
@ -103,7 +103,7 @@ func (ar *AlertingRule) Exec(ctx context.Context, q datasource.Querier, series b
|
||||||
a, err := ar.newAlert(m, ar.lastExecTime)
|
a, err := ar.newAlert(m, ar.lastExecTime)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ar.lastExecError = err
|
ar.lastExecError = err
|
||||||
return nil, fmt.Errorf("failed to create alert: %s", err)
|
return nil, fmt.Errorf("failed to create alert: %w", err)
|
||||||
}
|
}
|
||||||
a.ID = h
|
a.ID = h
|
||||||
a.State = notifier.StatePending
|
a.State = notifier.StatePending
|
||||||
|
@ -363,7 +363,7 @@ func (ar *AlertingRule) Restore(ctx context.Context, q datasource.Querier, lookb
|
||||||
|
|
||||||
a, err := ar.newAlert(m, time.Unix(int64(m.Value), 0))
|
a, err := ar.newAlert(m, time.Unix(int64(m.Value), 0))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to create alert: %s", err)
|
return fmt.Errorf("failed to create alert: %w", err)
|
||||||
}
|
}
|
||||||
a.ID = hash(m)
|
a.ID = hash(m)
|
||||||
a.State = notifier.StatePending
|
a.State = notifier.StatePending
|
||||||
|
|
|
@ -46,19 +46,19 @@ func (g *Group) Validate(validateAnnotations, validateExpressions bool) error {
|
||||||
}
|
}
|
||||||
uniqueRules[r.ID] = struct{}{}
|
uniqueRules[r.ID] = struct{}{}
|
||||||
if err := r.Validate(); err != nil {
|
if err := r.Validate(); err != nil {
|
||||||
return fmt.Errorf("invalid rule %q.%q: %s", g.Name, ruleName, err)
|
return fmt.Errorf("invalid rule %q.%q: %w", g.Name, ruleName, err)
|
||||||
}
|
}
|
||||||
if validateExpressions {
|
if validateExpressions {
|
||||||
if _, err := metricsql.Parse(r.Expr); err != nil {
|
if _, err := metricsql.Parse(r.Expr); err != nil {
|
||||||
return fmt.Errorf("invalid expression for rule %q.%q: %s", g.Name, ruleName, err)
|
return fmt.Errorf("invalid expression for rule %q.%q: %w", g.Name, ruleName, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if validateAnnotations {
|
if validateAnnotations {
|
||||||
if err := notifier.ValidateTemplates(r.Annotations); err != nil {
|
if err := notifier.ValidateTemplates(r.Annotations); err != nil {
|
||||||
return fmt.Errorf("invalid annotations for rule %q.%q: %s", g.Name, ruleName, err)
|
return fmt.Errorf("invalid annotations for rule %q.%q: %w", g.Name, ruleName, err)
|
||||||
}
|
}
|
||||||
if err := notifier.ValidateTemplates(r.Labels); err != nil {
|
if err := notifier.ValidateTemplates(r.Labels); err != nil {
|
||||||
return fmt.Errorf("invalid labels for rule %q.%q: %s", g.Name, ruleName, err)
|
return fmt.Errorf("invalid labels for rule %q.%q: %w", g.Name, ruleName, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -137,7 +137,7 @@ func Parse(pathPatterns []string, validateAnnotations, validateExpressions bool)
|
||||||
for _, pattern := range pathPatterns {
|
for _, pattern := range pathPatterns {
|
||||||
matches, err := filepath.Glob(pattern)
|
matches, err := filepath.Glob(pattern)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error reading file pattern %s: %v", pattern, err)
|
return nil, fmt.Errorf("error reading file pattern %s: %w", pattern, err)
|
||||||
}
|
}
|
||||||
fp = append(fp, matches...)
|
fp = append(fp, matches...)
|
||||||
}
|
}
|
||||||
|
@ -150,7 +150,7 @@ func Parse(pathPatterns []string, validateAnnotations, validateExpressions bool)
|
||||||
}
|
}
|
||||||
for _, g := range gr {
|
for _, g := range gr {
|
||||||
if err := g.Validate(validateAnnotations, validateExpressions); err != nil {
|
if err := g.Validate(validateAnnotations, validateExpressions); err != nil {
|
||||||
return nil, fmt.Errorf("invalid group %q in file %q: %s", g.Name, file, err)
|
return nil, fmt.Errorf("invalid group %q in file %q: %w", g.Name, file, err)
|
||||||
}
|
}
|
||||||
if _, ok := uniqueGroups[g.Name]; ok {
|
if _, ok := uniqueGroups[g.Name]; ok {
|
||||||
return nil, fmt.Errorf("group name %q duplicate in file %q", g.Name, file)
|
return nil, fmt.Errorf("group name %q duplicate in file %q", g.Name, file)
|
||||||
|
|
|
@ -31,7 +31,7 @@ func Init() (Querier, error) {
|
||||||
}
|
}
|
||||||
tr, err := utils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
|
tr, err := utils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to create transport: %s", err)
|
return nil, fmt.Errorf("failed to create transport: %w", err)
|
||||||
}
|
}
|
||||||
c := &http.Client{Transport: tr}
|
c := &http.Client{Transport: tr}
|
||||||
return NewVMStorage(*addr, *basicAuthUsername, *basicAuthPassword, c), nil
|
return NewVMStorage(*addr, *basicAuthUsername, *basicAuthPassword, c), nil
|
||||||
|
|
|
@ -32,7 +32,7 @@ func (r response) metrics() ([]Metric, error) {
|
||||||
for i, res := range r.Data.Result {
|
for i, res := range r.Data.Result {
|
||||||
f, err = strconv.ParseFloat(res.TV[1].(string), 64)
|
f, err = strconv.ParseFloat(res.TV[1].(string), 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("metric %v, unable to parse float64 from %s: %s", res, res.TV[1], err)
|
return nil, fmt.Errorf("metric %v, unable to parse float64 from %s: %w", res, res.TV[1], err)
|
||||||
}
|
}
|
||||||
m.Labels = nil
|
m.Labels = nil
|
||||||
for k, v := range r.Data.Result[i].Labels {
|
for k, v := range r.Data.Result[i].Labels {
|
||||||
|
@ -80,25 +80,25 @@ func (s *VMStorage) Query(ctx context.Context, query string) ([]Metric, error) {
|
||||||
}
|
}
|
||||||
resp, err := s.c.Do(req.WithContext(ctx))
|
resp, err := s.c.Do(req.WithContext(ctx))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error getting response from %s:%s", req.URL, err)
|
return nil, fmt.Errorf("error getting response from %s: %w", req.URL, err)
|
||||||
}
|
}
|
||||||
defer func() { _ = resp.Body.Close() }()
|
defer func() { _ = resp.Body.Close() }()
|
||||||
if resp.StatusCode != http.StatusOK {
|
if resp.StatusCode != http.StatusOK {
|
||||||
body, _ := ioutil.ReadAll(resp.Body)
|
body, _ := ioutil.ReadAll(resp.Body)
|
||||||
return nil, fmt.Errorf("datasource returns unxeprected response code %d for %s with err %s. Reponse body %s", resp.StatusCode, req.URL, err, body)
|
return nil, fmt.Errorf("datasource returns unexpected response code %d for %s with err %w; response body: %s", resp.StatusCode, req.URL, err, body)
|
||||||
}
|
}
|
||||||
r := &response{}
|
r := &response{}
|
||||||
if err := json.NewDecoder(resp.Body).Decode(r); err != nil {
|
if err := json.NewDecoder(resp.Body).Decode(r); err != nil {
|
||||||
return nil, fmt.Errorf("error parsing metrics for %s:%s", req.URL, err)
|
return nil, fmt.Errorf("error parsing metrics for %s: %w", req.URL, err)
|
||||||
}
|
}
|
||||||
if r.Status == statusError {
|
if r.Status == statusError {
|
||||||
return nil, fmt.Errorf("response error, query: %s, errorType: %s, error: %s", req.URL, r.ErrorType, r.Error)
|
return nil, fmt.Errorf("response error, query: %s, errorType: %s, error: %s", req.URL, r.ErrorType, r.Error)
|
||||||
}
|
}
|
||||||
if r.Status != statusSuccess {
|
if r.Status != statusSuccess {
|
||||||
return nil, fmt.Errorf("unkown status:%s, Expected success or error ", r.Status)
|
return nil, fmt.Errorf("unknown status: %s; expected success or error", r.Status)
|
||||||
}
|
}
|
||||||
if r.Data.ResultType != rtVector {
|
if r.Data.ResultType != rtVector {
|
||||||
return nil, fmt.Errorf("unkown restul type:%s. Expected vector", r.Data.ResultType)
|
return nil, fmt.Errorf("unknown result type: %s; expected vector", r.Data.ResultType)
|
||||||
}
|
}
|
||||||
return r.metrics()
|
return r.metrics()
|
||||||
}
|
}
|
||||||
|
|
|
@ -84,7 +84,7 @@ func (g *Group) Restore(ctx context.Context, q datasource.Querier, lookback time
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if err := rr.Restore(ctx, q, lookback); err != nil {
|
if err := rr.Restore(ctx, q, lookback); err != nil {
|
||||||
return fmt.Errorf("error while restoring rule %q: %s", rule, err)
|
return fmt.Errorf("error while restoring rule %q: %w", rule, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -251,7 +251,7 @@ func (e *executor) exec(ctx context.Context, rule Rule, returnSeries bool, inter
|
||||||
tss, err := rule.Exec(ctx, e.querier, returnSeries)
|
tss, err := rule.Exec(ctx, e.querier, returnSeries)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
execErrors.Inc()
|
execErrors.Inc()
|
||||||
return fmt.Errorf("rule %q: failed to execute: %s", rule, err)
|
return fmt.Errorf("rule %q: failed to execute: %w", rule, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(tss) > 0 && e.rw != nil {
|
if len(tss) > 0 && e.rw != nil {
|
||||||
|
@ -259,7 +259,7 @@ func (e *executor) exec(ctx context.Context, rule Rule, returnSeries bool, inter
|
||||||
for _, ts := range tss {
|
for _, ts := range tss {
|
||||||
if err := e.rw.Push(ts); err != nil {
|
if err := e.rw.Push(ts); err != nil {
|
||||||
remoteWriteErrors.Inc()
|
remoteWriteErrors.Inc()
|
||||||
return fmt.Errorf("rule %q: remote write failure: %s", rule, err)
|
return fmt.Errorf("rule %q: remote write failure: %w", rule, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -293,7 +293,7 @@ func (e *executor) exec(ctx context.Context, rule Rule, returnSeries bool, inter
|
||||||
for _, nt := range e.notifiers {
|
for _, nt := range e.notifiers {
|
||||||
if err := nt.Send(ctx, alerts); err != nil {
|
if err := nt.Send(ctx, alerts); err != nil {
|
||||||
alertsSendErrors.Inc()
|
alertsSendErrors.Inc()
|
||||||
errGr.Add(fmt.Errorf("rule %q: failed to send alerts: %s", rule, err))
|
errGr.Add(fmt.Errorf("rule %q: failed to send alerts: %w", rule, err))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return errGr.Err()
|
return errGr.Err()
|
||||||
|
|
|
@ -105,20 +105,20 @@ var (
|
||||||
func newManager(ctx context.Context) (*manager, error) {
|
func newManager(ctx context.Context) (*manager, error) {
|
||||||
q, err := datasource.Init()
|
q, err := datasource.Init()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to init datasource: %s", err)
|
return nil, fmt.Errorf("failed to init datasource: %w", err)
|
||||||
}
|
}
|
||||||
eu, err := getExternalURL(*externalURL, *httpListenAddr, httpserver.IsTLS())
|
eu, err := getExternalURL(*externalURL, *httpListenAddr, httpserver.IsTLS())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to init `external.url`: %s", err)
|
return nil, fmt.Errorf("failed to init `external.url`: %w", err)
|
||||||
}
|
}
|
||||||
notifier.InitTemplateFunc(eu)
|
notifier.InitTemplateFunc(eu)
|
||||||
aug, err := getAlertURLGenerator(eu, *externalAlertSource, *validateTemplates)
|
aug, err := getAlertURLGenerator(eu, *externalAlertSource, *validateTemplates)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to init `external.alert.source`: %s", err)
|
return nil, fmt.Errorf("failed to init `external.alert.source`: %w", err)
|
||||||
}
|
}
|
||||||
nts, err := notifier.Init(aug)
|
nts, err := notifier.Init(aug)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to init notifier: %s", err)
|
return nil, fmt.Errorf("failed to init notifier: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
manager := &manager{
|
manager := &manager{
|
||||||
|
@ -128,13 +128,13 @@ func newManager(ctx context.Context) (*manager, error) {
|
||||||
}
|
}
|
||||||
rw, err := remotewrite.Init(ctx)
|
rw, err := remotewrite.Init(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to init remoteWrite: %s", err)
|
return nil, fmt.Errorf("failed to init remoteWrite: %w", err)
|
||||||
}
|
}
|
||||||
manager.rw = rw
|
manager.rw = rw
|
||||||
|
|
||||||
rr, err := remoteread.Init()
|
rr, err := remoteread.Init()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to init remoteRead: %s", err)
|
return nil, fmt.Errorf("failed to init remoteRead: %w", err)
|
||||||
}
|
}
|
||||||
manager.rr = rr
|
manager.rr = rr
|
||||||
return manager, nil
|
return manager, nil
|
||||||
|
@ -169,7 +169,7 @@ func getAlertURLGenerator(externalURL *url.URL, externalAlertSource string, vali
|
||||||
if err := notifier.ValidateTemplates(map[string]string{
|
if err := notifier.ValidateTemplates(map[string]string{
|
||||||
"tpl": externalAlertSource,
|
"tpl": externalAlertSource,
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return nil, fmt.Errorf("error validating source template %s:%w", externalAlertSource, err)
|
return nil, fmt.Errorf("error validating source template %s: %w", externalAlertSource, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
m := map[string]string{
|
m := map[string]string{
|
||||||
|
|
|
@ -83,7 +83,7 @@ func (m *manager) update(ctx context.Context, path []string, validateTpl, valida
|
||||||
logger.Infof("reading rules configuration file from %q", strings.Join(path, ";"))
|
logger.Infof("reading rules configuration file from %q", strings.Join(path, ";"))
|
||||||
groupsCfg, err := config.Parse(path, validateTpl, validateExpr)
|
groupsCfg, err := config.Parse(path, validateTpl, validateExpr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot parse configuration file: %s", err)
|
return fmt.Errorf("cannot parse configuration file: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
groupsRegistry := make(map[uint64]*Group)
|
groupsRegistry := make(map[uint64]*Group)
|
||||||
|
|
|
@ -89,7 +89,7 @@ func templateAnnotations(annotations map[string]string, header string, data aler
|
||||||
builder.WriteString(header)
|
builder.WriteString(header)
|
||||||
builder.WriteString(text)
|
builder.WriteString(text)
|
||||||
if err := templateAnnotation(&buf, builder.String(), data); err != nil {
|
if err := templateAnnotation(&buf, builder.String(), data); err != nil {
|
||||||
eg.Add(fmt.Errorf("key %q, template %q: %s", key, text, err))
|
eg.Add(fmt.Errorf("key %q, template %q: %w", key, text, err))
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
r[key] = buf.String()
|
r[key] = buf.String()
|
||||||
|
|
|
@ -43,7 +43,7 @@ func (am *AlertManager) Send(ctx context.Context, alerts []Alert) error {
|
||||||
if resp.StatusCode != http.StatusOK {
|
if resp.StatusCode != http.StatusOK {
|
||||||
body, err := ioutil.ReadAll(resp.Body)
|
body, err := ioutil.ReadAll(resp.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to read response from %q: %s", am.alertURL, err)
|
return fmt.Errorf("failed to read response from %q: %w", am.alertURL, err)
|
||||||
}
|
}
|
||||||
return fmt.Errorf("invalid SC %d from %q; response body: %s", resp.StatusCode, am.alertURL, string(body))
|
return fmt.Errorf("invalid SC %d from %q; response body: %s", resp.StatusCode, am.alertURL, string(body))
|
||||||
}
|
}
|
||||||
|
|
|
@ -36,7 +36,7 @@ func Init(gen AlertURLGenerator) ([]Notifier, error) {
|
||||||
ca, serverName := tlsCAFile.GetOptionalArg(i), tlsServerName.GetOptionalArg(i)
|
ca, serverName := tlsCAFile.GetOptionalArg(i), tlsServerName.GetOptionalArg(i)
|
||||||
tr, err := utils.Transport(addr, cert, key, ca, serverName, *tlsInsecureSkipVerify)
|
tr, err := utils.Transport(addr, cert, key, ca, serverName, *tlsInsecureSkipVerify)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to create transport: %s", err)
|
return nil, fmt.Errorf("failed to create transport: %w", err)
|
||||||
}
|
}
|
||||||
user, pass := basicAuthUsername.GetOptionalArg(i), basicAuthPassword.GetOptionalArg(i)
|
user, pass := basicAuthUsername.GetOptionalArg(i), basicAuthPassword.GetOptionalArg(i)
|
||||||
am := NewAlertManager(addr, user, pass, gen, &http.Client{Transport: tr})
|
am := NewAlertManager(addr, user, pass, gen, &http.Client{Transport: tr})
|
||||||
|
|
|
@ -71,7 +71,7 @@ func (rr *RecordingRule) Exec(ctx context.Context, q datasource.Querier, series
|
||||||
rr.lastExecTime = time.Now()
|
rr.lastExecTime = time.Now()
|
||||||
rr.lastExecError = err
|
rr.lastExecError = err
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to execute query %q: %s", rr.Expr, err)
|
return nil, fmt.Errorf("failed to execute query %q: %w", rr.Expr, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
duplicates := make(map[uint64]prompbmarshal.TimeSeries, len(qMetrics))
|
duplicates := make(map[uint64]prompbmarshal.TimeSeries, len(qMetrics))
|
||||||
|
|
|
@ -32,7 +32,7 @@ func Init() (datasource.Querier, error) {
|
||||||
}
|
}
|
||||||
tr, err := utils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
|
tr, err := utils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to create transport: %s", err)
|
return nil, fmt.Errorf("failed to create transport: %w", err)
|
||||||
}
|
}
|
||||||
c := &http.Client{Transport: tr}
|
c := &http.Client{Transport: tr}
|
||||||
return datasource.NewVMStorage(*addr, *basicAuthUsername, *basicAuthPassword, c), nil
|
return datasource.NewVMStorage(*addr, *basicAuthUsername, *basicAuthPassword, c), nil
|
||||||
|
|
|
@ -38,7 +38,7 @@ func Init(ctx context.Context) (*Client, error) {
|
||||||
|
|
||||||
t, err := utils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
|
t, err := utils.Transport(*addr, *tlsCertFile, *tlsKeyFile, *tlsCAFile, *tlsServerName, *tlsInsecureSkipVerify)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to create transport: %s", err)
|
return nil, fmt.Errorf("failed to create transport: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return NewClient(ctx, Config{
|
return NewClient(ctx, Config{
|
||||||
|
|
|
@ -30,7 +30,7 @@ func TLSConfig(certFile, keyFile, CAFile, serverName string, insecureSkipVerify
|
||||||
if certFile != "" {
|
if certFile != "" {
|
||||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot load TLS certificate from `cert_file`=%q, `key_file`=%q: %s", certFile, keyFile, err)
|
return nil, fmt.Errorf("cannot load TLS certificate from `cert_file`=%q, `key_file`=%q: %w", certFile, keyFile, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
certs = []tls.Certificate{cert}
|
certs = []tls.Certificate{cert}
|
||||||
|
@ -40,7 +40,7 @@ func TLSConfig(certFile, keyFile, CAFile, serverName string, insecureSkipVerify
|
||||||
if CAFile != "" {
|
if CAFile != "" {
|
||||||
pem, err := ioutil.ReadFile(CAFile)
|
pem, err := ioutil.ReadFile(CAFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot read `ca_file` %q: %s", CAFile, err)
|
return nil, fmt.Errorf("cannot read `ca_file` %q: %w", CAFile, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
rootCAs = x509.NewCertPool()
|
rootCAs = x509.NewCertPool()
|
||||||
|
|
|
@ -80,7 +80,7 @@ func (rh *requestHandler) listGroups() ([]byte, error) {
|
||||||
b, err := json.Marshal(lr)
|
b, err := json.Marshal(lr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, &httpserver.ErrorWithStatusCode{
|
return nil, &httpserver.ErrorWithStatusCode{
|
||||||
Err: fmt.Errorf(`error encoding list of active alerts: %s`, err),
|
Err: fmt.Errorf(`error encoding list of active alerts: %w`, err),
|
||||||
StatusCode: http.StatusInternalServerError,
|
StatusCode: http.StatusInternalServerError,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -117,7 +117,7 @@ func (rh *requestHandler) listAlerts() ([]byte, error) {
|
||||||
b, err := json.Marshal(lr)
|
b, err := json.Marshal(lr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, &httpserver.ErrorWithStatusCode{
|
return nil, &httpserver.ErrorWithStatusCode{
|
||||||
Err: fmt.Errorf(`error encoding list of active alerts: %s`, err),
|
Err: fmt.Errorf(`error encoding list of active alerts: %w`, err),
|
||||||
StatusCode: http.StatusInternalServerError,
|
StatusCode: http.StatusInternalServerError,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -138,11 +138,11 @@ func (rh *requestHandler) alert(path string) ([]byte, error) {
|
||||||
|
|
||||||
groupID, err := uint64FromPath(parts[0])
|
groupID, err := uint64FromPath(parts[0])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, badRequest(fmt.Errorf(`cannot parse groupID: %s`, err))
|
return nil, badRequest(fmt.Errorf(`cannot parse groupID: %w`, err))
|
||||||
}
|
}
|
||||||
alertID, err := uint64FromPath(parts[1])
|
alertID, err := uint64FromPath(parts[1])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, badRequest(fmt.Errorf(`cannot parse alertID: %s`, err))
|
return nil, badRequest(fmt.Errorf(`cannot parse alertID: %w`, err))
|
||||||
}
|
}
|
||||||
resp, err := rh.m.AlertAPI(groupID, alertID)
|
resp, err := rh.m.AlertAPI(groupID, alertID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@ -82,11 +82,11 @@ var stopCh chan struct{}
|
||||||
func readAuthConfig(path string) (map[string]*UserInfo, error) {
|
func readAuthConfig(path string) (map[string]*UserInfo, error) {
|
||||||
data, err := ioutil.ReadFile(path)
|
data, err := ioutil.ReadFile(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot read %q: %s", path, err)
|
return nil, fmt.Errorf("cannot read %q: %w", path, err)
|
||||||
}
|
}
|
||||||
m, err := parseAuthConfig(data)
|
m, err := parseAuthConfig(data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse %q: %s", path, err)
|
return nil, fmt.Errorf("cannot parse %q: %w", path, err)
|
||||||
}
|
}
|
||||||
logger.Infof("Loaded information about %d users from %q", len(m), path)
|
logger.Infof("Loaded information about %d users from %q", len(m), path)
|
||||||
return m, nil
|
return m, nil
|
||||||
|
@ -95,7 +95,7 @@ func readAuthConfig(path string) (map[string]*UserInfo, error) {
|
||||||
func parseAuthConfig(data []byte) (map[string]*UserInfo, error) {
|
func parseAuthConfig(data []byte) (map[string]*UserInfo, error) {
|
||||||
var ac AuthConfig
|
var ac AuthConfig
|
||||||
if err := yaml.UnmarshalStrict(data, &ac); err != nil {
|
if err := yaml.UnmarshalStrict(data, &ac); err != nil {
|
||||||
return nil, fmt.Errorf("cannot unmarshal AuthConfig data: %s", err)
|
return nil, fmt.Errorf("cannot unmarshal AuthConfig data: %w", err)
|
||||||
}
|
}
|
||||||
uis := ac.Users
|
uis := ac.Users
|
||||||
if len(uis) == 0 {
|
if len(uis) == 0 {
|
||||||
|
@ -115,7 +115,7 @@ func parseAuthConfig(data []byte) (map[string]*UserInfo, error) {
|
||||||
// Validate urlPrefix
|
// Validate urlPrefix
|
||||||
target, err := url.Parse(urlPrefix)
|
target, err := url.Parse(urlPrefix)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("invalid `url_prefix: %q`: %s", urlPrefix, err)
|
return nil, fmt.Errorf("invalid `url_prefix: %q`: %w", urlPrefix, err)
|
||||||
}
|
}
|
||||||
if target.Scheme != "http" && target.Scheme != "https" {
|
if target.Scheme != "http" && target.Scheme != "https" {
|
||||||
return nil, fmt.Errorf("unsupported scheme for `url_prefix: %q`: %q; must be `http` or `https`", urlPrefix, target.Scheme)
|
return nil, fmt.Errorf("unsupported scheme for `url_prefix: %q`: %q; must be `http` or `https`", urlPrefix, target.Scheme)
|
||||||
|
|
|
@ -110,12 +110,12 @@ func newSrcFS() (*fslocal.FS, error) {
|
||||||
// Verify the snapshot exists.
|
// Verify the snapshot exists.
|
||||||
f, err := os.Open(snapshotPath)
|
f, err := os.Open(snapshotPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot open snapshot at %q: %s", snapshotPath, err)
|
return nil, fmt.Errorf("cannot open snapshot at %q: %w", snapshotPath, err)
|
||||||
}
|
}
|
||||||
fi, err := f.Stat()
|
fi, err := f.Stat()
|
||||||
_ = f.Close()
|
_ = f.Close()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot stat %q: %s", snapshotPath, err)
|
return nil, fmt.Errorf("cannot stat %q: %w", snapshotPath, err)
|
||||||
}
|
}
|
||||||
if !fi.IsDir() {
|
if !fi.IsDir() {
|
||||||
return nil, fmt.Errorf("snapshot %q must be a directory", snapshotPath)
|
return nil, fmt.Errorf("snapshot %q must be a directory", snapshotPath)
|
||||||
|
@ -126,7 +126,7 @@ func newSrcFS() (*fslocal.FS, error) {
|
||||||
MaxBytesPerSecond: *maxBytesPerSecond,
|
MaxBytesPerSecond: *maxBytesPerSecond,
|
||||||
}
|
}
|
||||||
if err := fs.Init(); err != nil {
|
if err := fs.Init(); err != nil {
|
||||||
return nil, fmt.Errorf("cannot initialize fs: %s", err)
|
return nil, fmt.Errorf("cannot initialize fs: %w", err)
|
||||||
}
|
}
|
||||||
return fs, nil
|
return fs, nil
|
||||||
}
|
}
|
||||||
|
@ -134,7 +134,7 @@ func newSrcFS() (*fslocal.FS, error) {
|
||||||
func newDstFS() (common.RemoteFS, error) {
|
func newDstFS() (common.RemoteFS, error) {
|
||||||
fs, err := actions.NewRemoteFS(*dst)
|
fs, err := actions.NewRemoteFS(*dst)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse `-dst`=%q: %s", *dst, err)
|
return nil, fmt.Errorf("cannot parse `-dst`=%q: %w", *dst, err)
|
||||||
}
|
}
|
||||||
return fs, nil
|
return fs, nil
|
||||||
}
|
}
|
||||||
|
@ -145,7 +145,7 @@ func newOriginFS() (common.RemoteFS, error) {
|
||||||
}
|
}
|
||||||
fs, err := actions.NewRemoteFS(*origin)
|
fs, err := actions.NewRemoteFS(*origin)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse `-origin`=%q: %s", *origin, err)
|
return nil, fmt.Errorf("cannot parse `-origin`=%q: %w", *origin, err)
|
||||||
}
|
}
|
||||||
return fs, nil
|
return fs, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -122,7 +122,7 @@ func (ctx *InsertCtx) AddLabel(name, value string) {
|
||||||
func (ctx *InsertCtx) FlushBufs() error {
|
func (ctx *InsertCtx) FlushBufs() error {
|
||||||
if err := vmstorage.AddRows(ctx.mrs); err != nil {
|
if err := vmstorage.AddRows(ctx.mrs); err != nil {
|
||||||
return &httpserver.ErrorWithStatusCode{
|
return &httpserver.ErrorWithStatusCode{
|
||||||
Err: fmt.Errorf("cannot store metrics: %s", err),
|
Err: fmt.Errorf("cannot store metrics: %w", err),
|
||||||
StatusCode: http.StatusServiceUnavailable,
|
StatusCode: http.StatusServiceUnavailable,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -71,7 +71,7 @@ func newDstFS() (*fslocal.FS, error) {
|
||||||
MaxBytesPerSecond: *maxBytesPerSecond,
|
MaxBytesPerSecond: *maxBytesPerSecond,
|
||||||
}
|
}
|
||||||
if err := fs.Init(); err != nil {
|
if err := fs.Init(); err != nil {
|
||||||
return nil, fmt.Errorf("cannot initialize local fs: %s", err)
|
return nil, fmt.Errorf("cannot initialize local fs: %w", err)
|
||||||
}
|
}
|
||||||
return fs, nil
|
return fs, nil
|
||||||
}
|
}
|
||||||
|
@ -79,7 +79,7 @@ func newDstFS() (*fslocal.FS, error) {
|
||||||
func newSrcFS() (common.RemoteFS, error) {
|
func newSrcFS() (common.RemoteFS, error) {
|
||||||
fs, err := actions.NewRemoteFS(*src)
|
fs, err := actions.NewRemoteFS(*src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse `-src`=%q: %s", *src, err)
|
return nil, fmt.Errorf("cannot parse `-src`=%q: %w", *src, err)
|
||||||
}
|
}
|
||||||
return fs, nil
|
return fs, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -98,7 +98,7 @@ func timeseriesWorker(workerID uint) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if err := tsw.pts.Unpack(&rs, rss.tr, rss.fetchData); err != nil {
|
if err := tsw.pts.Unpack(&rs, rss.tr, rss.fetchData); err != nil {
|
||||||
tsw.doneCh <- fmt.Errorf("error during time series unpacking: %s", err)
|
tsw.doneCh <- fmt.Errorf("error during time series unpacking: %w", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if len(rs.Timestamps) > 0 || !rss.fetchData {
|
if len(rs.Timestamps) > 0 || !rss.fetchData {
|
||||||
|
@ -187,7 +187,7 @@ func unpackWorker() {
|
||||||
sb := getSortBlock()
|
sb := getSortBlock()
|
||||||
if err := sb.unpackFrom(upw.br, upw.tr, upw.fetchData); err != nil {
|
if err := sb.unpackFrom(upw.br, upw.tr, upw.fetchData); err != nil {
|
||||||
putSortBlock(sb)
|
putSortBlock(sb)
|
||||||
upw.doneCh <- fmt.Errorf("cannot unpack block: %s", err)
|
upw.doneCh <- fmt.Errorf("cannot unpack block: %w", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
upw.sb = sb
|
upw.sb = sb
|
||||||
|
@ -200,7 +200,7 @@ func (pts *packedTimeseries) Unpack(dst *Result, tr storage.TimeRange, fetchData
|
||||||
dst.reset()
|
dst.reset()
|
||||||
|
|
||||||
if err := dst.MetricName.Unmarshal(bytesutil.ToUnsafeBytes(pts.metricName)); err != nil {
|
if err := dst.MetricName.Unmarshal(bytesutil.ToUnsafeBytes(pts.metricName)); err != nil {
|
||||||
return fmt.Errorf("cannot unmarshal metricName %q: %s", pts.metricName, err)
|
return fmt.Errorf("cannot unmarshal metricName %q: %w", pts.metricName, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Feed workers with work
|
// Feed workers with work
|
||||||
|
@ -329,7 +329,7 @@ func (sb *sortBlock) unpackFrom(br storage.BlockRef, tr storage.TimeRange, fetch
|
||||||
br.MustReadBlock(&sb.b, fetchData)
|
br.MustReadBlock(&sb.b, fetchData)
|
||||||
if fetchData {
|
if fetchData {
|
||||||
if err := sb.b.UnmarshalData(); err != nil {
|
if err := sb.b.UnmarshalData(); err != nil {
|
||||||
return fmt.Errorf("cannot unmarshal block: %s", err)
|
return fmt.Errorf("cannot unmarshal block: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
timestamps := sb.b.Timestamps()
|
timestamps := sb.b.Timestamps()
|
||||||
|
@ -398,7 +398,7 @@ func DeleteSeries(sq *storage.SearchQuery) (int, error) {
|
||||||
func GetLabels(deadline Deadline) ([]string, error) {
|
func GetLabels(deadline Deadline) ([]string, error) {
|
||||||
labels, err := vmstorage.SearchTagKeys(*maxTagKeysPerSearch)
|
labels, err := vmstorage.SearchTagKeys(*maxTagKeysPerSearch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error during labels search: %s", err)
|
return nil, fmt.Errorf("error during labels search: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Substitute "" with "__name__"
|
// Substitute "" with "__name__"
|
||||||
|
@ -424,7 +424,7 @@ func GetLabelValues(labelName string, deadline Deadline) ([]string, error) {
|
||||||
// Search for tag values
|
// Search for tag values
|
||||||
labelValues, err := vmstorage.SearchTagValues([]byte(labelName), *maxTagValuesPerSearch)
|
labelValues, err := vmstorage.SearchTagValues([]byte(labelName), *maxTagValuesPerSearch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error during label values search for labelName=%q: %s", labelName, err)
|
return nil, fmt.Errorf("error during label values search for labelName=%q: %w", labelName, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sort labelValues like Prometheus does
|
// Sort labelValues like Prometheus does
|
||||||
|
@ -437,7 +437,7 @@ func GetLabelValues(labelName string, deadline Deadline) ([]string, error) {
|
||||||
func GetLabelEntries(deadline Deadline) ([]storage.TagEntry, error) {
|
func GetLabelEntries(deadline Deadline) ([]storage.TagEntry, error) {
|
||||||
labelEntries, err := vmstorage.SearchTagEntries(*maxTagKeysPerSearch, *maxTagValuesPerSearch)
|
labelEntries, err := vmstorage.SearchTagEntries(*maxTagKeysPerSearch, *maxTagValuesPerSearch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error during label entries request: %s", err)
|
return nil, fmt.Errorf("error during label entries request: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Substitute "" with "__name__"
|
// Substitute "" with "__name__"
|
||||||
|
@ -464,7 +464,7 @@ func GetLabelEntries(deadline Deadline) ([]storage.TagEntry, error) {
|
||||||
func GetTSDBStatusForDate(deadline Deadline, date uint64, topN int) (*storage.TSDBStatus, error) {
|
func GetTSDBStatusForDate(deadline Deadline, date uint64, topN int) (*storage.TSDBStatus, error) {
|
||||||
status, err := vmstorage.GetTSDBStatusForDate(date, topN)
|
status, err := vmstorage.GetTSDBStatusForDate(date, topN)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error during tsdb status request: %s", err)
|
return nil, fmt.Errorf("error during tsdb status request: %w", err)
|
||||||
}
|
}
|
||||||
return status, nil
|
return status, nil
|
||||||
}
|
}
|
||||||
|
@ -473,7 +473,7 @@ func GetTSDBStatusForDate(deadline Deadline, date uint64, topN int) (*storage.TS
|
||||||
func GetSeriesCount(deadline Deadline) (uint64, error) {
|
func GetSeriesCount(deadline Deadline) (uint64, error) {
|
||||||
n, err := vmstorage.GetSeriesCount()
|
n, err := vmstorage.GetSeriesCount()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, fmt.Errorf("error during series count request: %s", err)
|
return 0, fmt.Errorf("error during series count request: %w", err)
|
||||||
}
|
}
|
||||||
return n, nil
|
return n, nil
|
||||||
}
|
}
|
||||||
|
@ -529,7 +529,7 @@ func ProcessSearchQuery(sq *storage.SearchQuery, fetchData bool, deadline Deadli
|
||||||
m[string(metricName)] = append(brs, *sr.MetricBlockRef.BlockRef)
|
m[string(metricName)] = append(brs, *sr.MetricBlockRef.BlockRef)
|
||||||
}
|
}
|
||||||
if err := sr.Error(); err != nil {
|
if err := sr.Error(); err != nil {
|
||||||
return nil, fmt.Errorf("search error after reading %d data blocks: %s", blocksRead, err)
|
return nil, fmt.Errorf("search error after reading %d data blocks: %w", blocksRead, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var rss Results
|
var rss Results
|
||||||
|
@ -555,7 +555,7 @@ func setupTfss(tagFilterss [][]storage.TagFilter) ([]*storage.TagFilters, error)
|
||||||
for i := range tagFilters {
|
for i := range tagFilters {
|
||||||
tf := &tagFilters[i]
|
tf := &tagFilters[i]
|
||||||
if err := tfs.Add(tf.Key, tf.Value, tf.IsNegative, tf.IsRegexp); err != nil {
|
if err := tfs.Add(tf.Key, tf.Value, tf.IsNegative, tf.IsRegexp); err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse tag filter %s: %s", tf, err)
|
return nil, fmt.Errorf("cannot parse tag filter %s: %w", tf, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
tfss = append(tfss, tfs)
|
tfss = append(tfss, tfs)
|
||||||
|
|
|
@ -46,7 +46,7 @@ const defaultStep = 5 * 60 * 1000
|
||||||
func FederateHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error {
|
func FederateHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error {
|
||||||
ct := currentTime()
|
ct := currentTime()
|
||||||
if err := r.ParseForm(); err != nil {
|
if err := r.ParseForm(); err != nil {
|
||||||
return fmt.Errorf("cannot parse request form values: %s", err)
|
return fmt.Errorf("cannot parse request form values: %w", err)
|
||||||
}
|
}
|
||||||
matches := r.Form["match[]"]
|
matches := r.Form["match[]"]
|
||||||
if len(matches) == 0 {
|
if len(matches) == 0 {
|
||||||
|
@ -82,7 +82,7 @@ func FederateHandler(startTime time.Time, w http.ResponseWriter, r *http.Request
|
||||||
}
|
}
|
||||||
rss, err := netstorage.ProcessSearchQuery(sq, true, deadline)
|
rss, err := netstorage.ProcessSearchQuery(sq, true, deadline)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot fetch data for %q: %s", sq, err)
|
return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
resultsCh := make(chan *quicktemplate.ByteBuffer)
|
resultsCh := make(chan *quicktemplate.ByteBuffer)
|
||||||
|
@ -105,7 +105,7 @@ func FederateHandler(startTime time.Time, w http.ResponseWriter, r *http.Request
|
||||||
|
|
||||||
err = <-doneCh
|
err = <-doneCh
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error during data fetching: %s", err)
|
return fmt.Errorf("error during data fetching: %w", err)
|
||||||
}
|
}
|
||||||
federateDuration.UpdateDuration(startTime)
|
federateDuration.UpdateDuration(startTime)
|
||||||
return nil
|
return nil
|
||||||
|
@ -117,7 +117,7 @@ var federateDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/fe
|
||||||
func ExportHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error {
|
func ExportHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error {
|
||||||
ct := currentTime()
|
ct := currentTime()
|
||||||
if err := r.ParseForm(); err != nil {
|
if err := r.ParseForm(); err != nil {
|
||||||
return fmt.Errorf("cannot parse request form values: %s", err)
|
return fmt.Errorf("cannot parse request form values: %w", err)
|
||||||
}
|
}
|
||||||
matches := r.Form["match[]"]
|
matches := r.Form["match[]"]
|
||||||
if len(matches) == 0 {
|
if len(matches) == 0 {
|
||||||
|
@ -143,7 +143,7 @@ func ExportHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
|
||||||
end = start + defaultStep
|
end = start + defaultStep
|
||||||
}
|
}
|
||||||
if err := exportHandler(w, matches, start, end, format, maxRowsPerLine, deadline); err != nil {
|
if err := exportHandler(w, matches, start, end, format, maxRowsPerLine, deadline); err != nil {
|
||||||
return fmt.Errorf("error when exporting data for queries=%q on the time range (start=%d, end=%d): %s", matches, start, end, err)
|
return fmt.Errorf("error when exporting data for queries=%q on the time range (start=%d, end=%d): %w", matches, start, end, err)
|
||||||
}
|
}
|
||||||
exportDuration.UpdateDuration(startTime)
|
exportDuration.UpdateDuration(startTime)
|
||||||
return nil
|
return nil
|
||||||
|
@ -202,7 +202,7 @@ func exportHandler(w http.ResponseWriter, matches []string, start, end int64, fo
|
||||||
}
|
}
|
||||||
rss, err := netstorage.ProcessSearchQuery(sq, true, deadline)
|
rss, err := netstorage.ProcessSearchQuery(sq, true, deadline)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot fetch data for %q: %s", sq, err)
|
return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
resultsCh := make(chan *quicktemplate.ByteBuffer, runtime.GOMAXPROCS(-1))
|
resultsCh := make(chan *quicktemplate.ByteBuffer, runtime.GOMAXPROCS(-1))
|
||||||
|
@ -227,7 +227,7 @@ func exportHandler(w http.ResponseWriter, matches []string, start, end int64, fo
|
||||||
}
|
}
|
||||||
err = <-doneCh
|
err = <-doneCh
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error during data fetching: %s", err)
|
return fmt.Errorf("error during data fetching: %w", err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -237,7 +237,7 @@ func exportHandler(w http.ResponseWriter, matches []string, start, end int64, fo
|
||||||
// See https://prometheus.io/docs/prometheus/latest/querying/api/#delete-series
|
// See https://prometheus.io/docs/prometheus/latest/querying/api/#delete-series
|
||||||
func DeleteHandler(startTime time.Time, r *http.Request) error {
|
func DeleteHandler(startTime time.Time, r *http.Request) error {
|
||||||
if err := r.ParseForm(); err != nil {
|
if err := r.ParseForm(); err != nil {
|
||||||
return fmt.Errorf("cannot parse request form values: %s", err)
|
return fmt.Errorf("cannot parse request form values: %w", err)
|
||||||
}
|
}
|
||||||
if r.FormValue("start") != "" || r.FormValue("end") != "" {
|
if r.FormValue("start") != "" || r.FormValue("end") != "" {
|
||||||
return fmt.Errorf("start and end aren't supported. Remove these args from the query in order to delete all the matching metrics")
|
return fmt.Errorf("start and end aren't supported. Remove these args from the query in order to delete all the matching metrics")
|
||||||
|
@ -255,7 +255,7 @@ func DeleteHandler(startTime time.Time, r *http.Request) error {
|
||||||
}
|
}
|
||||||
deletedCount, err := netstorage.DeleteSeries(sq)
|
deletedCount, err := netstorage.DeleteSeries(sq)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot delete time series matching %q: %s", matches, err)
|
return fmt.Errorf("cannot delete time series matching %q: %w", matches, err)
|
||||||
}
|
}
|
||||||
if deletedCount > 0 {
|
if deletedCount > 0 {
|
||||||
promql.ResetRollupResultCache()
|
promql.ResetRollupResultCache()
|
||||||
|
@ -273,14 +273,14 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr
|
||||||
deadline := getDeadlineForQuery(r)
|
deadline := getDeadlineForQuery(r)
|
||||||
|
|
||||||
if err := r.ParseForm(); err != nil {
|
if err := r.ParseForm(); err != nil {
|
||||||
return fmt.Errorf("cannot parse form values: %s", err)
|
return fmt.Errorf("cannot parse form values: %w", err)
|
||||||
}
|
}
|
||||||
var labelValues []string
|
var labelValues []string
|
||||||
if len(r.Form["match[]"]) == 0 && len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
|
if len(r.Form["match[]"]) == 0 && len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
|
||||||
var err error
|
var err error
|
||||||
labelValues, err = netstorage.GetLabelValues(labelName, deadline)
|
labelValues, err = netstorage.GetLabelValues(labelName, deadline)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf(`cannot obtain label values for %q: %s`, labelName, err)
|
return fmt.Errorf(`cannot obtain label values for %q: %w`, labelName, err)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// Extended functionality that allows filtering by label filters and time range
|
// Extended functionality that allows filtering by label filters and time range
|
||||||
|
@ -302,7 +302,7 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr
|
||||||
}
|
}
|
||||||
labelValues, err = labelValuesWithMatches(labelName, matches, start, end, deadline)
|
labelValues, err = labelValuesWithMatches(labelName, matches, start, end, deadline)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot obtain label values for %q, match[]=%q, start=%d, end=%d: %s", labelName, matches, start, end, err)
|
return fmt.Errorf("cannot obtain label values for %q, match[]=%q, start=%d, end=%d: %w", labelName, matches, start, end, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -343,7 +343,7 @@ func labelValuesWithMatches(labelName string, matches []string, start, end int64
|
||||||
}
|
}
|
||||||
rss, err := netstorage.ProcessSearchQuery(sq, false, deadline)
|
rss, err := netstorage.ProcessSearchQuery(sq, false, deadline)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot fetch data for %q: %s", sq, err)
|
return nil, fmt.Errorf("cannot fetch data for %q: %w", sq, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
m := make(map[string]struct{})
|
m := make(map[string]struct{})
|
||||||
|
@ -358,7 +358,7 @@ func labelValuesWithMatches(labelName string, matches []string, start, end int64
|
||||||
mLock.Unlock()
|
mLock.Unlock()
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error when data fetching: %s", err)
|
return nil, fmt.Errorf("error when data fetching: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
labelValues := make([]string, 0, len(m))
|
labelValues := make([]string, 0, len(m))
|
||||||
|
@ -376,7 +376,7 @@ func LabelsCountHandler(startTime time.Time, w http.ResponseWriter, r *http.Requ
|
||||||
deadline := getDeadlineForQuery(r)
|
deadline := getDeadlineForQuery(r)
|
||||||
labelEntries, err := netstorage.GetLabelEntries(deadline)
|
labelEntries, err := netstorage.GetLabelEntries(deadline)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf(`cannot obtain label entries: %s`, err)
|
return fmt.Errorf(`cannot obtain label entries: %w`, err)
|
||||||
}
|
}
|
||||||
w.Header().Set("Content-Type", "application/json")
|
w.Header().Set("Content-Type", "application/json")
|
||||||
WriteLabelsCountResponse(w, labelEntries)
|
WriteLabelsCountResponse(w, labelEntries)
|
||||||
|
@ -394,14 +394,14 @@ const secsPerDay = 3600 * 24
|
||||||
func TSDBStatusHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error {
|
func TSDBStatusHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error {
|
||||||
deadline := getDeadlineForQuery(r)
|
deadline := getDeadlineForQuery(r)
|
||||||
if err := r.ParseForm(); err != nil {
|
if err := r.ParseForm(); err != nil {
|
||||||
return fmt.Errorf("cannot parse form values: %s", err)
|
return fmt.Errorf("cannot parse form values: %w", err)
|
||||||
}
|
}
|
||||||
date := fasttime.UnixDate()
|
date := fasttime.UnixDate()
|
||||||
dateStr := r.FormValue("date")
|
dateStr := r.FormValue("date")
|
||||||
if len(dateStr) > 0 {
|
if len(dateStr) > 0 {
|
||||||
t, err := time.Parse("2006-01-02", dateStr)
|
t, err := time.Parse("2006-01-02", dateStr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot parse `date` arg %q: %s", dateStr, err)
|
return fmt.Errorf("cannot parse `date` arg %q: %w", dateStr, err)
|
||||||
}
|
}
|
||||||
date = uint64(t.Unix()) / secsPerDay
|
date = uint64(t.Unix()) / secsPerDay
|
||||||
}
|
}
|
||||||
|
@ -410,7 +410,7 @@ func TSDBStatusHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
|
||||||
if len(topNStr) > 0 {
|
if len(topNStr) > 0 {
|
||||||
n, err := strconv.Atoi(topNStr)
|
n, err := strconv.Atoi(topNStr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot parse `topN` arg %q: %s", topNStr, err)
|
return fmt.Errorf("cannot parse `topN` arg %q: %w", topNStr, err)
|
||||||
}
|
}
|
||||||
if n <= 0 {
|
if n <= 0 {
|
||||||
n = 1
|
n = 1
|
||||||
|
@ -422,7 +422,7 @@ func TSDBStatusHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
|
||||||
}
|
}
|
||||||
status, err := netstorage.GetTSDBStatusForDate(deadline, date, topN)
|
status, err := netstorage.GetTSDBStatusForDate(deadline, date, topN)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf(`cannot obtain tsdb status for date=%d, topN=%d: %s`, date, topN, err)
|
return fmt.Errorf(`cannot obtain tsdb status for date=%d, topN=%d: %w`, date, topN, err)
|
||||||
}
|
}
|
||||||
w.Header().Set("Content-Type", "application/json")
|
w.Header().Set("Content-Type", "application/json")
|
||||||
WriteTSDBStatusResponse(w, status)
|
WriteTSDBStatusResponse(w, status)
|
||||||
|
@ -439,14 +439,14 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
|
||||||
deadline := getDeadlineForQuery(r)
|
deadline := getDeadlineForQuery(r)
|
||||||
|
|
||||||
if err := r.ParseForm(); err != nil {
|
if err := r.ParseForm(); err != nil {
|
||||||
return fmt.Errorf("cannot parse form values: %s", err)
|
return fmt.Errorf("cannot parse form values: %w", err)
|
||||||
}
|
}
|
||||||
var labels []string
|
var labels []string
|
||||||
if len(r.Form["match[]"]) == 0 && len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
|
if len(r.Form["match[]"]) == 0 && len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
|
||||||
var err error
|
var err error
|
||||||
labels, err = netstorage.GetLabels(deadline)
|
labels, err = netstorage.GetLabels(deadline)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot obtain labels: %s", err)
|
return fmt.Errorf("cannot obtain labels: %w", err)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// Extended functionality that allows filtering by label filters and time range
|
// Extended functionality that allows filtering by label filters and time range
|
||||||
|
@ -466,7 +466,7 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
|
||||||
}
|
}
|
||||||
labels, err = labelsWithMatches(matches, start, end, deadline)
|
labels, err = labelsWithMatches(matches, start, end, deadline)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot obtain labels for match[]=%q, start=%d, end=%d: %s", matches, start, end, err)
|
return fmt.Errorf("cannot obtain labels for match[]=%q, start=%d, end=%d: %w", matches, start, end, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -494,7 +494,7 @@ func labelsWithMatches(matches []string, start, end int64, deadline netstorage.D
|
||||||
}
|
}
|
||||||
rss, err := netstorage.ProcessSearchQuery(sq, false, deadline)
|
rss, err := netstorage.ProcessSearchQuery(sq, false, deadline)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot fetch data for %q: %s", sq, err)
|
return nil, fmt.Errorf("cannot fetch data for %q: %w", sq, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
m := make(map[string]struct{})
|
m := make(map[string]struct{})
|
||||||
|
@ -510,7 +510,7 @@ func labelsWithMatches(matches []string, start, end int64, deadline netstorage.D
|
||||||
mLock.Unlock()
|
mLock.Unlock()
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error when data fetching: %s", err)
|
return nil, fmt.Errorf("error when data fetching: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
labels := make([]string, 0, len(m))
|
labels := make([]string, 0, len(m))
|
||||||
|
@ -528,7 +528,7 @@ func SeriesCountHandler(startTime time.Time, w http.ResponseWriter, r *http.Requ
|
||||||
deadline := getDeadlineForQuery(r)
|
deadline := getDeadlineForQuery(r)
|
||||||
n, err := netstorage.GetSeriesCount(deadline)
|
n, err := netstorage.GetSeriesCount(deadline)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot obtain series count: %s", err)
|
return fmt.Errorf("cannot obtain series count: %w", err)
|
||||||
}
|
}
|
||||||
w.Header().Set("Content-Type", "application/json")
|
w.Header().Set("Content-Type", "application/json")
|
||||||
WriteSeriesCountResponse(w, n)
|
WriteSeriesCountResponse(w, n)
|
||||||
|
@ -545,7 +545,7 @@ func SeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
|
||||||
ct := currentTime()
|
ct := currentTime()
|
||||||
|
|
||||||
if err := r.ParseForm(); err != nil {
|
if err := r.ParseForm(); err != nil {
|
||||||
return fmt.Errorf("cannot parse form values: %s", err)
|
return fmt.Errorf("cannot parse form values: %w", err)
|
||||||
}
|
}
|
||||||
matches := r.Form["match[]"]
|
matches := r.Form["match[]"]
|
||||||
if len(matches) == 0 {
|
if len(matches) == 0 {
|
||||||
|
@ -580,7 +580,7 @@ func SeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
|
||||||
}
|
}
|
||||||
rss, err := netstorage.ProcessSearchQuery(sq, false, deadline)
|
rss, err := netstorage.ProcessSearchQuery(sq, false, deadline)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot fetch data for %q: %s", sq, err)
|
return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
resultsCh := make(chan *quicktemplate.ByteBuffer)
|
resultsCh := make(chan *quicktemplate.ByteBuffer)
|
||||||
|
@ -605,7 +605,7 @@ func SeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
|
||||||
}
|
}
|
||||||
err = <-doneCh
|
err = <-doneCh
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error during data fetching: %s", err)
|
return fmt.Errorf("error during data fetching: %w", err)
|
||||||
}
|
}
|
||||||
seriesDuration.UpdateDuration(startTime)
|
seriesDuration.UpdateDuration(startTime)
|
||||||
return nil
|
return nil
|
||||||
|
@ -652,17 +652,17 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
|
||||||
if childQuery, windowStr, offsetStr := promql.IsMetricSelectorWithRollup(query); childQuery != "" {
|
if childQuery, windowStr, offsetStr := promql.IsMetricSelectorWithRollup(query); childQuery != "" {
|
||||||
window, err := parsePositiveDuration(windowStr, step)
|
window, err := parsePositiveDuration(windowStr, step)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot parse window: %s", err)
|
return fmt.Errorf("cannot parse window: %w", err)
|
||||||
}
|
}
|
||||||
offset, err := parseDuration(offsetStr, step)
|
offset, err := parseDuration(offsetStr, step)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot parse offset: %s", err)
|
return fmt.Errorf("cannot parse offset: %w", err)
|
||||||
}
|
}
|
||||||
start -= offset
|
start -= offset
|
||||||
end := start
|
end := start
|
||||||
start = end - window
|
start = end - window
|
||||||
if err := exportHandler(w, []string{childQuery}, start, end, "promapi", 0, deadline); err != nil {
|
if err := exportHandler(w, []string{childQuery}, start, end, "promapi", 0, deadline); err != nil {
|
||||||
return fmt.Errorf("error when exporting data for query=%q on the time range (start=%d, end=%d): %s", childQuery, start, end, err)
|
return fmt.Errorf("error when exporting data for query=%q on the time range (start=%d, end=%d): %w", childQuery, start, end, err)
|
||||||
}
|
}
|
||||||
queryDuration.UpdateDuration(startTime)
|
queryDuration.UpdateDuration(startTime)
|
||||||
return nil
|
return nil
|
||||||
|
@ -670,24 +670,24 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
|
||||||
if childQuery, windowStr, stepStr, offsetStr := promql.IsRollup(query); childQuery != "" {
|
if childQuery, windowStr, stepStr, offsetStr := promql.IsRollup(query); childQuery != "" {
|
||||||
newStep, err := parsePositiveDuration(stepStr, step)
|
newStep, err := parsePositiveDuration(stepStr, step)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot parse step: %s", err)
|
return fmt.Errorf("cannot parse step: %w", err)
|
||||||
}
|
}
|
||||||
if newStep > 0 {
|
if newStep > 0 {
|
||||||
step = newStep
|
step = newStep
|
||||||
}
|
}
|
||||||
window, err := parsePositiveDuration(windowStr, step)
|
window, err := parsePositiveDuration(windowStr, step)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot parse window: %s", err)
|
return fmt.Errorf("cannot parse window: %w", err)
|
||||||
}
|
}
|
||||||
offset, err := parseDuration(offsetStr, step)
|
offset, err := parseDuration(offsetStr, step)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot parse offset: %s", err)
|
return fmt.Errorf("cannot parse offset: %w", err)
|
||||||
}
|
}
|
||||||
start -= offset
|
start -= offset
|
||||||
end := start
|
end := start
|
||||||
start = end - window
|
start = end - window
|
||||||
if err := queryRangeHandler(w, childQuery, start, end, step, r, ct); err != nil {
|
if err := queryRangeHandler(w, childQuery, start, end, step, r, ct); err != nil {
|
||||||
return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %s", childQuery, start, end, step, err)
|
return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", childQuery, start, end, step, err)
|
||||||
}
|
}
|
||||||
queryDuration.UpdateDuration(startTime)
|
queryDuration.UpdateDuration(startTime)
|
||||||
return nil
|
return nil
|
||||||
|
@ -702,7 +702,7 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
|
||||||
}
|
}
|
||||||
result, err := promql.Exec(&ec, query, true)
|
result, err := promql.Exec(&ec, query, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error when executing query=%q for (time=%d, step=%d): %s", query, start, step, err)
|
return fmt.Errorf("error when executing query=%q for (time=%d, step=%d): %w", query, start, step, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
w.Header().Set("Content-Type", "application/json")
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
@ -750,7 +750,7 @@ func QueryRangeHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := queryRangeHandler(w, query, start, end, step, r, ct); err != nil {
|
if err := queryRangeHandler(w, query, start, end, step, r, ct); err != nil {
|
||||||
return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %s", query, start, end, step, err)
|
return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", query, start, end, step, err)
|
||||||
}
|
}
|
||||||
queryRangeDuration.UpdateDuration(startTime)
|
queryRangeDuration.UpdateDuration(startTime)
|
||||||
return nil
|
return nil
|
||||||
|
@ -788,7 +788,7 @@ func queryRangeHandler(w http.ResponseWriter, query string, start, end, step int
|
||||||
}
|
}
|
||||||
result, err := promql.Exec(&ec, query, false)
|
result, err := promql.Exec(&ec, query, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot execute query: %s", err)
|
return fmt.Errorf("cannot execute query: %w", err)
|
||||||
}
|
}
|
||||||
queryOffset := getLatencyOffsetMilliseconds()
|
queryOffset := getLatencyOffsetMilliseconds()
|
||||||
if ct-end < queryOffset {
|
if ct-end < queryOffset {
|
||||||
|
@ -897,7 +897,7 @@ func getTime(r *http.Request, argKey string, defaultValue int64) (int64, error)
|
||||||
// Try parsing duration relative to the current time
|
// Try parsing duration relative to the current time
|
||||||
d, err1 := time.ParseDuration(argValue)
|
d, err1 := time.ParseDuration(argValue)
|
||||||
if err1 != nil {
|
if err1 != nil {
|
||||||
return 0, fmt.Errorf("cannot parse %q=%q: %s", argKey, argValue, err)
|
return 0, fmt.Errorf("cannot parse %q=%q: %w", argKey, argValue, err)
|
||||||
}
|
}
|
||||||
if d > 0 {
|
if d > 0 {
|
||||||
d = -d
|
d = -d
|
||||||
|
@ -939,7 +939,7 @@ func getDuration(r *http.Request, argKey string, defaultValue int64) (int64, err
|
||||||
// Try parsing string format
|
// Try parsing string format
|
||||||
d, err := time.ParseDuration(argValue)
|
d, err := time.ParseDuration(argValue)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, fmt.Errorf("cannot parse %q=%q: %s", argKey, argValue, err)
|
return 0, fmt.Errorf("cannot parse %q=%q: %w", argKey, argValue, err)
|
||||||
}
|
}
|
||||||
secs = d.Seconds()
|
secs = d.Seconds()
|
||||||
}
|
}
|
||||||
|
@ -1001,7 +1001,7 @@ func getTagFilterssFromMatches(matches []string) ([][]storage.TagFilter, error)
|
||||||
for _, match := range matches {
|
for _, match := range matches {
|
||||||
tagFilters, err := promql.ParseMetricSelector(match)
|
tagFilters, err := promql.ParseMetricSelector(match)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse %q: %s", match, err)
|
return nil, fmt.Errorf("cannot parse %q: %w", match, err)
|
||||||
}
|
}
|
||||||
tagFilterss = append(tagFilterss, tagFilters)
|
tagFilterss = append(tagFilterss, tagFilters)
|
||||||
}
|
}
|
||||||
|
|
|
@ -119,7 +119,7 @@ func testIncrementalParallelAggr(iafc *incrementalAggrFuncContext, tssSrc, tssEx
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
tssActual := iafc.finalizeTimeseries()
|
tssActual := iafc.finalizeTimeseries()
|
||||||
if err := expectTimeseriesEqual(tssActual, tssExpected); err != nil {
|
if err := expectTimeseriesEqual(tssActual, tssExpected); err != nil {
|
||||||
return fmt.Errorf("%s; tssActual=%v, tssExpected=%v", err, tssActual, tssExpected)
|
return fmt.Errorf("%w; tssActual=%v, tssExpected=%v", err, tssActual, tssExpected)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -164,7 +164,7 @@ func expectTsEqual(actual, expected *timeseries) error {
|
||||||
return fmt.Errorf("unexpected timestamps; got %v; want %v", actual.Timestamps, expected.Timestamps)
|
return fmt.Errorf("unexpected timestamps; got %v; want %v", actual.Timestamps, expected.Timestamps)
|
||||||
}
|
}
|
||||||
if err := compareValues(actual.Values, expected.Values); err != nil {
|
if err := compareValues(actual.Values, expected.Values); err != nil {
|
||||||
return fmt.Errorf("%s; actual %v; expected %v", err, actual.Values, expected.Values)
|
return fmt.Errorf("%w; actual %v; expected %v", err, actual.Values, expected.Values)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -160,14 +160,14 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
|
||||||
}
|
}
|
||||||
rv, err := evalRollupFunc(ec, "default_rollup", rollupDefault, e, re, nil)
|
rv, err := evalRollupFunc(ec, "default_rollup", rollupDefault, e, re, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf(`cannot evaluate %q: %s`, me.AppendString(nil), err)
|
return nil, fmt.Errorf(`cannot evaluate %q: %w`, me.AppendString(nil), err)
|
||||||
}
|
}
|
||||||
return rv, nil
|
return rv, nil
|
||||||
}
|
}
|
||||||
if re, ok := e.(*metricsql.RollupExpr); ok {
|
if re, ok := e.(*metricsql.RollupExpr); ok {
|
||||||
rv, err := evalRollupFunc(ec, "default_rollup", rollupDefault, e, re, nil)
|
rv, err := evalRollupFunc(ec, "default_rollup", rollupDefault, e, re, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf(`cannot evaluate %q: %s`, re.AppendString(nil), err)
|
return nil, fmt.Errorf(`cannot evaluate %q: %w`, re.AppendString(nil), err)
|
||||||
}
|
}
|
||||||
return rv, nil
|
return rv, nil
|
||||||
}
|
}
|
||||||
|
@ -189,7 +189,7 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
|
||||||
}
|
}
|
||||||
rv, err := tf(tfa)
|
rv, err := tf(tfa)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf(`cannot evaluate %q: %s`, fe.AppendString(nil), err)
|
return nil, fmt.Errorf(`cannot evaluate %q: %w`, fe.AppendString(nil), err)
|
||||||
}
|
}
|
||||||
return rv, nil
|
return rv, nil
|
||||||
}
|
}
|
||||||
|
@ -203,7 +203,7 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
|
||||||
}
|
}
|
||||||
rv, err := evalRollupFunc(ec, fe.Name, rf, e, re, nil)
|
rv, err := evalRollupFunc(ec, fe.Name, rf, e, re, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf(`cannot evaluate %q: %s`, fe.AppendString(nil), err)
|
return nil, fmt.Errorf(`cannot evaluate %q: %w`, fe.AppendString(nil), err)
|
||||||
}
|
}
|
||||||
return rv, nil
|
return rv, nil
|
||||||
}
|
}
|
||||||
|
@ -240,7 +240,7 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
|
||||||
}
|
}
|
||||||
rv, err := af(afa)
|
rv, err := af(afa)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf(`cannot evaluate %q: %s`, ae.AppendString(nil), err)
|
return nil, fmt.Errorf(`cannot evaluate %q: %w`, ae.AppendString(nil), err)
|
||||||
}
|
}
|
||||||
return rv, nil
|
return rv, nil
|
||||||
}
|
}
|
||||||
|
@ -264,7 +264,7 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
|
||||||
}
|
}
|
||||||
rv, err := bf(bfa)
|
rv, err := bf(bfa)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf(`cannot evaluate %q: %s`, be.AppendString(nil), err)
|
return nil, fmt.Errorf(`cannot evaluate %q: %w`, be.AppendString(nil), err)
|
||||||
}
|
}
|
||||||
return rv, nil
|
return rv, nil
|
||||||
}
|
}
|
||||||
|
@ -375,7 +375,7 @@ func evalRollupFuncArgs(ec *EvalConfig, fe *metricsql.FuncExpr) ([]interface{},
|
||||||
}
|
}
|
||||||
ts, err := evalExpr(ec, arg)
|
ts, err := evalExpr(ec, arg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, fmt.Errorf("cannot evaluate arg #%d for %q: %s", i+1, fe.AppendString(nil), err)
|
return nil, nil, fmt.Errorf("cannot evaluate arg #%d for %q: %w", i+1, fe.AppendString(nil), err)
|
||||||
}
|
}
|
||||||
args[i] = ts
|
args[i] = ts
|
||||||
}
|
}
|
||||||
|
|
|
@ -285,7 +285,7 @@ func getRollupConfigs(name string, rf rollupFunc, expr metricsql.Expr, start, en
|
||||||
case "aggr_over_time":
|
case "aggr_over_time":
|
||||||
aggrFuncNames, err := getRollupAggrFuncNames(expr)
|
aggrFuncNames, err := getRollupAggrFuncNames(expr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, fmt.Errorf("invalid args to %s: %s", expr.AppendString(nil), err)
|
return nil, nil, fmt.Errorf("invalid args to %s: %w", expr.AppendString(nil), err)
|
||||||
}
|
}
|
||||||
for _, aggrFuncName := range aggrFuncNames {
|
for _, aggrFuncName := range aggrFuncNames {
|
||||||
if rollupFuncsRemoveCounterResets[aggrFuncName] {
|
if rollupFuncsRemoveCounterResets[aggrFuncName] {
|
||||||
|
|
|
@ -286,7 +286,7 @@ var (
|
||||||
var buf [8]byte
|
var buf [8]byte
|
||||||
if _, err := rand.Read(buf[:]); err != nil {
|
if _, err := rand.Read(buf[:]); err != nil {
|
||||||
// do not use logger.Panicf, since it isn't initialized yet.
|
// do not use logger.Panicf, since it isn't initialized yet.
|
||||||
panic(fmt.Errorf("FATAL: cannot read random data for rollupResultCacheKeyPrefix: %s", err))
|
panic(fmt.Errorf("FATAL: cannot read random data for rollupResultCacheKeyPrefix: %w", err))
|
||||||
}
|
}
|
||||||
return encoding.UnmarshalUint64(buf[:])
|
return encoding.UnmarshalUint64(buf[:])
|
||||||
}()
|
}()
|
||||||
|
@ -414,7 +414,7 @@ func (mi *rollupResultCacheMetainfo) Unmarshal(src []byte) error {
|
||||||
for i := 0; i < entriesLen; i++ {
|
for i := 0; i < entriesLen; i++ {
|
||||||
tail, err := mi.entries[i].Unmarshal(src)
|
tail, err := mi.entries[i].Unmarshal(src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot unmarshal entry #%d: %s", i, err)
|
return fmt.Errorf("cannot unmarshal entry #%d: %w", i, err)
|
||||||
}
|
}
|
||||||
src = tail
|
src = tail
|
||||||
}
|
}
|
||||||
|
|
|
@ -217,7 +217,7 @@ func (ts *timeseries) unmarshalFastNoTimestamps(src []byte) ([]byte, error) {
|
||||||
|
|
||||||
tail, err := unmarshalMetricNameFast(&ts.MetricName, src)
|
tail, err := unmarshalMetricNameFast(&ts.MetricName, src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return tail, fmt.Errorf("cannot unmarshal MetricName: %s", err)
|
return tail, fmt.Errorf("cannot unmarshal MetricName: %w", err)
|
||||||
}
|
}
|
||||||
src = tail
|
src = tail
|
||||||
|
|
||||||
|
@ -275,7 +275,7 @@ func unmarshalMetricNameFast(mn *storage.MetricName, src []byte) ([]byte, error)
|
||||||
|
|
||||||
tail, metricGroup, err := unmarshalBytesFast(src)
|
tail, metricGroup, err := unmarshalBytesFast(src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return tail, fmt.Errorf("cannot unmarshal MetricGroup: %s", err)
|
return tail, fmt.Errorf("cannot unmarshal MetricGroup: %w", err)
|
||||||
}
|
}
|
||||||
src = tail
|
src = tail
|
||||||
mn.MetricGroup = metricGroup[:len(metricGroup):len(metricGroup)]
|
mn.MetricGroup = metricGroup[:len(metricGroup):len(metricGroup)]
|
||||||
|
@ -292,13 +292,13 @@ func unmarshalMetricNameFast(mn *storage.MetricName, src []byte) ([]byte, error)
|
||||||
for i := range mn.Tags {
|
for i := range mn.Tags {
|
||||||
tail, key, err := unmarshalBytesFast(src)
|
tail, key, err := unmarshalBytesFast(src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return tail, fmt.Errorf("cannot unmarshal key for tag[%d]: %s", i, err)
|
return tail, fmt.Errorf("cannot unmarshal key for tag[%d]: %w", i, err)
|
||||||
}
|
}
|
||||||
src = tail
|
src = tail
|
||||||
|
|
||||||
tail, value, err := unmarshalBytesFast(src)
|
tail, value, err := unmarshalBytesFast(src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return tail, fmt.Errorf("cannot unmarshal value for tag[%d]: %s", i, err)
|
return tail, fmt.Errorf("cannot unmarshal value for tag[%d]: %w", i, err)
|
||||||
}
|
}
|
||||||
src = tail
|
src = tail
|
||||||
|
|
||||||
|
|
|
@ -414,7 +414,7 @@ func transformHistogramShare(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||||
}
|
}
|
||||||
les, err := getScalar(args[0], 0)
|
les, err := getScalar(args[0], 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse le: %s", err)
|
return nil, fmt.Errorf("cannot parse le: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Convert buckets with `vmrange` labels to buckets with `le` labels.
|
// Convert buckets with `vmrange` labels to buckets with `le` labels.
|
||||||
|
@ -425,7 +425,7 @@ func transformHistogramShare(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||||
if len(args) > 2 {
|
if len(args) > 2 {
|
||||||
s, err := getString(args[2], 2)
|
s, err := getString(args[2], 2)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse boundsLabel (arg #3): %s", err)
|
return nil, fmt.Errorf("cannot parse boundsLabel (arg #3): %w", err)
|
||||||
}
|
}
|
||||||
boundsLabel = s
|
boundsLabel = s
|
||||||
}
|
}
|
||||||
|
@ -513,7 +513,7 @@ func transformHistogramQuantile(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||||
}
|
}
|
||||||
phis, err := getScalar(args[0], 0)
|
phis, err := getScalar(args[0], 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse phi: %s", err)
|
return nil, fmt.Errorf("cannot parse phi: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Convert buckets with `vmrange` labels to buckets with `le` labels.
|
// Convert buckets with `vmrange` labels to buckets with `le` labels.
|
||||||
|
@ -524,7 +524,7 @@ func transformHistogramQuantile(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||||
if len(args) > 2 {
|
if len(args) > 2 {
|
||||||
s, err := getString(args[2], 2)
|
s, err := getString(args[2], 2)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse boundsLabel (arg #3): %s", err)
|
return nil, fmt.Errorf("cannot parse boundsLabel (arg #3): %w", err)
|
||||||
}
|
}
|
||||||
boundsLabel = s
|
boundsLabel = s
|
||||||
}
|
}
|
||||||
|
@ -1034,7 +1034,7 @@ func transformLabelMap(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||||
}
|
}
|
||||||
label, err := getString(args[1], 1)
|
label, err := getString(args[1], 1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot read label name: %s", err)
|
return nil, fmt.Errorf("cannot read label name: %w", err)
|
||||||
}
|
}
|
||||||
srcValues, dstValues, err := getStringPairs(args[2:])
|
srcValues, dstValues, err := getStringPairs(args[2:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -1179,7 +1179,7 @@ func transformLabelTransform(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||||
|
|
||||||
r, err := metricsql.CompileRegexp(regex)
|
r, err := metricsql.CompileRegexp(regex)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf(`cannot compile regex %q: %s`, regex, err)
|
return nil, fmt.Errorf(`cannot compile regex %q: %w`, regex, err)
|
||||||
}
|
}
|
||||||
return labelReplace(args[0], label, r, label, replacement)
|
return labelReplace(args[0], label, r, label, replacement)
|
||||||
}
|
}
|
||||||
|
@ -1208,7 +1208,7 @@ func transformLabelReplace(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||||
|
|
||||||
r, err := metricsql.CompileRegexpAnchored(regex)
|
r, err := metricsql.CompileRegexpAnchored(regex)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf(`cannot compile regex %q: %s`, regex, err)
|
return nil, fmt.Errorf(`cannot compile regex %q: %w`, regex, err)
|
||||||
}
|
}
|
||||||
return labelReplace(args[0], srcLabel, r, dstLabel, replacement)
|
return labelReplace(args[0], srcLabel, r, dstLabel, replacement)
|
||||||
}
|
}
|
||||||
|
@ -1238,7 +1238,7 @@ func transformLabelValue(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||||
}
|
}
|
||||||
labelName, err := getString(args[1], 1)
|
labelName, err := getString(args[1], 1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot get label name: %s", err)
|
return nil, fmt.Errorf("cannot get label name: %w", err)
|
||||||
}
|
}
|
||||||
rvs := args[0]
|
rvs := args[0]
|
||||||
for _, ts := range rvs {
|
for _, ts := range rvs {
|
||||||
|
@ -1265,15 +1265,15 @@ func transformLabelMatch(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||||
}
|
}
|
||||||
labelName, err := getString(args[1], 1)
|
labelName, err := getString(args[1], 1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot get label name: %s", err)
|
return nil, fmt.Errorf("cannot get label name: %w", err)
|
||||||
}
|
}
|
||||||
labelRe, err := getString(args[2], 2)
|
labelRe, err := getString(args[2], 2)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot get regexp: %s", err)
|
return nil, fmt.Errorf("cannot get regexp: %w", err)
|
||||||
}
|
}
|
||||||
r, err := metricsql.CompileRegexpAnchored(labelRe)
|
r, err := metricsql.CompileRegexpAnchored(labelRe)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf(`cannot compile regexp %q: %s`, labelRe, err)
|
return nil, fmt.Errorf(`cannot compile regexp %q: %w`, labelRe, err)
|
||||||
}
|
}
|
||||||
tss := args[0]
|
tss := args[0]
|
||||||
rvs := tss[:0]
|
rvs := tss[:0]
|
||||||
|
@ -1293,15 +1293,15 @@ func transformLabelMismatch(tfa *transformFuncArg) ([]*timeseries, error) {
|
||||||
}
|
}
|
||||||
labelName, err := getString(args[1], 1)
|
labelName, err := getString(args[1], 1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot get label name: %s", err)
|
return nil, fmt.Errorf("cannot get label name: %w", err)
|
||||||
}
|
}
|
||||||
labelRe, err := getString(args[2], 2)
|
labelRe, err := getString(args[2], 2)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot get regexp: %s", err)
|
return nil, fmt.Errorf("cannot get regexp: %w", err)
|
||||||
}
|
}
|
||||||
r, err := metricsql.CompileRegexpAnchored(labelRe)
|
r, err := metricsql.CompileRegexpAnchored(labelRe)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf(`cannot compile regexp %q: %s`, labelRe, err)
|
return nil, fmt.Errorf(`cannot compile regexp %q: %w`, labelRe, err)
|
||||||
}
|
}
|
||||||
tss := args[0]
|
tss := args[0]
|
||||||
rvs := tss[:0]
|
rvs := tss[:0]
|
||||||
|
@ -1401,7 +1401,7 @@ func newTransformFuncSortByLabel(isDesc bool) transformFunc {
|
||||||
}
|
}
|
||||||
label, err := getString(args[1], 1)
|
label, err := getString(args[1], 1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse label name for sorting: %s", err)
|
return nil, fmt.Errorf("cannot parse label name for sorting: %w", err)
|
||||||
}
|
}
|
||||||
rvs := args[0]
|
rvs := args[0]
|
||||||
sort.SliceStable(rvs, func(i, j int) bool {
|
sort.SliceStable(rvs, func(i, j int) bool {
|
||||||
|
|
|
@ -171,7 +171,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||||
w.Header().Set("Content-Type", "application/json")
|
w.Header().Set("Content-Type", "application/json")
|
||||||
snapshotPath, err := Storage.CreateSnapshot()
|
snapshotPath, err := Storage.CreateSnapshot()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
err = fmt.Errorf("cannot create snapshot: %s", err)
|
err = fmt.Errorf("cannot create snapshot: %w", err)
|
||||||
jsonResponseError(w, err)
|
jsonResponseError(w, err)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
@ -185,7 +185,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||||
w.Header().Set("Content-Type", "application/json")
|
w.Header().Set("Content-Type", "application/json")
|
||||||
snapshots, err := Storage.ListSnapshots()
|
snapshots, err := Storage.ListSnapshots()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
err = fmt.Errorf("cannot list snapshots: %s", err)
|
err = fmt.Errorf("cannot list snapshots: %w", err)
|
||||||
jsonResponseError(w, err)
|
jsonResponseError(w, err)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
@ -202,7 +202,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||||
w.Header().Set("Content-Type", "application/json")
|
w.Header().Set("Content-Type", "application/json")
|
||||||
snapshotName := r.FormValue("snapshot")
|
snapshotName := r.FormValue("snapshot")
|
||||||
if err := Storage.DeleteSnapshot(snapshotName); err != nil {
|
if err := Storage.DeleteSnapshot(snapshotName); err != nil {
|
||||||
err = fmt.Errorf("cannot delete snapshot %q: %s", snapshotName, err)
|
err = fmt.Errorf("cannot delete snapshot %q: %w", snapshotName, err)
|
||||||
jsonResponseError(w, err)
|
jsonResponseError(w, err)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
@ -212,13 +212,13 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||||
w.Header().Set("Content-Type", "application/json")
|
w.Header().Set("Content-Type", "application/json")
|
||||||
snapshots, err := Storage.ListSnapshots()
|
snapshots, err := Storage.ListSnapshots()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
err = fmt.Errorf("cannot list snapshots: %s", err)
|
err = fmt.Errorf("cannot list snapshots: %w", err)
|
||||||
jsonResponseError(w, err)
|
jsonResponseError(w, err)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
for _, snapshotName := range snapshots {
|
for _, snapshotName := range snapshots {
|
||||||
if err := Storage.DeleteSnapshot(snapshotName); err != nil {
|
if err := Storage.DeleteSnapshot(snapshotName); err != nil {
|
||||||
err = fmt.Errorf("cannot delete snapshot %q: %s", snapshotName, err)
|
err = fmt.Errorf("cannot delete snapshot %q: %w", snapshotName, err)
|
||||||
jsonResponseError(w, err)
|
jsonResponseError(w, err)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
|
@ -55,13 +55,13 @@ func (b *Backup) Run() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := dst.DeleteFile(fscommon.BackupCompleteFilename); err != nil {
|
if err := dst.DeleteFile(fscommon.BackupCompleteFilename); err != nil {
|
||||||
return fmt.Errorf("cannot delete `backup complete` file at %s: %s", dst, err)
|
return fmt.Errorf("cannot delete `backup complete` file at %s: %w", dst, err)
|
||||||
}
|
}
|
||||||
if err := runBackup(src, dst, origin, concurrency); err != nil {
|
if err := runBackup(src, dst, origin, concurrency); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := dst.CreateFile(fscommon.BackupCompleteFilename, []byte("ok")); err != nil {
|
if err := dst.CreateFile(fscommon.BackupCompleteFilename, []byte("ok")); err != nil {
|
||||||
return fmt.Errorf("cannot create `backup complete` file at %s: %s", dst, err)
|
return fmt.Errorf("cannot create `backup complete` file at %s: %w", dst, err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -74,17 +74,17 @@ func runBackup(src *fslocal.FS, dst common.RemoteFS, origin common.OriginFS, con
|
||||||
logger.Infof("obtaining list of parts at %s", src)
|
logger.Infof("obtaining list of parts at %s", src)
|
||||||
srcParts, err := src.ListParts()
|
srcParts, err := src.ListParts()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot list src parts: %s", err)
|
return fmt.Errorf("cannot list src parts: %w", err)
|
||||||
}
|
}
|
||||||
logger.Infof("obtaining list of parts at %s", dst)
|
logger.Infof("obtaining list of parts at %s", dst)
|
||||||
dstParts, err := dst.ListParts()
|
dstParts, err := dst.ListParts()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot list dst parts: %s", err)
|
return fmt.Errorf("cannot list dst parts: %w", err)
|
||||||
}
|
}
|
||||||
logger.Infof("obtaining list of parts at %s", origin)
|
logger.Infof("obtaining list of parts at %s", origin)
|
||||||
originParts, err := origin.ListParts()
|
originParts, err := origin.ListParts()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot list origin parts: %s", err)
|
return fmt.Errorf("cannot list origin parts: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
backupSize := getPartsSize(srcParts)
|
backupSize := getPartsSize(srcParts)
|
||||||
|
@ -97,7 +97,7 @@ func runBackup(src *fslocal.FS, dst common.RemoteFS, origin common.OriginFS, con
|
||||||
err = runParallel(concurrency, partsToDelete, func(p common.Part) error {
|
err = runParallel(concurrency, partsToDelete, func(p common.Part) error {
|
||||||
logger.Infof("deleting %s from %s", &p, dst)
|
logger.Infof("deleting %s from %s", &p, dst)
|
||||||
if err := dst.DeletePart(p); err != nil {
|
if err := dst.DeletePart(p); err != nil {
|
||||||
return fmt.Errorf("cannot delete %s from %s: %s", &p, dst, err)
|
return fmt.Errorf("cannot delete %s from %s: %w", &p, dst, err)
|
||||||
}
|
}
|
||||||
atomic.AddUint64(&deletedParts, 1)
|
atomic.AddUint64(&deletedParts, 1)
|
||||||
return nil
|
return nil
|
||||||
|
@ -109,7 +109,7 @@ func runBackup(src *fslocal.FS, dst common.RemoteFS, origin common.OriginFS, con
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := dst.RemoveEmptyDirs(); err != nil {
|
if err := dst.RemoveEmptyDirs(); err != nil {
|
||||||
return fmt.Errorf("cannot remove empty directories at %s: %s", dst, err)
|
return fmt.Errorf("cannot remove empty directories at %s: %w", dst, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -122,7 +122,7 @@ func runBackup(src *fslocal.FS, dst common.RemoteFS, origin common.OriginFS, con
|
||||||
err = runParallel(concurrency, originCopyParts, func(p common.Part) error {
|
err = runParallel(concurrency, originCopyParts, func(p common.Part) error {
|
||||||
logger.Infof("server-side copying %s from %s to %s", &p, origin, dst)
|
logger.Infof("server-side copying %s from %s to %s", &p, origin, dst)
|
||||||
if err := dst.CopyPart(origin, p); err != nil {
|
if err := dst.CopyPart(origin, p); err != nil {
|
||||||
return fmt.Errorf("cannot copy %s from %s to %s: %s", &p, origin, dst, err)
|
return fmt.Errorf("cannot copy %s from %s to %s: %w", &p, origin, dst, err)
|
||||||
}
|
}
|
||||||
atomic.AddUint64(&copiedParts, 1)
|
atomic.AddUint64(&copiedParts, 1)
|
||||||
return nil
|
return nil
|
||||||
|
@ -144,17 +144,17 @@ func runBackup(src *fslocal.FS, dst common.RemoteFS, origin common.OriginFS, con
|
||||||
logger.Infof("uploading %s from %s to %s", &p, src, dst)
|
logger.Infof("uploading %s from %s to %s", &p, src, dst)
|
||||||
rc, err := src.NewReadCloser(p)
|
rc, err := src.NewReadCloser(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot create reader for %s from %s: %s", &p, src, err)
|
return fmt.Errorf("cannot create reader for %s from %s: %w", &p, src, err)
|
||||||
}
|
}
|
||||||
sr := &statReader{
|
sr := &statReader{
|
||||||
r: rc,
|
r: rc,
|
||||||
bytesRead: &bytesUploaded,
|
bytesRead: &bytesUploaded,
|
||||||
}
|
}
|
||||||
if err := dst.UploadPart(p, sr); err != nil {
|
if err := dst.UploadPart(p, sr); err != nil {
|
||||||
return fmt.Errorf("cannot upload %s to %s: %s", &p, dst, err)
|
return fmt.Errorf("cannot upload %s to %s: %w", &p, dst, err)
|
||||||
}
|
}
|
||||||
if err = rc.Close(); err != nil {
|
if err = rc.Close(); err != nil {
|
||||||
return fmt.Errorf("cannot close reader for %s from %s: %s", &p, src, err)
|
return fmt.Errorf("cannot close reader for %s from %s: %w", &p, src, err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}, func(elapsed time.Duration) {
|
}, func(elapsed time.Duration) {
|
||||||
|
|
|
@ -43,11 +43,11 @@ func (r *Restore) Run() error {
|
||||||
|
|
||||||
// Make sure VictoriaMetrics doesn't run during the restore process.
|
// Make sure VictoriaMetrics doesn't run during the restore process.
|
||||||
if err := fs.MkdirAllIfNotExist(r.Dst.Dir); err != nil {
|
if err := fs.MkdirAllIfNotExist(r.Dst.Dir); err != nil {
|
||||||
return fmt.Errorf("cannot create dir %q: %s", r.Dst.Dir, err)
|
return fmt.Errorf("cannot create dir %q: %w", r.Dst.Dir, err)
|
||||||
}
|
}
|
||||||
flockF, err := fs.CreateFlockFile(r.Dst.Dir)
|
flockF, err := fs.CreateFlockFile(r.Dst.Dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot create lock file in %q; make sure VictoriaMetrics doesn't use the dir; error: %s", r.Dst.Dir, err)
|
return fmt.Errorf("cannot create lock file in %q; make sure VictoriaMetrics doesn't use the dir; error: %w", r.Dst.Dir, err)
|
||||||
}
|
}
|
||||||
defer fs.MustClose(flockF)
|
defer fs.MustClose(flockF)
|
||||||
|
|
||||||
|
@ -71,12 +71,12 @@ func (r *Restore) Run() error {
|
||||||
logger.Infof("obtaining list of parts at %s", src)
|
logger.Infof("obtaining list of parts at %s", src)
|
||||||
srcParts, err := src.ListParts()
|
srcParts, err := src.ListParts()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot list src parts: %s", err)
|
return fmt.Errorf("cannot list src parts: %w", err)
|
||||||
}
|
}
|
||||||
logger.Infof("obtaining list of parts at %s", dst)
|
logger.Infof("obtaining list of parts at %s", dst)
|
||||||
dstParts, err := dst.ListParts()
|
dstParts, err := dst.ListParts()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot list dst parts: %s", err)
|
return fmt.Errorf("cannot list dst parts: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
backupSize := getPartsSize(srcParts)
|
backupSize := getPartsSize(srcParts)
|
||||||
|
@ -129,7 +129,7 @@ func (r *Restore) Run() error {
|
||||||
logger.Infof("deleting %s from %s", path, dst)
|
logger.Infof("deleting %s from %s", path, dst)
|
||||||
size, err := dst.DeletePath(path)
|
size, err := dst.DeletePath(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot delete %s from %s: %s", path, dst, err)
|
return fmt.Errorf("cannot delete %s from %s: %w", path, dst, err)
|
||||||
}
|
}
|
||||||
deleteSize += size
|
deleteSize += size
|
||||||
}
|
}
|
||||||
|
@ -137,14 +137,14 @@ func (r *Restore) Run() error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := dst.RemoveEmptyDirs(); err != nil {
|
if err := dst.RemoveEmptyDirs(); err != nil {
|
||||||
return fmt.Errorf("cannot remove empty directories at %s: %s", dst, err)
|
return fmt.Errorf("cannot remove empty directories at %s: %w", dst, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Re-read dstParts, since additional parts may be removed on the previous step.
|
// Re-read dstParts, since additional parts may be removed on the previous step.
|
||||||
dstParts, err = dst.ListParts()
|
dstParts, err = dst.ListParts()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot list dst parts after the deletion: %s", err)
|
return fmt.Errorf("cannot list dst parts after the deletion: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
partsToCopy := common.PartsDifference(srcParts, dstParts)
|
partsToCopy := common.PartsDifference(srcParts, dstParts)
|
||||||
|
@ -166,17 +166,17 @@ func (r *Restore) Run() error {
|
||||||
logger.Infof("downloading %s from %s to %s", &p, src, dst)
|
logger.Infof("downloading %s from %s to %s", &p, src, dst)
|
||||||
wc, err := dst.NewWriteCloser(p)
|
wc, err := dst.NewWriteCloser(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot create writer for %q to %s: %s", &p, dst, err)
|
return fmt.Errorf("cannot create writer for %q to %s: %w", &p, dst, err)
|
||||||
}
|
}
|
||||||
sw := &statWriter{
|
sw := &statWriter{
|
||||||
w: wc,
|
w: wc,
|
||||||
bytesWritten: &bytesDownloaded,
|
bytesWritten: &bytesDownloaded,
|
||||||
}
|
}
|
||||||
if err := src.DownloadPart(p, sw); err != nil {
|
if err := src.DownloadPart(p, sw); err != nil {
|
||||||
return fmt.Errorf("cannot download %s to %s: %s", &p, dst, err)
|
return fmt.Errorf("cannot download %s to %s: %w", &p, dst, err)
|
||||||
}
|
}
|
||||||
if err := wc.Close(); err != nil {
|
if err := wc.Close(); err != nil {
|
||||||
return fmt.Errorf("cannot close reader from %s from %s: %s", &p, src, err)
|
return fmt.Errorf("cannot close reader from %s from %s: %w", &p, src, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
|
|
@ -207,7 +207,7 @@ func NewRemoteFS(path string) (common.RemoteFS, error) {
|
||||||
Dir: dir,
|
Dir: dir,
|
||||||
}
|
}
|
||||||
if err := fs.Init(); err != nil {
|
if err := fs.Init(); err != nil {
|
||||||
return nil, fmt.Errorf("cannot initialize connection to gcs: %s", err)
|
return nil, fmt.Errorf("cannot initialize connection to gcs: %w", err)
|
||||||
}
|
}
|
||||||
return fs, nil
|
return fs, nil
|
||||||
case "s3":
|
case "s3":
|
||||||
|
@ -226,7 +226,7 @@ func NewRemoteFS(path string) (common.RemoteFS, error) {
|
||||||
Dir: dir,
|
Dir: dir,
|
||||||
}
|
}
|
||||||
if err := fs.Init(); err != nil {
|
if err := fs.Init(); err != nil {
|
||||||
return nil, fmt.Errorf("cannot initialize connection to s3: %s", err)
|
return nil, fmt.Errorf("cannot initialize connection to s3: %w", err)
|
||||||
}
|
}
|
||||||
return fs, nil
|
return fs, nil
|
||||||
default:
|
default:
|
||||||
|
|
|
@ -13,11 +13,11 @@ import (
|
||||||
func FsyncFile(path string) error {
|
func FsyncFile(path string) error {
|
||||||
if err := fsync(path); err != nil {
|
if err := fsync(path); err != nil {
|
||||||
_ = os.RemoveAll(path)
|
_ = os.RemoveAll(path)
|
||||||
return fmt.Errorf("cannot fsync file %q: %s", path, err)
|
return fmt.Errorf("cannot fsync file %q: %w", path, err)
|
||||||
}
|
}
|
||||||
dir := filepath.Dir(path)
|
dir := filepath.Dir(path)
|
||||||
if err := fsync(dir); err != nil {
|
if err := fsync(dir); err != nil {
|
||||||
return fmt.Errorf("cannot fsync dir %q: %s", dir, err)
|
return fmt.Errorf("cannot fsync dir %q: %w", dir, err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -45,7 +45,7 @@ func fsync(path string) error {
|
||||||
func AppendFiles(dst []string, dir string) ([]string, error) {
|
func AppendFiles(dst []string, dir string) ([]string, error) {
|
||||||
d, err := os.Open(dir)
|
d, err := os.Open(dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot open %q: %s", dir, err)
|
return nil, fmt.Errorf("cannot open %q: %w", dir, err)
|
||||||
}
|
}
|
||||||
dst, err = appendFilesInternal(dst, d)
|
dst, err = appendFilesInternal(dst, d)
|
||||||
if err1 := d.Close(); err1 != nil {
|
if err1 := d.Close(); err1 != nil {
|
||||||
|
@ -58,14 +58,14 @@ func appendFilesInternal(dst []string, d *os.File) ([]string, error) {
|
||||||
dir := d.Name()
|
dir := d.Name()
|
||||||
dfi, err := d.Stat()
|
dfi, err := d.Stat()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot stat %q: %s", dir, err)
|
return nil, fmt.Errorf("cannot stat %q: %w", dir, err)
|
||||||
}
|
}
|
||||||
if !dfi.IsDir() {
|
if !dfi.IsDir() {
|
||||||
return nil, fmt.Errorf("%q isn't a directory", dir)
|
return nil, fmt.Errorf("%q isn't a directory", dir)
|
||||||
}
|
}
|
||||||
fis, err := d.Readdir(-1)
|
fis, err := d.Readdir(-1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot read directory contents in %q: %s", dir, err)
|
return nil, fmt.Errorf("cannot read directory contents in %q: %w", dir, err)
|
||||||
}
|
}
|
||||||
for _, fi := range fis {
|
for _, fi := range fis {
|
||||||
name := fi.Name()
|
name := fi.Name()
|
||||||
|
@ -82,7 +82,7 @@ func appendFilesInternal(dst []string, d *os.File) ([]string, error) {
|
||||||
// Process directory
|
// Process directory
|
||||||
dst, err = AppendFiles(dst, path)
|
dst, err = AppendFiles(dst, path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot list %q: %s", path, err)
|
return nil, fmt.Errorf("cannot list %q: %w", path, err)
|
||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
@ -100,17 +100,17 @@ func appendFilesInternal(dst []string, d *os.File) ([]string, error) {
|
||||||
// Skip symlink that points to nowhere.
|
// Skip symlink that points to nowhere.
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
return nil, fmt.Errorf("cannot resolve symlink %q: %s", pathOrig, err)
|
return nil, fmt.Errorf("cannot resolve symlink %q: %w", pathOrig, err)
|
||||||
}
|
}
|
||||||
sfi, err := os.Stat(pathReal)
|
sfi, err := os.Stat(pathReal)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot stat %q from symlink %q: %s", pathReal, path, err)
|
return nil, fmt.Errorf("cannot stat %q from symlink %q: %w", pathReal, path, err)
|
||||||
}
|
}
|
||||||
if sfi.IsDir() {
|
if sfi.IsDir() {
|
||||||
// Symlink points to directory
|
// Symlink points to directory
|
||||||
dstNew, err := AppendFiles(dst, pathReal)
|
dstNew, err := AppendFiles(dst, pathReal)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot list files at %q from symlink %q: %s", pathReal, path, err)
|
return nil, fmt.Errorf("cannot list files at %q from symlink %q: %w", pathReal, path, err)
|
||||||
}
|
}
|
||||||
pathReal += "/"
|
pathReal += "/"
|
||||||
for i := len(dst); i < len(dstNew); i++ {
|
for i := len(dst); i < len(dstNew); i++ {
|
||||||
|
@ -163,14 +163,14 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) {
|
||||||
dir := d.Name()
|
dir := d.Name()
|
||||||
dfi, err := d.Stat()
|
dfi, err := d.Stat()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, fmt.Errorf("cannot stat %q: %s", dir, err)
|
return false, fmt.Errorf("cannot stat %q: %w", dir, err)
|
||||||
}
|
}
|
||||||
if !dfi.IsDir() {
|
if !dfi.IsDir() {
|
||||||
return false, fmt.Errorf("%q isn't a directory", dir)
|
return false, fmt.Errorf("%q isn't a directory", dir)
|
||||||
}
|
}
|
||||||
fis, err := d.Readdir(-1)
|
fis, err := d.Readdir(-1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, fmt.Errorf("cannot read directory contents in %q: %s", dir, err)
|
return false, fmt.Errorf("cannot read directory contents in %q: %w", dir, err)
|
||||||
}
|
}
|
||||||
dirEntries := 0
|
dirEntries := 0
|
||||||
hasFlock := false
|
hasFlock := false
|
||||||
|
@ -184,7 +184,7 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) {
|
||||||
// Process directory
|
// Process directory
|
||||||
ok, err := removeEmptyDirs(path)
|
ok, err := removeEmptyDirs(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, fmt.Errorf("cannot list %q: %s", path, err)
|
return false, fmt.Errorf("cannot list %q: %w", path, err)
|
||||||
}
|
}
|
||||||
if !ok {
|
if !ok {
|
||||||
dirEntries++
|
dirEntries++
|
||||||
|
@ -209,21 +209,21 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) {
|
||||||
// Remove symlink that points to nowere.
|
// Remove symlink that points to nowere.
|
||||||
logger.Infof("removing broken symlink %q", pathOrig)
|
logger.Infof("removing broken symlink %q", pathOrig)
|
||||||
if err := os.Remove(pathOrig); err != nil {
|
if err := os.Remove(pathOrig); err != nil {
|
||||||
return false, fmt.Errorf("cannot remove %q: %s", pathOrig, err)
|
return false, fmt.Errorf("cannot remove %q: %w", pathOrig, err)
|
||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
return false, fmt.Errorf("cannot resolve symlink %q: %s", pathOrig, err)
|
return false, fmt.Errorf("cannot resolve symlink %q: %w", pathOrig, err)
|
||||||
}
|
}
|
||||||
sfi, err := os.Stat(pathReal)
|
sfi, err := os.Stat(pathReal)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, fmt.Errorf("cannot stat %q from symlink %q: %s", pathReal, path, err)
|
return false, fmt.Errorf("cannot stat %q from symlink %q: %w", pathReal, path, err)
|
||||||
}
|
}
|
||||||
if sfi.IsDir() {
|
if sfi.IsDir() {
|
||||||
// Symlink points to directory
|
// Symlink points to directory
|
||||||
ok, err := removeEmptyDirs(pathReal)
|
ok, err := removeEmptyDirs(pathReal)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, fmt.Errorf("cannot list files at %q from symlink %q: %s", pathReal, path, err)
|
return false, fmt.Errorf("cannot list files at %q from symlink %q: %w", pathReal, path, err)
|
||||||
}
|
}
|
||||||
if !ok {
|
if !ok {
|
||||||
dirEntries++
|
dirEntries++
|
||||||
|
@ -231,7 +231,7 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) {
|
||||||
// Remove the symlink
|
// Remove the symlink
|
||||||
logger.Infof("removing symlink that points to empty dir %q", pathOrig)
|
logger.Infof("removing symlink that points to empty dir %q", pathOrig)
|
||||||
if err := os.Remove(pathOrig); err != nil {
|
if err := os.Remove(pathOrig); err != nil {
|
||||||
return false, fmt.Errorf("cannot remove %q: %s", pathOrig, err)
|
return false, fmt.Errorf("cannot remove %q: %w", pathOrig, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
|
@ -252,11 +252,11 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) {
|
||||||
if hasFlock {
|
if hasFlock {
|
||||||
flockFilepath := dir + "/flock.lock"
|
flockFilepath := dir + "/flock.lock"
|
||||||
if err := os.Remove(flockFilepath); err != nil {
|
if err := os.Remove(flockFilepath); err != nil {
|
||||||
return false, fmt.Errorf("cannot remove %q: %s", flockFilepath, err)
|
return false, fmt.Errorf("cannot remove %q: %w", flockFilepath, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err := os.Remove(dir); err != nil {
|
if err := os.Remove(dir); err != nil {
|
||||||
return false, fmt.Errorf("cannot remove %q: %s", dir, err)
|
return false, fmt.Errorf("cannot remove %q: %w", dir, err)
|
||||||
}
|
}
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -64,7 +64,7 @@ func (fs *FS) ListParts() ([]common.Part, error) {
|
||||||
}
|
}
|
||||||
fi, err := os.Stat(file)
|
fi, err := os.Stat(file)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot stat %q: %s", file, err)
|
return nil, fmt.Errorf("cannot stat %q: %w", file, err)
|
||||||
}
|
}
|
||||||
path := file[len(dir):]
|
path := file[len(dir):]
|
||||||
size := uint64(fi.Size())
|
size := uint64(fi.Size())
|
||||||
|
@ -100,7 +100,7 @@ func (fs *FS) NewReadCloser(p common.Part) (io.ReadCloser, error) {
|
||||||
path := fs.path(p)
|
path := fs.path(p)
|
||||||
r, err := filestream.OpenReaderAt(path, int64(p.Offset), true)
|
r, err := filestream.OpenReaderAt(path, int64(p.Offset), true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot open %q at %q: %s", p.Path, fs.Dir, err)
|
return nil, fmt.Errorf("cannot open %q at %q: %w", p.Path, fs.Dir, err)
|
||||||
}
|
}
|
||||||
lrc := &limitedReadCloser{
|
lrc := &limitedReadCloser{
|
||||||
r: r,
|
r: r,
|
||||||
|
@ -121,7 +121,7 @@ func (fs *FS) NewWriteCloser(p common.Part) (io.WriteCloser, error) {
|
||||||
}
|
}
|
||||||
w, err := filestream.OpenWriterAt(path, int64(p.Offset), true)
|
w, err := filestream.OpenWriterAt(path, int64(p.Offset), true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot open writer for %q at offset %d: %s", path, p.Offset, err)
|
return nil, fmt.Errorf("cannot open writer for %q at offset %d: %w", path, p.Offset, err)
|
||||||
}
|
}
|
||||||
wc := &writeCloser{
|
wc := &writeCloser{
|
||||||
w: w,
|
w: w,
|
||||||
|
@ -148,16 +148,16 @@ func (fs *FS) DeletePath(path string) (uint64, error) {
|
||||||
// The file could be deleted earlier via symlink.
|
// The file could be deleted earlier via symlink.
|
||||||
return 0, nil
|
return 0, nil
|
||||||
}
|
}
|
||||||
return 0, fmt.Errorf("cannot open %q at %q: %s", path, fullPath, err)
|
return 0, fmt.Errorf("cannot open %q at %q: %w", path, fullPath, err)
|
||||||
}
|
}
|
||||||
fi, err := f.Stat()
|
fi, err := f.Stat()
|
||||||
_ = f.Close()
|
_ = f.Close()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, fmt.Errorf("cannot stat %q at %q: %s", path, fullPath, err)
|
return 0, fmt.Errorf("cannot stat %q at %q: %w", path, fullPath, err)
|
||||||
}
|
}
|
||||||
size := uint64(fi.Size())
|
size := uint64(fi.Size())
|
||||||
if err := os.Remove(fullPath); err != nil {
|
if err := os.Remove(fullPath); err != nil {
|
||||||
return 0, fmt.Errorf("cannot remove %q: %s", fullPath, err)
|
return 0, fmt.Errorf("cannot remove %q: %w", fullPath, err)
|
||||||
}
|
}
|
||||||
return size, nil
|
return size, nil
|
||||||
}
|
}
|
||||||
|
@ -170,7 +170,7 @@ func (fs *FS) RemoveEmptyDirs() error {
|
||||||
func (fs *FS) mkdirAll(filePath string) error {
|
func (fs *FS) mkdirAll(filePath string) error {
|
||||||
dir := filepath.Dir(filePath)
|
dir := filepath.Dir(filePath)
|
||||||
if err := os.MkdirAll(dir, 0700); err != nil {
|
if err := os.MkdirAll(dir, 0700); err != nil {
|
||||||
return fmt.Errorf("cannot create directory %q: %s", dir, err)
|
return fmt.Errorf("cannot create directory %q: %w", dir, err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -60,7 +60,7 @@ func (fs *FS) ListParts() ([]common.Part, error) {
|
||||||
// Check for correct part size.
|
// Check for correct part size.
|
||||||
fi, err := os.Stat(file)
|
fi, err := os.Stat(file)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot stat file %q for part %q: %s", file, p.Path, err)
|
return nil, fmt.Errorf("cannot stat file %q for part %q: %w", file, p.Path, err)
|
||||||
}
|
}
|
||||||
p.ActualSize = uint64(fi.Size())
|
p.ActualSize = uint64(fi.Size())
|
||||||
parts = append(parts, p)
|
parts = append(parts, p)
|
||||||
|
@ -72,7 +72,7 @@ func (fs *FS) ListParts() ([]common.Part, error) {
|
||||||
func (fs *FS) DeletePart(p common.Part) error {
|
func (fs *FS) DeletePart(p common.Part) error {
|
||||||
path := fs.path(p)
|
path := fs.path(p)
|
||||||
if err := os.Remove(path); err != nil {
|
if err := os.Remove(path); err != nil {
|
||||||
return fmt.Errorf("cannot remove %q: %s", path, err)
|
return fmt.Errorf("cannot remove %q: %w", path, err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -103,12 +103,12 @@ func (fs *FS) CopyPart(srcFS common.OriginFS, p common.Part) error {
|
||||||
// Cannot create hardlink. Just copy file contents
|
// Cannot create hardlink. Just copy file contents
|
||||||
srcFile, err := os.Open(srcPath)
|
srcFile, err := os.Open(srcPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot open file %q: %s", srcPath, err)
|
return fmt.Errorf("cannot open file %q: %w", srcPath, err)
|
||||||
}
|
}
|
||||||
dstFile, err := os.Create(dstPath)
|
dstFile, err := os.Create(dstPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
_ = srcFile.Close()
|
_ = srcFile.Close()
|
||||||
return fmt.Errorf("cannot create file %q: %s", dstPath, err)
|
return fmt.Errorf("cannot create file %q: %w", dstPath, err)
|
||||||
}
|
}
|
||||||
n, err := io.Copy(dstFile, srcFile)
|
n, err := io.Copy(dstFile, srcFile)
|
||||||
if err1 := dstFile.Close(); err1 != nil {
|
if err1 := dstFile.Close(); err1 != nil {
|
||||||
|
@ -137,14 +137,14 @@ func (fs *FS) DownloadPart(p common.Part, w io.Writer) error {
|
||||||
path := fs.path(p)
|
path := fs.path(p)
|
||||||
r, err := os.Open(path)
|
r, err := os.Open(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot open %q: %s", path, err)
|
return fmt.Errorf("cannot open %q: %w", path, err)
|
||||||
}
|
}
|
||||||
n, err := io.Copy(w, r)
|
n, err := io.Copy(w, r)
|
||||||
if err1 := r.Close(); err1 != nil && err == nil {
|
if err1 := r.Close(); err1 != nil && err == nil {
|
||||||
err = err1
|
err = err1
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot download data from %q: %s", path, err)
|
return fmt.Errorf("cannot download data from %q: %w", path, err)
|
||||||
}
|
}
|
||||||
if uint64(n) != p.Size {
|
if uint64(n) != p.Size {
|
||||||
return fmt.Errorf("wrong data size downloaded from %q; got %d bytes; want %d bytes", path, n, p.Size)
|
return fmt.Errorf("wrong data size downloaded from %q; got %d bytes; want %d bytes", path, n, p.Size)
|
||||||
|
@ -160,7 +160,7 @@ func (fs *FS) UploadPart(p common.Part, r io.Reader) error {
|
||||||
}
|
}
|
||||||
w, err := os.Create(path)
|
w, err := os.Create(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot create file %q: %s", path, err)
|
return fmt.Errorf("cannot create file %q: %w", path, err)
|
||||||
}
|
}
|
||||||
n, err := io.Copy(w, r)
|
n, err := io.Copy(w, r)
|
||||||
if err1 := w.Close(); err1 != nil && err == nil {
|
if err1 := w.Close(); err1 != nil && err == nil {
|
||||||
|
@ -168,7 +168,7 @@ func (fs *FS) UploadPart(p common.Part, r io.Reader) error {
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
_ = os.RemoveAll(path)
|
_ = os.RemoveAll(path)
|
||||||
return fmt.Errorf("cannot upload data to %q: %s", path, err)
|
return fmt.Errorf("cannot upload data to %q: %w", path, err)
|
||||||
}
|
}
|
||||||
if uint64(n) != p.Size {
|
if uint64(n) != p.Size {
|
||||||
_ = os.RemoveAll(path)
|
_ = os.RemoveAll(path)
|
||||||
|
@ -184,7 +184,7 @@ func (fs *FS) UploadPart(p common.Part, r io.Reader) error {
|
||||||
func (fs *FS) mkdirAll(filePath string) error {
|
func (fs *FS) mkdirAll(filePath string) error {
|
||||||
dir := filepath.Dir(filePath)
|
dir := filepath.Dir(filePath)
|
||||||
if err := os.MkdirAll(dir, 0700); err != nil {
|
if err := os.MkdirAll(dir, 0700); err != nil {
|
||||||
return fmt.Errorf("cannot create directory %q: %s", dir, err)
|
return fmt.Errorf("cannot create directory %q: %w", dir, err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -200,7 +200,7 @@ func (fs *FS) DeleteFile(filePath string) error {
|
||||||
path := filepath.Join(fs.Dir, filePath)
|
path := filepath.Join(fs.Dir, filePath)
|
||||||
err := os.Remove(path)
|
err := os.Remove(path)
|
||||||
if err != nil && !os.IsNotExist(err) {
|
if err != nil && !os.IsNotExist(err) {
|
||||||
return fmt.Errorf("cannot remove %q: %s", path, err)
|
return fmt.Errorf("cannot remove %q: %w", path, err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -214,7 +214,7 @@ func (fs *FS) CreateFile(filePath string, data []byte) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := ioutil.WriteFile(path, data, 0600); err != nil {
|
if err := ioutil.WriteFile(path, data, 0600); err != nil {
|
||||||
return fmt.Errorf("cannot write %d bytes to %q: %s", len(data), path, err)
|
return fmt.Errorf("cannot write %d bytes to %q: %w", len(data), path, err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -227,7 +227,7 @@ func (fs *FS) HasFile(filePath string) (bool, error) {
|
||||||
if os.IsNotExist(err) {
|
if os.IsNotExist(err) {
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
return false, fmt.Errorf("cannot stat %q: %s", path, err)
|
return false, fmt.Errorf("cannot stat %q: %w", path, err)
|
||||||
}
|
}
|
||||||
if fi.IsDir() {
|
if fi.IsDir() {
|
||||||
return false, fmt.Errorf("%q is directory, while file is needed", path)
|
return false, fmt.Errorf("%q is directory, while file is needed", path)
|
||||||
|
|
|
@ -49,13 +49,13 @@ func (fs *FS) Init() error {
|
||||||
creds := option.WithCredentialsFile(fs.CredsFilePath)
|
creds := option.WithCredentialsFile(fs.CredsFilePath)
|
||||||
c, err := storage.NewClient(ctx, creds)
|
c, err := storage.NewClient(ctx, creds)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot create gcs client with credsFile %q: %s", fs.CredsFilePath, err)
|
return fmt.Errorf("cannot create gcs client with credsFile %q: %w", fs.CredsFilePath, err)
|
||||||
}
|
}
|
||||||
client = c
|
client = c
|
||||||
} else {
|
} else {
|
||||||
c, err := storage.NewClient(ctx)
|
c, err := storage.NewClient(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot create default gcs client: %q", err)
|
return fmt.Errorf("cannot create default gcs client: %w", err)
|
||||||
}
|
}
|
||||||
client = c
|
client = c
|
||||||
}
|
}
|
||||||
|
@ -82,7 +82,7 @@ func (fs *FS) ListParts() ([]common.Part, error) {
|
||||||
Prefix: dir,
|
Prefix: dir,
|
||||||
}
|
}
|
||||||
if err := q.SetAttrSelection(selectAttrs); err != nil {
|
if err := q.SetAttrSelection(selectAttrs); err != nil {
|
||||||
return nil, fmt.Errorf("error in SetAttrSelection: %s", err)
|
return nil, fmt.Errorf("error in SetAttrSelection: %w", err)
|
||||||
}
|
}
|
||||||
it := fs.bkt.Objects(ctx, q)
|
it := fs.bkt.Objects(ctx, q)
|
||||||
var parts []common.Part
|
var parts []common.Part
|
||||||
|
@ -92,7 +92,7 @@ func (fs *FS) ListParts() ([]common.Part, error) {
|
||||||
return parts, nil
|
return parts, nil
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error when iterating objects at %q: %s", dir, err)
|
return nil, fmt.Errorf("error when iterating objects at %q: %w", dir, err)
|
||||||
}
|
}
|
||||||
file := attr.Name
|
file := attr.Name
|
||||||
if !strings.HasPrefix(file, dir) {
|
if !strings.HasPrefix(file, dir) {
|
||||||
|
@ -116,7 +116,7 @@ func (fs *FS) DeletePart(p common.Part) error {
|
||||||
o := fs.object(p)
|
o := fs.object(p)
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
if err := o.Delete(ctx); err != nil {
|
if err := o.Delete(ctx); err != nil {
|
||||||
return fmt.Errorf("cannot delete %q at %s (remote path %q): %s", p.Path, fs, o.ObjectName(), err)
|
return fmt.Errorf("cannot delete %q at %s (remote path %q): %w", p.Path, fs, o.ObjectName(), err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -140,7 +140,7 @@ func (fs *FS) CopyPart(srcFS common.OriginFS, p common.Part) error {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
attr, err := copier.Run(ctx)
|
attr, err := copier.Run(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot copy %q from %s to %s: %s", p.Path, src, fs, err)
|
return fmt.Errorf("cannot copy %q from %s to %s: %w", p.Path, src, fs, err)
|
||||||
}
|
}
|
||||||
if uint64(attr.Size) != p.Size {
|
if uint64(attr.Size) != p.Size {
|
||||||
return fmt.Errorf("unexpected %q size after copying from %s to %s; got %d bytes; want %d bytes", p.Path, src, fs, attr.Size, p.Size)
|
return fmt.Errorf("unexpected %q size after copying from %s to %s; got %d bytes; want %d bytes", p.Path, src, fs, attr.Size, p.Size)
|
||||||
|
@ -154,14 +154,14 @@ func (fs *FS) DownloadPart(p common.Part, w io.Writer) error {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
r, err := o.NewReader(ctx)
|
r, err := o.NewReader(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot open reader for %q at %s (remote path %q): %s", p.Path, fs, o.ObjectName(), err)
|
return fmt.Errorf("cannot open reader for %q at %s (remote path %q): %w", p.Path, fs, o.ObjectName(), err)
|
||||||
}
|
}
|
||||||
n, err := io.Copy(w, r)
|
n, err := io.Copy(w, r)
|
||||||
if err1 := r.Close(); err1 != nil && err == nil {
|
if err1 := r.Close(); err1 != nil && err == nil {
|
||||||
err = err1
|
err = err1
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot download %q from at %s (remote path %q): %s", p.Path, fs, o.ObjectName(), err)
|
return fmt.Errorf("cannot download %q from at %s (remote path %q): %w", p.Path, fs, o.ObjectName(), err)
|
||||||
}
|
}
|
||||||
if uint64(n) != p.Size {
|
if uint64(n) != p.Size {
|
||||||
return fmt.Errorf("wrong data size downloaded from %q at %s; got %d bytes; want %d bytes", p.Path, fs, n, p.Size)
|
return fmt.Errorf("wrong data size downloaded from %q at %s; got %d bytes; want %d bytes", p.Path, fs, n, p.Size)
|
||||||
|
@ -179,7 +179,7 @@ func (fs *FS) UploadPart(p common.Part, r io.Reader) error {
|
||||||
err = err1
|
err = err1
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot upload data to %q at %s (remote path %q): %s", p.Path, fs, o.ObjectName(), err)
|
return fmt.Errorf("cannot upload data to %q at %s (remote path %q): %w", p.Path, fs, o.ObjectName(), err)
|
||||||
}
|
}
|
||||||
if uint64(n) != p.Size {
|
if uint64(n) != p.Size {
|
||||||
return fmt.Errorf("wrong data size uploaded to %q at %s; got %d bytes; want %d bytes", p.Path, fs, n, p.Size)
|
return fmt.Errorf("wrong data size uploaded to %q at %s; got %d bytes; want %d bytes", p.Path, fs, n, p.Size)
|
||||||
|
@ -201,7 +201,7 @@ func (fs *FS) DeleteFile(filePath string) error {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
if err := o.Delete(ctx); err != nil {
|
if err := o.Delete(ctx); err != nil {
|
||||||
if err != storage.ErrObjectNotExist {
|
if err != storage.ErrObjectNotExist {
|
||||||
return fmt.Errorf("cannot delete %q at %s (remote path %q): %s", filePath, fs, o.ObjectName(), err)
|
return fmt.Errorf("cannot delete %q at %s (remote path %q): %w", filePath, fs, o.ObjectName(), err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -218,14 +218,14 @@ func (fs *FS) CreateFile(filePath string, data []byte) error {
|
||||||
n, err := w.Write(data)
|
n, err := w.Write(data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
_ = w.Close()
|
_ = w.Close()
|
||||||
return fmt.Errorf("cannot upload %d bytes to %q at %s (remote path %q): %s", len(data), filePath, fs, o.ObjectName(), err)
|
return fmt.Errorf("cannot upload %d bytes to %q at %s (remote path %q): %w", len(data), filePath, fs, o.ObjectName(), err)
|
||||||
}
|
}
|
||||||
if n != len(data) {
|
if n != len(data) {
|
||||||
_ = w.Close()
|
_ = w.Close()
|
||||||
return fmt.Errorf("wrong data size uploaded to %q at %s (remote path %q); got %d bytes; want %d bytes", filePath, fs, o.ObjectName(), n, len(data))
|
return fmt.Errorf("wrong data size uploaded to %q at %s (remote path %q); got %d bytes; want %d bytes", filePath, fs, o.ObjectName(), n, len(data))
|
||||||
}
|
}
|
||||||
if err := w.Close(); err != nil {
|
if err := w.Close(); err != nil {
|
||||||
return fmt.Errorf("cannot close %q at %s (remote path %q): %s", filePath, fs, o.ObjectName(), err)
|
return fmt.Errorf("cannot close %q at %s (remote path %q): %w", filePath, fs, o.ObjectName(), err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -240,7 +240,7 @@ func (fs *FS) HasFile(filePath string) (bool, error) {
|
||||||
if err == storage.ErrObjectNotExist {
|
if err == storage.ErrObjectNotExist {
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
return false, fmt.Errorf("unexpected error when obtaining attributes for %q at %s (remote path %q): %s", filePath, fs, o.ObjectName(), err)
|
return false, fmt.Errorf("unexpected error when obtaining attributes for %q at %s (remote path %q): %w", filePath, fs, o.ObjectName(), err)
|
||||||
}
|
}
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -66,7 +66,7 @@ func (fs *FS) Init() error {
|
||||||
}
|
}
|
||||||
sess, err := session.NewSessionWithOptions(opts)
|
sess, err := session.NewSessionWithOptions(opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot create S3 session: %s", err)
|
return fmt.Errorf("cannot create S3 session: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(fs.CustomEndpoint) > 0 {
|
if len(fs.CustomEndpoint) > 0 {
|
||||||
|
@ -81,7 +81,7 @@ func (fs *FS) Init() error {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
region, err := s3manager.GetBucketRegion(ctx, sess, fs.Bucket, "")
|
region, err := s3manager.GetBucketRegion(ctx, sess, fs.Bucket, "")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot determine region for bucket %q: %s", fs.Bucket, err)
|
return fmt.Errorf("cannot determine region for bucket %q: %w", fs.Bucket, err)
|
||||||
}
|
}
|
||||||
sess.Config.WithRegion(region)
|
sess.Config.WithRegion(region)
|
||||||
logger.Infof("bucket %q is stored at region %q; switching to this region", fs.Bucket, region)
|
logger.Infof("bucket %q is stored at region %q; switching to this region", fs.Bucket, region)
|
||||||
|
@ -133,7 +133,7 @@ func (fs *FS) ListParts() ([]common.Part, error) {
|
||||||
err = errOuter
|
err = errOuter
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error when listing s3 objects inside dir %q: %s", dir, err)
|
return nil, fmt.Errorf("error when listing s3 objects inside dir %q: %w", dir, err)
|
||||||
}
|
}
|
||||||
return parts, nil
|
return parts, nil
|
||||||
}
|
}
|
||||||
|
@ -147,7 +147,7 @@ func (fs *FS) DeletePart(p common.Part) error {
|
||||||
}
|
}
|
||||||
_, err := fs.s3.DeleteObject(input)
|
_, err := fs.s3.DeleteObject(input)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot delete %q at %s (remote path %q): %s", p.Path, fs, path, err)
|
return fmt.Errorf("cannot delete %q at %s (remote path %q): %w", p.Path, fs, path, err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -175,7 +175,7 @@ func (fs *FS) CopyPart(srcFS common.OriginFS, p common.Part) error {
|
||||||
}
|
}
|
||||||
_, err := fs.s3.CopyObject(input)
|
_, err := fs.s3.CopyObject(input)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot copy %q from %s to %s (copySource %q): %s", p.Path, src, fs, copySource, err)
|
return fmt.Errorf("cannot copy %q from %s to %s (copySource %q): %w", p.Path, src, fs, copySource, err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -189,7 +189,7 @@ func (fs *FS) DownloadPart(p common.Part, w io.Writer) error {
|
||||||
}
|
}
|
||||||
o, err := fs.s3.GetObject(input)
|
o, err := fs.s3.GetObject(input)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot open %q at %s (remote path %q): %s", p.Path, fs, path, err)
|
return fmt.Errorf("cannot open %q at %s (remote path %q): %w", p.Path, fs, path, err)
|
||||||
}
|
}
|
||||||
r := o.Body
|
r := o.Body
|
||||||
n, err := io.Copy(w, r)
|
n, err := io.Copy(w, r)
|
||||||
|
@ -197,7 +197,7 @@ func (fs *FS) DownloadPart(p common.Part, w io.Writer) error {
|
||||||
err = err1
|
err = err1
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot download %q from at %s (remote path %q): %s", p.Path, fs, path, err)
|
return fmt.Errorf("cannot download %q from at %s (remote path %q): %w", p.Path, fs, path, err)
|
||||||
}
|
}
|
||||||
if uint64(n) != p.Size {
|
if uint64(n) != p.Size {
|
||||||
return fmt.Errorf("wrong data size downloaded from %q at %s; got %d bytes; want %d bytes", p.Path, fs, n, p.Size)
|
return fmt.Errorf("wrong data size downloaded from %q at %s; got %d bytes; want %d bytes", p.Path, fs, n, p.Size)
|
||||||
|
@ -218,7 +218,7 @@ func (fs *FS) UploadPart(p common.Part, r io.Reader) error {
|
||||||
}
|
}
|
||||||
_, err := fs.uploader.Upload(input)
|
_, err := fs.uploader.Upload(input)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot upoad data to %q at %s (remote path %q): %s", p.Path, fs, path, err)
|
return fmt.Errorf("cannot upoad data to %q at %s (remote path %q): %w", p.Path, fs, path, err)
|
||||||
}
|
}
|
||||||
if uint64(sr.size) != p.Size {
|
if uint64(sr.size) != p.Size {
|
||||||
return fmt.Errorf("wrong data size uploaded to %q at %s; got %d bytes; want %d bytes", p.Path, fs, sr.size, p.Size)
|
return fmt.Errorf("wrong data size uploaded to %q at %s; got %d bytes; want %d bytes", p.Path, fs, sr.size, p.Size)
|
||||||
|
@ -249,7 +249,7 @@ func (fs *FS) DeleteFile(filePath string) error {
|
||||||
Key: aws.String(path),
|
Key: aws.String(path),
|
||||||
}
|
}
|
||||||
if _, err := fs.s3.DeleteObject(input); err != nil {
|
if _, err := fs.s3.DeleteObject(input); err != nil {
|
||||||
return fmt.Errorf("cannot delete %q at %s (remote path %q): %s", filePath, fs, path, err)
|
return fmt.Errorf("cannot delete %q at %s (remote path %q): %w", filePath, fs, path, err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -269,7 +269,7 @@ func (fs *FS) CreateFile(filePath string, data []byte) error {
|
||||||
}
|
}
|
||||||
_, err := fs.uploader.Upload(input)
|
_, err := fs.uploader.Upload(input)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot upoad data to %q at %s (remote path %q): %s", filePath, fs, path, err)
|
return fmt.Errorf("cannot upoad data to %q at %s (remote path %q): %w", filePath, fs, path, err)
|
||||||
}
|
}
|
||||||
l := int64(len(data))
|
l := int64(len(data))
|
||||||
if sr.size != l {
|
if sr.size != l {
|
||||||
|
@ -290,10 +290,10 @@ func (fs *FS) HasFile(filePath string) (bool, error) {
|
||||||
if ae, ok := err.(awserr.Error); ok && ae.Code() == s3.ErrCodeNoSuchKey {
|
if ae, ok := err.(awserr.Error); ok && ae.Code() == s3.ErrCodeNoSuchKey {
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
return false, fmt.Errorf("cannot open %q at %s (remote path %q): %s", filePath, fs, path, err)
|
return false, fmt.Errorf("cannot open %q at %s (remote path %q): %w", filePath, fs, path, err)
|
||||||
}
|
}
|
||||||
if err := o.Body.Close(); err != nil {
|
if err := o.Body.Close(); err != nil {
|
||||||
return false, fmt.Errorf("cannot close %q at %s (remote path %q): %s", filePath, fs, path, err)
|
return false, fmt.Errorf("cannot close %q at %s (remote path %q): %w", filePath, fs, path, err)
|
||||||
}
|
}
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -76,7 +76,7 @@ func MarshalTimestamps(dst []byte, timestamps []int64, precisionBits uint8) (res
|
||||||
func UnmarshalTimestamps(dst []int64, src []byte, mt MarshalType, firstTimestamp int64, itemsCount int) ([]int64, error) {
|
func UnmarshalTimestamps(dst []int64, src []byte, mt MarshalType, firstTimestamp int64, itemsCount int) ([]int64, error) {
|
||||||
dst, err := unmarshalInt64Array(dst, src, mt, firstTimestamp, itemsCount)
|
dst, err := unmarshalInt64Array(dst, src, mt, firstTimestamp, itemsCount)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot unmarshal %d timestamps from len(src)=%d bytes: %s", itemsCount, len(src), err)
|
return nil, fmt.Errorf("cannot unmarshal %d timestamps from len(src)=%d bytes: %w", itemsCount, len(src), err)
|
||||||
}
|
}
|
||||||
return dst, nil
|
return dst, nil
|
||||||
}
|
}
|
||||||
|
@ -97,7 +97,7 @@ func MarshalValues(dst []byte, values []int64, precisionBits uint8) (result []by
|
||||||
func UnmarshalValues(dst []int64, src []byte, mt MarshalType, firstValue int64, itemsCount int) ([]int64, error) {
|
func UnmarshalValues(dst []int64, src []byte, mt MarshalType, firstValue int64, itemsCount int) ([]int64, error) {
|
||||||
dst, err := unmarshalInt64Array(dst, src, mt, firstValue, itemsCount)
|
dst, err := unmarshalInt64Array(dst, src, mt, firstValue, itemsCount)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot unmarshal %d values from len(src)=%d bytes: %s", itemsCount, len(src), err)
|
return nil, fmt.Errorf("cannot unmarshal %d values from len(src)=%d bytes: %w", itemsCount, len(src), err)
|
||||||
}
|
}
|
||||||
return dst, nil
|
return dst, nil
|
||||||
}
|
}
|
||||||
|
@ -166,36 +166,36 @@ func unmarshalInt64Array(dst []int64, src []byte, mt MarshalType, firstValue int
|
||||||
bb := bbPool.Get()
|
bb := bbPool.Get()
|
||||||
bb.B, err = DecompressZSTD(bb.B[:0], src)
|
bb.B, err = DecompressZSTD(bb.B[:0], src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot decompress zstd data of size %d: %s; src_zstd=%X", len(src), err, src)
|
return nil, fmt.Errorf("cannot decompress zstd data of size %d: %w; src_zstd=%X", len(src), err, src)
|
||||||
}
|
}
|
||||||
dst, err = unmarshalInt64NearestDelta(dst, bb.B, firstValue, itemsCount)
|
dst, err = unmarshalInt64NearestDelta(dst, bb.B, firstValue, itemsCount)
|
||||||
bbPool.Put(bb)
|
bbPool.Put(bb)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot unmarshal nearest delta data after zstd decompression: %s; src_zstd=%X", err, src)
|
return nil, fmt.Errorf("cannot unmarshal nearest delta data after zstd decompression: %w; src_zstd=%X", err, src)
|
||||||
}
|
}
|
||||||
return dst, nil
|
return dst, nil
|
||||||
case MarshalTypeZSTDNearestDelta2:
|
case MarshalTypeZSTDNearestDelta2:
|
||||||
bb := bbPool.Get()
|
bb := bbPool.Get()
|
||||||
bb.B, err = DecompressZSTD(bb.B[:0], src)
|
bb.B, err = DecompressZSTD(bb.B[:0], src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot decompress zstd data of size %d: %s; src_zstd=%X", len(src), err, src)
|
return nil, fmt.Errorf("cannot decompress zstd data of size %d: %w; src_zstd=%X", len(src), err, src)
|
||||||
}
|
}
|
||||||
dst, err = unmarshalInt64NearestDelta2(dst, bb.B, firstValue, itemsCount)
|
dst, err = unmarshalInt64NearestDelta2(dst, bb.B, firstValue, itemsCount)
|
||||||
bbPool.Put(bb)
|
bbPool.Put(bb)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot unmarshal nearest delta2 data after zstd decompression: %s; src_zstd=%X", err, src)
|
return nil, fmt.Errorf("cannot unmarshal nearest delta2 data after zstd decompression: %w; src_zstd=%X", err, src)
|
||||||
}
|
}
|
||||||
return dst, nil
|
return dst, nil
|
||||||
case MarshalTypeNearestDelta:
|
case MarshalTypeNearestDelta:
|
||||||
dst, err = unmarshalInt64NearestDelta(dst, src, firstValue, itemsCount)
|
dst, err = unmarshalInt64NearestDelta(dst, src, firstValue, itemsCount)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot unmarshal nearest delta data: %s", err)
|
return nil, fmt.Errorf("cannot unmarshal nearest delta data: %w", err)
|
||||||
}
|
}
|
||||||
return dst, nil
|
return dst, nil
|
||||||
case MarshalTypeNearestDelta2:
|
case MarshalTypeNearestDelta2:
|
||||||
dst, err = unmarshalInt64NearestDelta2(dst, src, firstValue, itemsCount)
|
dst, err = unmarshalInt64NearestDelta2(dst, src, firstValue, itemsCount)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot unmarshal nearest delta2 data: %s", err)
|
return nil, fmt.Errorf("cannot unmarshal nearest delta2 data: %w", err)
|
||||||
}
|
}
|
||||||
return dst, nil
|
return dst, nil
|
||||||
case MarshalTypeConst:
|
case MarshalTypeConst:
|
||||||
|
@ -219,7 +219,7 @@ func unmarshalInt64Array(dst []int64, src []byte, mt MarshalType, firstValue int
|
||||||
v := firstValue
|
v := firstValue
|
||||||
tail, d, err := UnmarshalVarInt64(src)
|
tail, d, err := UnmarshalVarInt64(src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot unmarshal delta value for delta const: %s", err)
|
return nil, fmt.Errorf("cannot unmarshal delta value for delta const: %w", err)
|
||||||
}
|
}
|
||||||
if len(tail) > 0 {
|
if len(tail) > 0 {
|
||||||
return nil, fmt.Errorf("unexpected trailing data after delta const (d=%d): %d bytes", d, len(tail))
|
return nil, fmt.Errorf("unexpected trailing data after delta const (d=%d): %d bytes", d, len(tail))
|
||||||
|
|
|
@ -34,7 +34,7 @@ func BenchmarkUnmarshalGaugeArray(b *testing.B) {
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
dst, err = unmarshalInt64Array(dst[:0], benchMarshaledGaugeArray, MarshalTypeZSTDNearestDelta, benchGaugeArray[0], len(benchGaugeArray))
|
dst, err = unmarshalInt64Array(dst[:0], benchMarshaledGaugeArray, MarshalTypeZSTDNearestDelta, benchGaugeArray[0], len(benchGaugeArray))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(fmt.Errorf("cannot unmarshal gauge array: %s", err))
|
panic(fmt.Errorf("cannot unmarshal gauge array: %w", err))
|
||||||
}
|
}
|
||||||
atomic.AddUint64(&Sink, uint64(len(dst)))
|
atomic.AddUint64(&Sink, uint64(len(dst)))
|
||||||
}
|
}
|
||||||
|
@ -81,7 +81,7 @@ func BenchmarkUnmarshalDeltaConstArray(b *testing.B) {
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
dst, err = unmarshalInt64Array(dst[:0], benchMarshaledDeltaConstArray, MarshalTypeDeltaConst, benchDeltaConstArray[0], len(benchDeltaConstArray))
|
dst, err = unmarshalInt64Array(dst[:0], benchMarshaledDeltaConstArray, MarshalTypeDeltaConst, benchDeltaConstArray[0], len(benchDeltaConstArray))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(fmt.Errorf("cannot unmarshal delta const array: %s", err))
|
panic(fmt.Errorf("cannot unmarshal delta const array: %w", err))
|
||||||
}
|
}
|
||||||
atomic.AddUint64(&Sink, uint64(len(dst)))
|
atomic.AddUint64(&Sink, uint64(len(dst)))
|
||||||
}
|
}
|
||||||
|
@ -128,7 +128,7 @@ func BenchmarkUnmarshalConstArray(b *testing.B) {
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
dst, err = unmarshalInt64Array(dst[:0], benchMarshaledConstArray, MarshalTypeConst, benchConstArray[0], len(benchConstArray))
|
dst, err = unmarshalInt64Array(dst[:0], benchMarshaledConstArray, MarshalTypeConst, benchConstArray[0], len(benchConstArray))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(fmt.Errorf("cannot unmarshal const array: %s", err))
|
panic(fmt.Errorf("cannot unmarshal const array: %w", err))
|
||||||
}
|
}
|
||||||
atomic.AddUint64(&Sink, uint64(len(dst)))
|
atomic.AddUint64(&Sink, uint64(len(dst)))
|
||||||
}
|
}
|
||||||
|
@ -173,7 +173,7 @@ func BenchmarkUnmarshalZeroConstArray(b *testing.B) {
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
dst, err = unmarshalInt64Array(dst[:0], benchMarshaledZeroConstArray, MarshalTypeConst, benchZeroConstArray[0], len(benchZeroConstArray))
|
dst, err = unmarshalInt64Array(dst[:0], benchMarshaledZeroConstArray, MarshalTypeConst, benchZeroConstArray[0], len(benchZeroConstArray))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(fmt.Errorf("cannot unmarshal zero const array: %s", err))
|
panic(fmt.Errorf("cannot unmarshal zero const array: %w", err))
|
||||||
}
|
}
|
||||||
atomic.AddUint64(&Sink, uint64(len(dst)))
|
atomic.AddUint64(&Sink, uint64(len(dst)))
|
||||||
}
|
}
|
||||||
|
@ -212,7 +212,7 @@ func BenchmarkUnmarshalInt64Array(b *testing.B) {
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
dst, err = unmarshalInt64Array(dst[:0], benchMarshaledInt64Array, benchMarshalType, benchInt64Array[0], len(benchInt64Array))
|
dst, err = unmarshalInt64Array(dst[:0], benchMarshaledInt64Array, benchMarshalType, benchInt64Array[0], len(benchInt64Array))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(fmt.Errorf("cannot unmarshal int64 array: %s", err))
|
panic(fmt.Errorf("cannot unmarshal int64 array: %w", err))
|
||||||
}
|
}
|
||||||
atomic.AddUint64(&Sink, uint64(len(dst)))
|
atomic.AddUint64(&Sink, uint64(len(dst)))
|
||||||
}
|
}
|
||||||
|
|
|
@ -229,7 +229,7 @@ func MarshalBytes(dst, b []byte) []byte {
|
||||||
func UnmarshalBytes(src []byte) ([]byte, []byte, error) {
|
func UnmarshalBytes(src []byte) ([]byte, []byte, error) {
|
||||||
tail, n, err := UnmarshalVarUint64(src)
|
tail, n, err := UnmarshalVarUint64(src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, fmt.Errorf("cannot unmarshal string size: %d", err)
|
return nil, nil, fmt.Errorf("cannot unmarshal string size: %w", err)
|
||||||
}
|
}
|
||||||
src = tail
|
src = tail
|
||||||
if uint64(len(src)) < n {
|
if uint64(len(src)) < n {
|
||||||
|
|
|
@ -135,7 +135,7 @@ func benchmarkUnmarshalVarInt64s(b *testing.B, maxValue int64) {
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
tail, err := UnmarshalVarInt64s(dst, data)
|
tail, err := UnmarshalVarInt64s(dst, data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(fmt.Errorf("unexpected error: %s", err))
|
panic(fmt.Errorf("unexpected error: %w", err))
|
||||||
}
|
}
|
||||||
if len(tail) > 0 {
|
if len(tail) > 0 {
|
||||||
panic(fmt.Errorf("unexpected non-empty tail with len=%d: %X", len(tail), tail))
|
panic(fmt.Errorf("unexpected non-empty tail with len=%d: %X", len(tail), tail))
|
||||||
|
|
|
@ -60,7 +60,7 @@ func unmarshalInt64NearestDelta(dst []int64, src []byte, firstValue int64, items
|
||||||
|
|
||||||
tail, err := UnmarshalVarInt64s(is.A, src)
|
tail, err := UnmarshalVarInt64s(is.A, src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot unmarshal nearest delta from %d bytes; src=%X: %s", len(src), src, err)
|
return nil, fmt.Errorf("cannot unmarshal nearest delta from %d bytes; src=%X: %w", len(src), src, err)
|
||||||
}
|
}
|
||||||
if len(tail) > 0 {
|
if len(tail) > 0 {
|
||||||
return nil, fmt.Errorf("unexpected tail left after unmarshaling %d items from %d bytes; tail size=%d; src=%X; tail=%X", itemsCount, len(src), len(tail), src, tail)
|
return nil, fmt.Errorf("unexpected tail left after unmarshaling %d items from %d bytes; tail size=%d; src=%X; tail=%X", itemsCount, len(src), len(tail), src, tail)
|
||||||
|
|
|
@ -63,7 +63,7 @@ func unmarshalInt64NearestDelta2(dst []int64, src []byte, firstValue int64, item
|
||||||
|
|
||||||
tail, err := UnmarshalVarInt64s(is.A, src)
|
tail, err := UnmarshalVarInt64s(is.A, src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot unmarshal nearest delta from %d bytes; src=%X: %s", len(src), src, err)
|
return nil, fmt.Errorf("cannot unmarshal nearest delta from %d bytes; src=%X: %w", len(src), src, err)
|
||||||
}
|
}
|
||||||
if len(tail) > 0 {
|
if len(tail) > 0 {
|
||||||
return nil, fmt.Errorf("unexpected tail left after unmarshaling %d items from %d bytes; tail size=%d; src=%X; tail=%X", itemsCount, len(src), len(tail), src, tail)
|
return nil, fmt.Errorf("unexpected tail left after unmarshaling %d items from %d bytes; tail size=%d; src=%X; tail=%X", itemsCount, len(src), len(tail), src, tail)
|
||||||
|
|
|
@ -35,7 +35,7 @@ func BenchmarkUnmarshalInt64NearestDelta2(b *testing.B) {
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
dst, err = unmarshalInt64NearestDelta2(dst[:0], benchInt64NearestDelta2Data, 0, len(benchInt64Array))
|
dst, err = unmarshalInt64NearestDelta2(dst[:0], benchInt64NearestDelta2Data, 0, len(benchInt64Array))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(fmt.Errorf("unexpected error: %s", err))
|
panic(fmt.Errorf("unexpected error: %w", err))
|
||||||
}
|
}
|
||||||
atomic.AddUint64(&Sink, uint64(len(dst)))
|
atomic.AddUint64(&Sink, uint64(len(dst)))
|
||||||
}
|
}
|
||||||
|
|
|
@ -35,7 +35,7 @@ func BenchmarkUnmarshalInt64NearestDelta(b *testing.B) {
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
dst, err = unmarshalInt64NearestDelta(dst[:0], benchInt64NearestDeltaData, 0, len(benchInt64Array))
|
dst, err = unmarshalInt64NearestDelta(dst[:0], benchInt64NearestDeltaData, 0, len(benchInt64Array))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(fmt.Errorf("unexpected error: %s", err))
|
panic(fmt.Errorf("unexpected error: %w", err))
|
||||||
}
|
}
|
||||||
atomic.AddUint64(&Sink, uint64(len(dst)))
|
atomic.AddUint64(&Sink, uint64(len(dst)))
|
||||||
}
|
}
|
||||||
|
|
|
@ -63,7 +63,7 @@ func OpenReaderAt(path string, offset int64, nocache bool) (*Reader, error) {
|
||||||
n, err := r.f.Seek(offset, io.SeekStart)
|
n, err := r.f.Seek(offset, io.SeekStart)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
r.MustClose()
|
r.MustClose()
|
||||||
return nil, fmt.Errorf("cannot seek to offset=%d for %q: %s", offset, path, err)
|
return nil, fmt.Errorf("cannot seek to offset=%d for %q: %w", offset, path, err)
|
||||||
}
|
}
|
||||||
if n != offset {
|
if n != offset {
|
||||||
r.MustClose()
|
r.MustClose()
|
||||||
|
@ -78,7 +78,7 @@ func OpenReaderAt(path string, offset int64, nocache bool) (*Reader, error) {
|
||||||
func Open(path string, nocache bool) (*Reader, error) {
|
func Open(path string, nocache bool) (*Reader, error) {
|
||||||
f, err := os.Open(path)
|
f, err := os.Open(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot open file %q: %s", path, err)
|
return nil, fmt.Errorf("cannot open file %q: %w", path, err)
|
||||||
}
|
}
|
||||||
r := &Reader{
|
r := &Reader{
|
||||||
f: f,
|
f: f,
|
||||||
|
@ -124,7 +124,7 @@ func (r *Reader) Read(p []byte) (int, error) {
|
||||||
return n, err
|
return n, err
|
||||||
}
|
}
|
||||||
if err := r.st.adviseDontNeed(n, false); err != nil {
|
if err := r.st.adviseDontNeed(n, false); err != nil {
|
||||||
return n, fmt.Errorf("advise error for %q: %s", r.f.Name(), err)
|
return n, fmt.Errorf("advise error for %q: %w", r.f.Name(), err)
|
||||||
}
|
}
|
||||||
return n, nil
|
return n, nil
|
||||||
}
|
}
|
||||||
|
@ -172,12 +172,12 @@ type Writer struct {
|
||||||
func OpenWriterAt(path string, offset int64, nocache bool) (*Writer, error) {
|
func OpenWriterAt(path string, offset int64, nocache bool) (*Writer, error) {
|
||||||
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0600)
|
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0600)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot open %q: %s", path, err)
|
return nil, fmt.Errorf("cannot open %q: %w", path, err)
|
||||||
}
|
}
|
||||||
n, err := f.Seek(offset, io.SeekStart)
|
n, err := f.Seek(offset, io.SeekStart)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
_ = f.Close()
|
_ = f.Close()
|
||||||
return nil, fmt.Errorf("cannot seek to offset=%d in %q: %s", offset, path, err)
|
return nil, fmt.Errorf("cannot seek to offset=%d in %q: %w", offset, path, err)
|
||||||
}
|
}
|
||||||
if n != offset {
|
if n != offset {
|
||||||
_ = f.Close()
|
_ = f.Close()
|
||||||
|
@ -192,7 +192,7 @@ func OpenWriterAt(path string, offset int64, nocache bool) (*Writer, error) {
|
||||||
func Create(path string, nocache bool) (*Writer, error) {
|
func Create(path string, nocache bool) (*Writer, error) {
|
||||||
f, err := os.Create(path)
|
f, err := os.Create(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot create file %q: %s", path, err)
|
return nil, fmt.Errorf("cannot create file %q: %w", path, err)
|
||||||
}
|
}
|
||||||
return newWriter(f, nocache), nil
|
return newWriter(f, nocache), nil
|
||||||
}
|
}
|
||||||
|
@ -248,7 +248,7 @@ func (w *Writer) Write(p []byte) (int, error) {
|
||||||
return n, err
|
return n, err
|
||||||
}
|
}
|
||||||
if err := w.st.adviseDontNeed(n, true); err != nil {
|
if err := w.st.adviseDontNeed(n, true); err != nil {
|
||||||
return n, fmt.Errorf("advise error for %q: %s", w.f.Name(), err)
|
return n, fmt.Errorf("advise error for %q: %w", w.f.Name(), err)
|
||||||
}
|
}
|
||||||
return n, nil
|
return n, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -18,11 +18,11 @@ func (st *streamTracker) adviseDontNeed(n int, fdatasync bool) error {
|
||||||
blockSize := st.length - (st.length % dontNeedBlockSize)
|
blockSize := st.length - (st.length % dontNeedBlockSize)
|
||||||
if fdatasync {
|
if fdatasync {
|
||||||
if err := unixFdatasync(int(st.fd)); err != nil {
|
if err := unixFdatasync(int(st.fd)); err != nil {
|
||||||
return fmt.Errorf("unix.Fdatasync error: %s", err)
|
return fmt.Errorf("unix.Fdatasync error: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err := unix.Fadvise(int(st.fd), int64(st.offset), int64(blockSize), unix.FADV_DONTNEED); err != nil {
|
if err := unix.Fadvise(int(st.fd), int64(st.offset), int64(blockSize), unix.FADV_DONTNEED); err != nil {
|
||||||
return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, %d, %d) error: %s", st.offset, blockSize, err)
|
return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, %d, %d) error: %w", st.offset, blockSize, err)
|
||||||
}
|
}
|
||||||
st.offset += blockSize
|
st.offset += blockSize
|
||||||
st.length -= blockSize
|
st.length -= blockSize
|
||||||
|
@ -35,7 +35,7 @@ func (st *streamTracker) close() error {
|
||||||
}
|
}
|
||||||
// Advise the whole file as it shouldn't be cached.
|
// Advise the whole file as it shouldn't be cached.
|
||||||
if err := unix.Fadvise(int(st.fd), 0, 0, unix.FADV_DONTNEED); err != nil {
|
if err := unix.Fadvise(int(st.fd), 0, 0, unix.FADV_DONTNEED); err != nil {
|
||||||
return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, 0, 0) error: %s", err)
|
return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, 0, 0) error: %w", err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -16,11 +16,11 @@ func (st *streamTracker) adviseDontNeed(n int, fdatasync bool) error {
|
||||||
blockSize := st.length - (st.length % dontNeedBlockSize)
|
blockSize := st.length - (st.length % dontNeedBlockSize)
|
||||||
if fdatasync {
|
if fdatasync {
|
||||||
if err := unix.Fdatasync(int(st.fd)); err != nil {
|
if err := unix.Fdatasync(int(st.fd)); err != nil {
|
||||||
return fmt.Errorf("unix.Fdatasync error: %s", err)
|
return fmt.Errorf("unix.Fdatasync error: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err := unix.Fadvise(int(st.fd), int64(st.offset), int64(blockSize), unix.FADV_DONTNEED); err != nil {
|
if err := unix.Fadvise(int(st.fd), int64(st.offset), int64(blockSize), unix.FADV_DONTNEED); err != nil {
|
||||||
return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, %d, %d) error: %s", st.offset, blockSize, err)
|
return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, %d, %d) error: %w", st.offset, blockSize, err)
|
||||||
}
|
}
|
||||||
st.offset += blockSize
|
st.offset += blockSize
|
||||||
st.length -= blockSize
|
st.length -= blockSize
|
||||||
|
@ -33,7 +33,7 @@ func (st *streamTracker) close() error {
|
||||||
}
|
}
|
||||||
// Advise the whole file as it shouldn't be cached.
|
// Advise the whole file as it shouldn't be cached.
|
||||||
if err := unix.Fadvise(int(st.fd), 0, 0, unix.FADV_DONTNEED); err != nil {
|
if err := unix.Fadvise(int(st.fd), 0, 0, unix.FADV_DONTNEED); err != nil {
|
||||||
return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, 0, 0) error: %s", err)
|
return fmt.Errorf("unix.Fadvise(FADV_DONTNEEDED, 0, 0) error: %w", err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
22
lib/fs/fs.go
22
lib/fs/fs.go
|
@ -48,12 +48,12 @@ func WriteFileAtomically(path string, data []byte) error {
|
||||||
tmpPath := fmt.Sprintf("%s.tmp.%d", path, n)
|
tmpPath := fmt.Sprintf("%s.tmp.%d", path, n)
|
||||||
f, err := filestream.Create(tmpPath, false)
|
f, err := filestream.Create(tmpPath, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot create file %q: %s", tmpPath, err)
|
return fmt.Errorf("cannot create file %q: %w", tmpPath, err)
|
||||||
}
|
}
|
||||||
if _, err := f.Write(data); err != nil {
|
if _, err := f.Write(data); err != nil {
|
||||||
f.MustClose()
|
f.MustClose()
|
||||||
MustRemoveAll(tmpPath)
|
MustRemoveAll(tmpPath)
|
||||||
return fmt.Errorf("cannot write %d bytes to file %q: %s", len(data), tmpPath, err)
|
return fmt.Errorf("cannot write %d bytes to file %q: %w", len(data), tmpPath, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sync and close the file.
|
// Sync and close the file.
|
||||||
|
@ -63,14 +63,14 @@ func WriteFileAtomically(path string, data []byte) error {
|
||||||
if err := os.Rename(tmpPath, path); err != nil {
|
if err := os.Rename(tmpPath, path); err != nil {
|
||||||
// do not call MustRemoveAll(tmpPath) here, so the user could inspect
|
// do not call MustRemoveAll(tmpPath) here, so the user could inspect
|
||||||
// the file contents during investigating the issue.
|
// the file contents during investigating the issue.
|
||||||
return fmt.Errorf("cannot move %q to %q: %s", tmpPath, path, err)
|
return fmt.Errorf("cannot move %q to %q: %w", tmpPath, path, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sync the containing directory, so the file is guaranteed to appear in the directory.
|
// Sync the containing directory, so the file is guaranteed to appear in the directory.
|
||||||
// See https://www.quora.com/When-should-you-fsync-the-containing-directory-in-addition-to-the-file-itself
|
// See https://www.quora.com/When-should-you-fsync-the-containing-directory-in-addition-to-the-file-itself
|
||||||
absPath, err := filepath.Abs(path)
|
absPath, err := filepath.Abs(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot obtain absolute path to %q: %s", path, err)
|
return fmt.Errorf("cannot obtain absolute path to %q: %w", path, err)
|
||||||
}
|
}
|
||||||
parentDirPath := filepath.Dir(absPath)
|
parentDirPath := filepath.Dir(absPath)
|
||||||
MustSyncPath(parentDirPath)
|
MustSyncPath(parentDirPath)
|
||||||
|
@ -204,12 +204,12 @@ func MustRemoveAllWithDoneCallback(path string, done func()) {
|
||||||
// HardLinkFiles makes hard links for all the files from srcDir in dstDir.
|
// HardLinkFiles makes hard links for all the files from srcDir in dstDir.
|
||||||
func HardLinkFiles(srcDir, dstDir string) error {
|
func HardLinkFiles(srcDir, dstDir string) error {
|
||||||
if err := mkdirSync(dstDir); err != nil {
|
if err := mkdirSync(dstDir); err != nil {
|
||||||
return fmt.Errorf("cannot create dstDir=%q: %s", dstDir, err)
|
return fmt.Errorf("cannot create dstDir=%q: %w", dstDir, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
d, err := os.Open(srcDir)
|
d, err := os.Open(srcDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot open srcDir=%q: %s", srcDir, err)
|
return fmt.Errorf("cannot open srcDir=%q: %w", srcDir, err)
|
||||||
}
|
}
|
||||||
defer func() {
|
defer func() {
|
||||||
if err := d.Close(); err != nil {
|
if err := d.Close(); err != nil {
|
||||||
|
@ -219,7 +219,7 @@ func HardLinkFiles(srcDir, dstDir string) error {
|
||||||
|
|
||||||
fis, err := d.Readdir(-1)
|
fis, err := d.Readdir(-1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot read files in scrDir=%q: %s", srcDir, err)
|
return fmt.Errorf("cannot read files in scrDir=%q: %w", srcDir, err)
|
||||||
}
|
}
|
||||||
for _, fi := range fis {
|
for _, fi := range fis {
|
||||||
if IsDirOrSymlink(fi) {
|
if IsDirOrSymlink(fi) {
|
||||||
|
@ -248,7 +248,7 @@ func SymlinkRelative(srcPath, dstPath string) error {
|
||||||
baseDir := filepath.Dir(dstPath)
|
baseDir := filepath.Dir(dstPath)
|
||||||
srcPathRel, err := filepath.Rel(baseDir, srcPath)
|
srcPathRel, err := filepath.Rel(baseDir, srcPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot make relative path for srcPath=%q: %s", srcPath, err)
|
return fmt.Errorf("cannot make relative path for srcPath=%q: %w", srcPath, err)
|
||||||
}
|
}
|
||||||
return os.Symlink(srcPathRel, dstPath)
|
return os.Symlink(srcPathRel, dstPath)
|
||||||
}
|
}
|
||||||
|
@ -260,7 +260,7 @@ func ReadFullData(r io.Reader, data []byte) error {
|
||||||
if err == io.EOF {
|
if err == io.EOF {
|
||||||
return io.EOF
|
return io.EOF
|
||||||
}
|
}
|
||||||
return fmt.Errorf("cannot read %d bytes; read only %d bytes; error: %s", len(data), n, err)
|
return fmt.Errorf("cannot read %d bytes; read only %d bytes; error: %w", len(data), n, err)
|
||||||
}
|
}
|
||||||
if n != len(data) {
|
if n != len(data) {
|
||||||
logger.Panicf("BUG: io.ReadFull read only %d bytes; must read %d bytes", n, len(data))
|
logger.Panicf("BUG: io.ReadFull read only %d bytes; must read %d bytes", n, len(data))
|
||||||
|
@ -288,10 +288,10 @@ func CreateFlockFile(dir string) (*os.File, error) {
|
||||||
flockFile := dir + "/flock.lock"
|
flockFile := dir + "/flock.lock"
|
||||||
flockF, err := os.Create(flockFile)
|
flockF, err := os.Create(flockFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot create lock file %q: %s", flockFile, err)
|
return nil, fmt.Errorf("cannot create lock file %q: %w", flockFile, err)
|
||||||
}
|
}
|
||||||
if err := unix.Flock(int(flockF.Fd()), unix.LOCK_EX|unix.LOCK_NB); err != nil {
|
if err := unix.Flock(int(flockF.Fd()), unix.LOCK_EX|unix.LOCK_NB); err != nil {
|
||||||
return nil, fmt.Errorf("cannot acquire lock on file %q: %s", flockFile, err)
|
return nil, fmt.Errorf("cannot acquire lock on file %q: %w", flockFile, err)
|
||||||
}
|
}
|
||||||
return flockF, nil
|
return flockF, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -154,7 +154,7 @@ func (r *ReaderAt) MustClose() {
|
||||||
func OpenReaderAt(path string) (*ReaderAt, error) {
|
func OpenReaderAt(path string) (*ReaderAt, error) {
|
||||||
f, err := os.Open(path)
|
f, err := os.Open(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot open file %q for reader: %s", path, err)
|
return nil, fmt.Errorf("cannot open file %q for reader: %w", path, err)
|
||||||
}
|
}
|
||||||
var r ReaderAt
|
var r ReaderAt
|
||||||
r.f = f
|
r.f = f
|
||||||
|
@ -162,7 +162,7 @@ func OpenReaderAt(path string) (*ReaderAt, error) {
|
||||||
if !*disableMmap {
|
if !*disableMmap {
|
||||||
fi, err := f.Stat()
|
fi, err := f.Stat()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error in stat: %s", err)
|
return nil, fmt.Errorf("error in stat: %w", err)
|
||||||
}
|
}
|
||||||
size := fi.Size()
|
size := fi.Size()
|
||||||
bm := &pageCacheBitmap{
|
bm := &pageCacheBitmap{
|
||||||
|
@ -178,7 +178,7 @@ func OpenReaderAt(path string) (*ReaderAt, error) {
|
||||||
data, err := mmapFile(f, size)
|
data, err := mmapFile(f, size)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
MustClose(f)
|
MustClose(f)
|
||||||
return nil, fmt.Errorf("cannot init reader for %q: %s", path, err)
|
return nil, fmt.Errorf("cannot init reader for %q: %w", path, err)
|
||||||
}
|
}
|
||||||
r.mmapData = data
|
r.mmapData = data
|
||||||
}
|
}
|
||||||
|
@ -228,7 +228,7 @@ func mmapFile(f *os.File, size int64) ([]byte, error) {
|
||||||
}
|
}
|
||||||
data, err := unix.Mmap(int(f.Fd()), 0, int(size), unix.PROT_READ, unix.MAP_SHARED)
|
data, err := unix.Mmap(int(f.Fd()), 0, int(size), unix.PROT_READ, unix.MAP_SHARED)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot mmap file with size %d: %s", size, err)
|
return nil, fmt.Errorf("cannot mmap file with size %d: %w", size, err)
|
||||||
}
|
}
|
||||||
return data[:sizeOrig], nil
|
return data[:sizeOrig], nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -61,7 +61,7 @@ func (bh *blockHeader) Unmarshal(src []byte) ([]byte, error) {
|
||||||
// Unmarshal commonPrefix
|
// Unmarshal commonPrefix
|
||||||
tail, cp, err := encoding.UnmarshalBytes(src)
|
tail, cp, err := encoding.UnmarshalBytes(src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return tail, fmt.Errorf("cannot unmarshal commonPrefix: %s", err)
|
return tail, fmt.Errorf("cannot unmarshal commonPrefix: %w", err)
|
||||||
}
|
}
|
||||||
bh.commonPrefix = append(bh.commonPrefix[:0], cp...)
|
bh.commonPrefix = append(bh.commonPrefix[:0], cp...)
|
||||||
src = tail
|
src = tail
|
||||||
|
@ -69,7 +69,7 @@ func (bh *blockHeader) Unmarshal(src []byte) ([]byte, error) {
|
||||||
// Unmarshal firstItem
|
// Unmarshal firstItem
|
||||||
tail, fi, err := encoding.UnmarshalBytes(src)
|
tail, fi, err := encoding.UnmarshalBytes(src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return tail, fmt.Errorf("cannot unmarshal firstItem: %s", err)
|
return tail, fmt.Errorf("cannot unmarshal firstItem: %w", err)
|
||||||
}
|
}
|
||||||
bh.firstItem = append(bh.firstItem[:0], fi...)
|
bh.firstItem = append(bh.firstItem[:0], fi...)
|
||||||
src = tail
|
src = tail
|
||||||
|
@ -81,7 +81,7 @@ func (bh *blockHeader) Unmarshal(src []byte) ([]byte, error) {
|
||||||
bh.marshalType = marshalType(src[0])
|
bh.marshalType = marshalType(src[0])
|
||||||
src = src[1:]
|
src = src[1:]
|
||||||
if err := checkMarshalType(bh.marshalType); err != nil {
|
if err := checkMarshalType(bh.marshalType); err != nil {
|
||||||
return src, fmt.Errorf("unexpected marshalType: %s", err)
|
return src, fmt.Errorf("unexpected marshalType: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Unmarshal itemsCount
|
// Unmarshal itemsCount
|
||||||
|
@ -148,7 +148,7 @@ func unmarshalBlockHeaders(dst []blockHeader, src []byte, blockHeadersCount int)
|
||||||
for i := 0; i < blockHeadersCount; i++ {
|
for i := 0; i < blockHeadersCount; i++ {
|
||||||
tail, err := dst[dstLen+i].Unmarshal(src)
|
tail, err := dst[dstLen+i].Unmarshal(src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return dst, fmt.Errorf("cannot unmarshal block header: %s", err)
|
return dst, fmt.Errorf("cannot unmarshal block header: %w", err)
|
||||||
}
|
}
|
||||||
src = tail
|
src = tail
|
||||||
}
|
}
|
||||||
|
|
|
@ -131,31 +131,31 @@ func (bsr *blockStreamReader) InitFromFilePart(path string) error {
|
||||||
path = filepath.Clean(path)
|
path = filepath.Clean(path)
|
||||||
|
|
||||||
if err := bsr.ph.ParseFromPath(path); err != nil {
|
if err := bsr.ph.ParseFromPath(path); err != nil {
|
||||||
return fmt.Errorf("cannot parse partHeader data from %q: %s", path, err)
|
return fmt.Errorf("cannot parse partHeader data from %q: %w", path, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
metaindexPath := path + "/metaindex.bin"
|
metaindexPath := path + "/metaindex.bin"
|
||||||
metaindexFile, err := filestream.Open(metaindexPath, true)
|
metaindexFile, err := filestream.Open(metaindexPath, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot open metaindex file in stream mode: %s", err)
|
return fmt.Errorf("cannot open metaindex file in stream mode: %w", err)
|
||||||
}
|
}
|
||||||
bsr.mrs, err = unmarshalMetaindexRows(bsr.mrs[:0], metaindexFile)
|
bsr.mrs, err = unmarshalMetaindexRows(bsr.mrs[:0], metaindexFile)
|
||||||
metaindexFile.MustClose()
|
metaindexFile.MustClose()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot unmarshal metaindex rows from file %q: %s", metaindexPath, err)
|
return fmt.Errorf("cannot unmarshal metaindex rows from file %q: %w", metaindexPath, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
indexPath := path + "/index.bin"
|
indexPath := path + "/index.bin"
|
||||||
indexFile, err := filestream.Open(indexPath, true)
|
indexFile, err := filestream.Open(indexPath, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot open index file in stream mode: %s", err)
|
return fmt.Errorf("cannot open index file in stream mode: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
itemsPath := path + "/items.bin"
|
itemsPath := path + "/items.bin"
|
||||||
itemsFile, err := filestream.Open(itemsPath, true)
|
itemsFile, err := filestream.Open(itemsPath, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
indexFile.MustClose()
|
indexFile.MustClose()
|
||||||
return fmt.Errorf("cannot open items file in stream mode: %s", err)
|
return fmt.Errorf("cannot open items file in stream mode: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
lensPath := path + "/lens.bin"
|
lensPath := path + "/lens.bin"
|
||||||
|
@ -163,7 +163,7 @@ func (bsr *blockStreamReader) InitFromFilePart(path string) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
indexFile.MustClose()
|
indexFile.MustClose()
|
||||||
itemsFile.MustClose()
|
itemsFile.MustClose()
|
||||||
return fmt.Errorf("cannot open lens file in stream mode: %s", err)
|
return fmt.Errorf("cannot open lens file in stream mode: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
bsr.path = path
|
bsr.path = path
|
||||||
|
@ -200,7 +200,7 @@ func (bsr *blockStreamReader) Next() bool {
|
||||||
err = fmt.Errorf("unexpected last item; got %X; want %X", lastItem, bsr.ph.lastItem)
|
err = fmt.Errorf("unexpected last item; got %X; want %X", lastItem, bsr.ph.lastItem)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
err = fmt.Errorf("cannot read the next index block: %s", err)
|
err = fmt.Errorf("cannot read the next index block: %w", err)
|
||||||
}
|
}
|
||||||
bsr.err = err
|
bsr.err = err
|
||||||
return false
|
return false
|
||||||
|
@ -212,18 +212,18 @@ func (bsr *blockStreamReader) Next() bool {
|
||||||
|
|
||||||
bsr.sb.itemsData = bytesutil.Resize(bsr.sb.itemsData, int(bsr.bh.itemsBlockSize))
|
bsr.sb.itemsData = bytesutil.Resize(bsr.sb.itemsData, int(bsr.bh.itemsBlockSize))
|
||||||
if err := fs.ReadFullData(bsr.itemsReader, bsr.sb.itemsData); err != nil {
|
if err := fs.ReadFullData(bsr.itemsReader, bsr.sb.itemsData); err != nil {
|
||||||
bsr.err = fmt.Errorf("cannot read compressed items block with size %d: %s", bsr.bh.itemsBlockSize, err)
|
bsr.err = fmt.Errorf("cannot read compressed items block with size %d: %w", bsr.bh.itemsBlockSize, err)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
bsr.sb.lensData = bytesutil.Resize(bsr.sb.lensData, int(bsr.bh.lensBlockSize))
|
bsr.sb.lensData = bytesutil.Resize(bsr.sb.lensData, int(bsr.bh.lensBlockSize))
|
||||||
if err := fs.ReadFullData(bsr.lensReader, bsr.sb.lensData); err != nil {
|
if err := fs.ReadFullData(bsr.lensReader, bsr.sb.lensData); err != nil {
|
||||||
bsr.err = fmt.Errorf("cannot read compressed lens block with size %d: %s", bsr.bh.lensBlockSize, err)
|
bsr.err = fmt.Errorf("cannot read compressed lens block with size %d: %w", bsr.bh.lensBlockSize, err)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := bsr.Block.UnmarshalData(&bsr.sb, bsr.bh.firstItem, bsr.bh.commonPrefix, bsr.bh.itemsCount, bsr.bh.marshalType); err != nil {
|
if err := bsr.Block.UnmarshalData(&bsr.sb, bsr.bh.firstItem, bsr.bh.commonPrefix, bsr.bh.itemsCount, bsr.bh.marshalType); err != nil {
|
||||||
bsr.err = fmt.Errorf("cannot unmarshal inmemoryBlock from storageBlock with firstItem=%X, commonPrefix=%X, itemsCount=%d, marshalType=%d: %s",
|
bsr.err = fmt.Errorf("cannot unmarshal inmemoryBlock from storageBlock with firstItem=%X, commonPrefix=%X, itemsCount=%d, marshalType=%d: %w",
|
||||||
bsr.bh.firstItem, bsr.bh.commonPrefix, bsr.bh.itemsCount, bsr.bh.marshalType, err)
|
bsr.bh.firstItem, bsr.bh.commonPrefix, bsr.bh.itemsCount, bsr.bh.marshalType, err)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
@ -260,14 +260,14 @@ func (bsr *blockStreamReader) readNextBHS() error {
|
||||||
// Read compressed index block.
|
// Read compressed index block.
|
||||||
bsr.packedBuf = bytesutil.Resize(bsr.packedBuf, int(mr.indexBlockSize))
|
bsr.packedBuf = bytesutil.Resize(bsr.packedBuf, int(mr.indexBlockSize))
|
||||||
if err := fs.ReadFullData(bsr.indexReader, bsr.packedBuf); err != nil {
|
if err := fs.ReadFullData(bsr.indexReader, bsr.packedBuf); err != nil {
|
||||||
return fmt.Errorf("cannot read compressed index block with size %d: %s", mr.indexBlockSize, err)
|
return fmt.Errorf("cannot read compressed index block with size %d: %w", mr.indexBlockSize, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Unpack the compressed index block.
|
// Unpack the compressed index block.
|
||||||
var err error
|
var err error
|
||||||
bsr.unpackedBuf, err = encoding.DecompressZSTD(bsr.unpackedBuf[:0], bsr.packedBuf)
|
bsr.unpackedBuf, err = encoding.DecompressZSTD(bsr.unpackedBuf[:0], bsr.packedBuf)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot decompress index block with size %d: %s", mr.indexBlockSize, err)
|
return fmt.Errorf("cannot decompress index block with size %d: %w", mr.indexBlockSize, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Unmarshal the unpacked index block into bsr.bhs.
|
// Unmarshal the unpacked index block into bsr.bhs.
|
||||||
|
@ -280,7 +280,7 @@ func (bsr *blockStreamReader) readNextBHS() error {
|
||||||
for i := 0; i < int(mr.blockHeadersCount); i++ {
|
for i := 0; i < int(mr.blockHeadersCount); i++ {
|
||||||
tail, err := bsr.bhs[i].Unmarshal(b)
|
tail, err := bsr.bhs[i].Unmarshal(b)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot unmarshal blockHeader #%d in the index block #%d: %s", len(bsr.bhs), bsr.mrIdx, err)
|
return fmt.Errorf("cannot unmarshal blockHeader #%d in the index block #%d: %w", len(bsr.bhs), bsr.mrIdx, err)
|
||||||
}
|
}
|
||||||
b = tail
|
b = tail
|
||||||
}
|
}
|
||||||
|
|
|
@ -84,7 +84,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre
|
||||||
|
|
||||||
// Create the directory
|
// Create the directory
|
||||||
if err := fs.MkdirAllFailIfExist(path); err != nil {
|
if err := fs.MkdirAllFailIfExist(path); err != nil {
|
||||||
return fmt.Errorf("cannot create directory %q: %s", path, err)
|
return fmt.Errorf("cannot create directory %q: %w", path, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create part files in the directory.
|
// Create part files in the directory.
|
||||||
|
@ -95,7 +95,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre
|
||||||
metaindexFile, err := filestream.Create(metaindexPath, false)
|
metaindexFile, err := filestream.Create(metaindexPath, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.MustRemoveAll(path)
|
fs.MustRemoveAll(path)
|
||||||
return fmt.Errorf("cannot create metaindex file: %s", err)
|
return fmt.Errorf("cannot create metaindex file: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
indexPath := path + "/index.bin"
|
indexPath := path + "/index.bin"
|
||||||
|
@ -103,7 +103,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre
|
||||||
if err != nil {
|
if err != nil {
|
||||||
metaindexFile.MustClose()
|
metaindexFile.MustClose()
|
||||||
fs.MustRemoveAll(path)
|
fs.MustRemoveAll(path)
|
||||||
return fmt.Errorf("cannot create index file: %s", err)
|
return fmt.Errorf("cannot create index file: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
itemsPath := path + "/items.bin"
|
itemsPath := path + "/items.bin"
|
||||||
|
@ -112,7 +112,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre
|
||||||
metaindexFile.MustClose()
|
metaindexFile.MustClose()
|
||||||
indexFile.MustClose()
|
indexFile.MustClose()
|
||||||
fs.MustRemoveAll(path)
|
fs.MustRemoveAll(path)
|
||||||
return fmt.Errorf("cannot create items file: %s", err)
|
return fmt.Errorf("cannot create items file: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
lensPath := path + "/lens.bin"
|
lensPath := path + "/lens.bin"
|
||||||
|
@ -122,7 +122,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre
|
||||||
indexFile.MustClose()
|
indexFile.MustClose()
|
||||||
itemsFile.MustClose()
|
itemsFile.MustClose()
|
||||||
fs.MustRemoveAll(path)
|
fs.MustRemoveAll(path)
|
||||||
return fmt.Errorf("cannot create lens file: %s", err)
|
return fmt.Errorf("cannot create lens file: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
bsw.reset()
|
bsw.reset()
|
||||||
|
|
|
@ -267,7 +267,7 @@ func (ib *inmemoryBlock) UnmarshalData(sb *storageBlock, firstItem, commonPrefix
|
||||||
switch mt {
|
switch mt {
|
||||||
case marshalTypePlain:
|
case marshalTypePlain:
|
||||||
if err := ib.unmarshalDataPlain(sb, firstItem, itemsCount); err != nil {
|
if err := ib.unmarshalDataPlain(sb, firstItem, itemsCount); err != nil {
|
||||||
return fmt.Errorf("cannot unmarshal plain data: %s", err)
|
return fmt.Errorf("cannot unmarshal plain data: %w", err)
|
||||||
}
|
}
|
||||||
if !ib.isSorted() {
|
if !ib.isSorted() {
|
||||||
return fmt.Errorf("plain data block contains unsorted items; items:\n%s", ib.debugItemsString())
|
return fmt.Errorf("plain data block contains unsorted items; items:\n%s", ib.debugItemsString())
|
||||||
|
@ -289,7 +289,7 @@ func (ib *inmemoryBlock) UnmarshalData(sb *storageBlock, firstItem, commonPrefix
|
||||||
// Unmarshal lens data.
|
// Unmarshal lens data.
|
||||||
bb.B, err = encoding.DecompressZSTD(bb.B[:0], sb.lensData)
|
bb.B, err = encoding.DecompressZSTD(bb.B[:0], sb.lensData)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot decompress lensData: %s", err)
|
return fmt.Errorf("cannot decompress lensData: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
lb := getLensBuffer(int(2 * itemsCount))
|
lb := getLensBuffer(int(2 * itemsCount))
|
||||||
|
@ -304,7 +304,7 @@ func (ib *inmemoryBlock) UnmarshalData(sb *storageBlock, firstItem, commonPrefix
|
||||||
// Unmarshal prefixLens
|
// Unmarshal prefixLens
|
||||||
tail, err := encoding.UnmarshalVarUint64s(is.A, bb.B)
|
tail, err := encoding.UnmarshalVarUint64s(is.A, bb.B)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot unmarshal prefixLens from lensData: %s", err)
|
return fmt.Errorf("cannot unmarshal prefixLens from lensData: %w", err)
|
||||||
}
|
}
|
||||||
prefixLens[0] = 0
|
prefixLens[0] = 0
|
||||||
for i, xLen := range is.A {
|
for i, xLen := range is.A {
|
||||||
|
@ -314,7 +314,7 @@ func (ib *inmemoryBlock) UnmarshalData(sb *storageBlock, firstItem, commonPrefix
|
||||||
// Unmarshal lens
|
// Unmarshal lens
|
||||||
tail, err = encoding.UnmarshalVarUint64s(is.A, tail)
|
tail, err = encoding.UnmarshalVarUint64s(is.A, tail)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot unmarshal lens from lensData: %s", err)
|
return fmt.Errorf("cannot unmarshal lens from lensData: %w", err)
|
||||||
}
|
}
|
||||||
if len(tail) > 0 {
|
if len(tail) > 0 {
|
||||||
return fmt.Errorf("unexpected tail left unmarshaling %d lens; tail size=%d; contents=%X", itemsCount, len(tail), tail)
|
return fmt.Errorf("unexpected tail left unmarshaling %d lens; tail size=%d; contents=%X", itemsCount, len(tail), tail)
|
||||||
|
@ -331,7 +331,7 @@ func (ib *inmemoryBlock) UnmarshalData(sb *storageBlock, firstItem, commonPrefix
|
||||||
// Unmarshal items data.
|
// Unmarshal items data.
|
||||||
bb.B, err = encoding.DecompressZSTD(bb.B[:0], sb.itemsData)
|
bb.B, err = encoding.DecompressZSTD(bb.B[:0], sb.itemsData)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot decompress lensData: %s", err)
|
return fmt.Errorf("cannot decompress lensData: %w", err)
|
||||||
}
|
}
|
||||||
data := bytesutil.Resize(ib.data, maxInmemoryBlockSize)
|
data := bytesutil.Resize(ib.data, maxInmemoryBlockSize)
|
||||||
if n := int(itemsCount) - cap(ib.items); n > 0 {
|
if n := int(itemsCount) - cap(ib.items); n > 0 {
|
||||||
|
|
|
@ -30,7 +30,7 @@ type PrepareBlockCallback func(data []byte, items [][]byte) ([]byte, [][]byte)
|
||||||
func mergeBlockStreams(ph *partHeader, bsw *blockStreamWriter, bsrs []*blockStreamReader, prepareBlock PrepareBlockCallback, stopCh <-chan struct{}, itemsMerged *uint64) error {
|
func mergeBlockStreams(ph *partHeader, bsw *blockStreamWriter, bsrs []*blockStreamReader, prepareBlock PrepareBlockCallback, stopCh <-chan struct{}, itemsMerged *uint64) error {
|
||||||
bsm := bsmPool.Get().(*blockStreamMerger)
|
bsm := bsmPool.Get().(*blockStreamMerger)
|
||||||
if err := bsm.Init(bsrs, prepareBlock); err != nil {
|
if err := bsm.Init(bsrs, prepareBlock); err != nil {
|
||||||
return fmt.Errorf("cannot initialize blockStreamMerger: %s", err)
|
return fmt.Errorf("cannot initialize blockStreamMerger: %w", err)
|
||||||
}
|
}
|
||||||
err := bsm.Merge(bsw, ph, stopCh, itemsMerged)
|
err := bsm.Merge(bsw, ph, stopCh, itemsMerged)
|
||||||
bsm.reset()
|
bsm.reset()
|
||||||
|
@ -42,7 +42,7 @@ func mergeBlockStreams(ph *partHeader, bsw *blockStreamWriter, bsrs []*blockStre
|
||||||
if err == errForciblyStopped {
|
if err == errForciblyStopped {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return fmt.Errorf("cannot merge %d block streams: %s: %s", len(bsrs), bsrs, err)
|
return fmt.Errorf("cannot merge %d block streams: %s: %w", len(bsrs), bsrs, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var bsmPool = &sync.Pool{
|
var bsmPool = &sync.Pool{
|
||||||
|
@ -88,7 +88,7 @@ func (bsm *blockStreamMerger) Init(bsrs []*blockStreamReader, prepareBlock Prepa
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := bsr.Error(); err != nil {
|
if err := bsr.Error(); err != nil {
|
||||||
return fmt.Errorf("cannot obtain the next block from blockStreamReader %q: %s", bsr.path, err)
|
return fmt.Errorf("cannot obtain the next block from blockStreamReader %q: %w", bsr.path, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
heap.Init(&bsm.bsrHeap)
|
heap.Init(&bsm.bsrHeap)
|
||||||
|
@ -143,7 +143,7 @@ again:
|
||||||
goto again
|
goto again
|
||||||
}
|
}
|
||||||
if err := bsr.Error(); err != nil {
|
if err := bsr.Error(); err != nil {
|
||||||
return fmt.Errorf("cannot read storageBlock: %s", err)
|
return fmt.Errorf("cannot read storageBlock: %w", err)
|
||||||
}
|
}
|
||||||
goto again
|
goto again
|
||||||
}
|
}
|
||||||
|
|
|
@ -121,7 +121,7 @@ func testMergeBlockStreamsSerial(blocksToMerge, maxItemsPerBlock int) error {
|
||||||
var bsw blockStreamWriter
|
var bsw blockStreamWriter
|
||||||
bsw.InitFromInmemoryPart(&dstIP)
|
bsw.InitFromInmemoryPart(&dstIP)
|
||||||
if err := mergeBlockStreams(&dstIP.ph, &bsw, bsrs, nil, nil, &itemsMerged); err != nil {
|
if err := mergeBlockStreams(&dstIP.ph, &bsw, bsrs, nil, nil, &itemsMerged); err != nil {
|
||||||
return fmt.Errorf("cannot merge block streams: %s", err)
|
return fmt.Errorf("cannot merge block streams: %w", err)
|
||||||
}
|
}
|
||||||
if itemsMerged != uint64(len(items)) {
|
if itemsMerged != uint64(len(items)) {
|
||||||
return fmt.Errorf("unexpected itemsMerged; got %d; want %d", itemsMerged, len(items))
|
return fmt.Errorf("unexpected itemsMerged; got %d; want %d", itemsMerged, len(items))
|
||||||
|
@ -130,7 +130,7 @@ func testMergeBlockStreamsSerial(blocksToMerge, maxItemsPerBlock int) error {
|
||||||
// Verify the resulting part (dstIP) contains all the items
|
// Verify the resulting part (dstIP) contains all the items
|
||||||
// in the correct order.
|
// in the correct order.
|
||||||
if err := testCheckItems(&dstIP, items); err != nil {
|
if err := testCheckItems(&dstIP, items); err != nil {
|
||||||
return fmt.Errorf("error checking items: %s", err)
|
return fmt.Errorf("error checking items: %w", err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -164,7 +164,7 @@ func testCheckItems(dstIP *inmemoryPart, items []string) error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err := dstBsr.Error(); err != nil {
|
if err := dstBsr.Error(); err != nil {
|
||||||
return fmt.Errorf("unexpected error in dstBsr: %s", err)
|
return fmt.Errorf("unexpected error in dstBsr: %w", err)
|
||||||
}
|
}
|
||||||
if !reflect.DeepEqual(items, dstItems) {
|
if !reflect.DeepEqual(items, dstItems) {
|
||||||
return fmt.Errorf("unequal items\ngot\n%q\nwant\n%q", dstItems, items)
|
return fmt.Errorf("unequal items\ngot\n%q\nwant\n%q", dstItems, items)
|
||||||
|
|
|
@ -44,7 +44,7 @@ func (mr *metaindexRow) Unmarshal(src []byte) ([]byte, error) {
|
||||||
// Unmarshal firstItem
|
// Unmarshal firstItem
|
||||||
tail, fi, err := encoding.UnmarshalBytes(src)
|
tail, fi, err := encoding.UnmarshalBytes(src)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return tail, fmt.Errorf("cannot unmarshal firstItem: %s", err)
|
return tail, fmt.Errorf("cannot unmarshal firstItem: %w", err)
|
||||||
}
|
}
|
||||||
mr.firstItem = append(mr.firstItem[:0], fi...)
|
mr.firstItem = append(mr.firstItem[:0], fi...)
|
||||||
src = tail
|
src = tail
|
||||||
|
@ -85,11 +85,11 @@ func unmarshalMetaindexRows(dst []metaindexRow, r io.Reader) ([]metaindexRow, er
|
||||||
// since it is quite small.
|
// since it is quite small.
|
||||||
compressedData, err := ioutil.ReadAll(r)
|
compressedData, err := ioutil.ReadAll(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return dst, fmt.Errorf("cannot read metaindex data: %s", err)
|
return dst, fmt.Errorf("cannot read metaindex data: %w", err)
|
||||||
}
|
}
|
||||||
data, err := encoding.DecompressZSTD(nil, compressedData)
|
data, err := encoding.DecompressZSTD(nil, compressedData)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return dst, fmt.Errorf("cannot decompress metaindex data with size %d bytes: %s", len(compressedData), err)
|
return dst, fmt.Errorf("cannot decompress metaindex data with size %d bytes: %w", len(compressedData), err)
|
||||||
}
|
}
|
||||||
|
|
||||||
dstLen := len(dst)
|
dstLen := len(dst)
|
||||||
|
@ -102,7 +102,7 @@ func unmarshalMetaindexRows(dst []metaindexRow, r io.Reader) ([]metaindexRow, er
|
||||||
mr := &dst[len(dst)-1]
|
mr := &dst[len(dst)-1]
|
||||||
tail, err := mr.Unmarshal(data)
|
tail, err := mr.Unmarshal(data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return dst, fmt.Errorf("cannot unmarshal metaindexRow #%d from metaindex data: %s", len(dst)-dstLen, err)
|
return dst, fmt.Errorf("cannot unmarshal metaindexRow #%d from metaindex data: %w", len(dst)-dstLen, err)
|
||||||
}
|
}
|
||||||
data = tail
|
data = tail
|
||||||
}
|
}
|
||||||
|
|
|
@ -67,13 +67,13 @@ func openFilePart(path string) (*part, error) {
|
||||||
|
|
||||||
var ph partHeader
|
var ph partHeader
|
||||||
if err := ph.ParseFromPath(path); err != nil {
|
if err := ph.ParseFromPath(path); err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse path to part: %s", err)
|
return nil, fmt.Errorf("cannot parse path to part: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
metaindexPath := path + "/metaindex.bin"
|
metaindexPath := path + "/metaindex.bin"
|
||||||
metaindexFile, err := filestream.Open(metaindexPath, true)
|
metaindexFile, err := filestream.Open(metaindexPath, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot open %q: %s", metaindexPath, err)
|
return nil, fmt.Errorf("cannot open %q: %w", metaindexPath, err)
|
||||||
}
|
}
|
||||||
metaindexSize := fs.MustFileSize(metaindexPath)
|
metaindexSize := fs.MustFileSize(metaindexPath)
|
||||||
|
|
||||||
|
@ -81,7 +81,7 @@ func openFilePart(path string) (*part, error) {
|
||||||
indexFile, err := fs.OpenReaderAt(indexPath)
|
indexFile, err := fs.OpenReaderAt(indexPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
metaindexFile.MustClose()
|
metaindexFile.MustClose()
|
||||||
return nil, fmt.Errorf("cannot open %q: %s", indexPath, err)
|
return nil, fmt.Errorf("cannot open %q: %w", indexPath, err)
|
||||||
}
|
}
|
||||||
indexSize := fs.MustFileSize(indexPath)
|
indexSize := fs.MustFileSize(indexPath)
|
||||||
|
|
||||||
|
@ -90,7 +90,7 @@ func openFilePart(path string) (*part, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
metaindexFile.MustClose()
|
metaindexFile.MustClose()
|
||||||
indexFile.MustClose()
|
indexFile.MustClose()
|
||||||
return nil, fmt.Errorf("cannot open %q: %s", itemsPath, err)
|
return nil, fmt.Errorf("cannot open %q: %w", itemsPath, err)
|
||||||
}
|
}
|
||||||
itemsSize := fs.MustFileSize(itemsPath)
|
itemsSize := fs.MustFileSize(itemsPath)
|
||||||
|
|
||||||
|
@ -100,7 +100,7 @@ func openFilePart(path string) (*part, error) {
|
||||||
metaindexFile.MustClose()
|
metaindexFile.MustClose()
|
||||||
indexFile.MustClose()
|
indexFile.MustClose()
|
||||||
itemsFile.MustClose()
|
itemsFile.MustClose()
|
||||||
return nil, fmt.Errorf("cannot open %q: %s", lensPath, err)
|
return nil, fmt.Errorf("cannot open %q: %w", lensPath, err)
|
||||||
}
|
}
|
||||||
lensSize := fs.MustFileSize(lensPath)
|
lensSize := fs.MustFileSize(lensPath)
|
||||||
|
|
||||||
|
@ -112,7 +112,7 @@ func newPart(ph *partHeader, path string, size uint64, metaindexReader filestrea
|
||||||
var errors []error
|
var errors []error
|
||||||
mrs, err := unmarshalMetaindexRows(nil, metaindexReader)
|
mrs, err := unmarshalMetaindexRows(nil, metaindexReader)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
errors = append(errors, fmt.Errorf("cannot unmarshal metaindexRows: %s", err))
|
errors = append(errors, fmt.Errorf("cannot unmarshal metaindexRows: %w", err))
|
||||||
}
|
}
|
||||||
metaindexReader.MustClose()
|
metaindexReader.MustClose()
|
||||||
|
|
||||||
|
@ -131,7 +131,7 @@ func newPart(ph *partHeader, path string, size uint64, metaindexReader filestrea
|
||||||
|
|
||||||
if len(errors) > 0 {
|
if len(errors) > 0 {
|
||||||
// Return only the first error, since it has no sense in returning all errors.
|
// Return only the first error, since it has no sense in returning all errors.
|
||||||
err := fmt.Errorf("error opening part %s: %s", p.path, errors[0])
|
err := fmt.Errorf("error opening part %s: %w", p.path, errors[0])
|
||||||
p.MustClose()
|
p.MustClose()
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
|
@ -54,7 +54,7 @@ func (hs *hexString) UnmarshalJSON(data []byte) error {
|
||||||
data = data[1 : len(data)-1]
|
data = data[1 : len(data)-1]
|
||||||
b, err := hex.DecodeString(string(data))
|
b, err := hex.DecodeString(string(data))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot hex-decode %q: %s", data, err)
|
return fmt.Errorf("cannot hex-decode %q: %w", data, err)
|
||||||
}
|
}
|
||||||
*hs = b
|
*hs = b
|
||||||
return nil
|
return nil
|
||||||
|
@ -101,7 +101,7 @@ func (ph *partHeader) ParseFromPath(partPath string) error {
|
||||||
// Read itemsCount from partName.
|
// Read itemsCount from partName.
|
||||||
itemsCount, err := strconv.ParseUint(a[0], 10, 64)
|
itemsCount, err := strconv.ParseUint(a[0], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot parse itemsCount from partName %q: %s", partName, err)
|
return fmt.Errorf("cannot parse itemsCount from partName %q: %w", partName, err)
|
||||||
}
|
}
|
||||||
ph.itemsCount = itemsCount
|
ph.itemsCount = itemsCount
|
||||||
if ph.itemsCount <= 0 {
|
if ph.itemsCount <= 0 {
|
||||||
|
@ -111,7 +111,7 @@ func (ph *partHeader) ParseFromPath(partPath string) error {
|
||||||
// Read blocksCount from partName.
|
// Read blocksCount from partName.
|
||||||
blocksCount, err := strconv.ParseUint(a[1], 10, 64)
|
blocksCount, err := strconv.ParseUint(a[1], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot parse blocksCount from partName %q: %s", partName, err)
|
return fmt.Errorf("cannot parse blocksCount from partName %q: %w", partName, err)
|
||||||
}
|
}
|
||||||
ph.blocksCount = blocksCount
|
ph.blocksCount = blocksCount
|
||||||
if ph.blocksCount <= 0 {
|
if ph.blocksCount <= 0 {
|
||||||
|
@ -126,12 +126,12 @@ func (ph *partHeader) ParseFromPath(partPath string) error {
|
||||||
metadataPath := partPath + "/metadata.json"
|
metadataPath := partPath + "/metadata.json"
|
||||||
metadata, err := ioutil.ReadFile(metadataPath)
|
metadata, err := ioutil.ReadFile(metadataPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot read %q: %s", metadataPath, err)
|
return fmt.Errorf("cannot read %q: %w", metadataPath, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var phj partHeaderJSON
|
var phj partHeaderJSON
|
||||||
if err := json.Unmarshal(metadata, &phj); err != nil {
|
if err := json.Unmarshal(metadata, &phj); err != nil {
|
||||||
return fmt.Errorf("cannot parse %q: %s", metadataPath, err)
|
return fmt.Errorf("cannot parse %q: %w", metadataPath, err)
|
||||||
}
|
}
|
||||||
if ph.itemsCount != phj.ItemsCount {
|
if ph.itemsCount != phj.ItemsCount {
|
||||||
return fmt.Errorf("invalid ItemsCount in %q; got %d; want %d", metadataPath, phj.ItemsCount, ph.itemsCount)
|
return fmt.Errorf("invalid ItemsCount in %q; got %d; want %d", metadataPath, phj.ItemsCount, ph.itemsCount)
|
||||||
|
@ -161,11 +161,11 @@ func (ph *partHeader) WriteMetadata(partPath string) error {
|
||||||
}
|
}
|
||||||
metadata, err := json.MarshalIndent(&phj, "", "\t")
|
metadata, err := json.MarshalIndent(&phj, "", "\t")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot marshal metadata: %s", err)
|
return fmt.Errorf("cannot marshal metadata: %w", err)
|
||||||
}
|
}
|
||||||
metadataPath := partPath + "/metadata.json"
|
metadataPath := partPath + "/metadata.json"
|
||||||
if err := fs.WriteFileAtomically(metadataPath, metadata); err != nil {
|
if err := fs.WriteFileAtomically(metadataPath, metadata); err != nil {
|
||||||
return fmt.Errorf("cannot create %q: %s", metadataPath, err)
|
return fmt.Errorf("cannot create %q: %w", metadataPath, err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -279,7 +279,7 @@ func (ps *partSearch) nextBHS() error {
|
||||||
var err error
|
var err error
|
||||||
idxb, err = ps.readIndexBlock(mr)
|
idxb, err = ps.readIndexBlock(mr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot read index block: %s", err)
|
return fmt.Errorf("cannot read index block: %w", err)
|
||||||
}
|
}
|
||||||
ps.idxbCache.Put(idxbKey, idxb)
|
ps.idxbCache.Put(idxbKey, idxb)
|
||||||
}
|
}
|
||||||
|
@ -294,12 +294,12 @@ func (ps *partSearch) readIndexBlock(mr *metaindexRow) (*indexBlock, error) {
|
||||||
var err error
|
var err error
|
||||||
ps.indexBuf, err = encoding.DecompressZSTD(ps.indexBuf[:0], ps.compressedIndexBuf)
|
ps.indexBuf, err = encoding.DecompressZSTD(ps.indexBuf[:0], ps.compressedIndexBuf)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot decompress index block with size %d bytes: %s", len(ps.compressedIndexBuf), err)
|
return nil, fmt.Errorf("cannot decompress index block with size %d bytes: %w", len(ps.compressedIndexBuf), err)
|
||||||
}
|
}
|
||||||
idxb := getIndexBlock()
|
idxb := getIndexBlock()
|
||||||
idxb.bhs, err = unmarshalBlockHeaders(idxb.bhs[:0], ps.indexBuf, int(mr.blockHeadersCount))
|
idxb.bhs, err = unmarshalBlockHeaders(idxb.bhs[:0], ps.indexBuf, int(mr.blockHeadersCount))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot unmarshal block headers from index block (offset=%d, size=%d): %s", mr.indexBlockOffset, mr.indexBlockSize, err)
|
return nil, fmt.Errorf("cannot unmarshal block headers from index block (offset=%d, size=%d): %w", mr.indexBlockOffset, mr.indexBlockSize, err)
|
||||||
}
|
}
|
||||||
return idxb, nil
|
return idxb, nil
|
||||||
}
|
}
|
||||||
|
@ -340,7 +340,7 @@ func (ps *partSearch) readInmemoryBlock(bh *blockHeader) (*inmemoryBlock, error)
|
||||||
|
|
||||||
ib := getInmemoryBlock()
|
ib := getInmemoryBlock()
|
||||||
if err := ib.UnmarshalData(&ps.sb, bh.firstItem, bh.commonPrefix, bh.itemsCount, bh.marshalType); err != nil {
|
if err := ib.UnmarshalData(&ps.sb, bh.firstItem, bh.commonPrefix, bh.itemsCount, bh.marshalType); err != nil {
|
||||||
return nil, fmt.Errorf("cannot unmarshal storage block with %d items: %s", bh.itemsCount, err)
|
return nil, fmt.Errorf("cannot unmarshal storage block with %d items: %w", bh.itemsCount, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return ib, nil
|
return ib, nil
|
||||||
|
|
|
@ -72,7 +72,7 @@ func testPartSearchSerial(p *part, items []string) error {
|
||||||
return fmt.Errorf("unexpected item found past the end of all the items: %X", ps.Item)
|
return fmt.Errorf("unexpected item found past the end of all the items: %X", ps.Item)
|
||||||
}
|
}
|
||||||
if err := ps.Error(); err != nil {
|
if err := ps.Error(); err != nil {
|
||||||
return fmt.Errorf("unexpected error: %s", err)
|
return fmt.Errorf("unexpected error: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Search for the item bigger than the items[len(items)-1]
|
// Search for the item bigger than the items[len(items)-1]
|
||||||
|
@ -83,7 +83,7 @@ func testPartSearchSerial(p *part, items []string) error {
|
||||||
return fmt.Errorf("unexpected item found: %X; want nothing", ps.Item)
|
return fmt.Errorf("unexpected item found: %X; want nothing", ps.Item)
|
||||||
}
|
}
|
||||||
if err := ps.Error(); err != nil {
|
if err := ps.Error(); err != nil {
|
||||||
return fmt.Errorf("unexpected error when searching past the last item: %s", err)
|
return fmt.Errorf("unexpected error when searching past the last item: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Search for inner items
|
// Search for inner items
|
||||||
|
@ -107,7 +107,7 @@ func testPartSearchSerial(p *part, items []string) error {
|
||||||
return fmt.Errorf("unexpected item found past the end of all the items for idx %d out of %d items; loop %d: got %X", n, len(items), loop, ps.Item)
|
return fmt.Errorf("unexpected item found past the end of all the items for idx %d out of %d items; loop %d: got %X", n, len(items), loop, ps.Item)
|
||||||
}
|
}
|
||||||
if err := ps.Error(); err != nil {
|
if err := ps.Error(); err != nil {
|
||||||
return fmt.Errorf("unexpected error on loop %d: %s", loop, err)
|
return fmt.Errorf("unexpected error on loop %d: %w", loop, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -121,7 +121,7 @@ func testPartSearchSerial(p *part, items []string) error {
|
||||||
return fmt.Errorf("unexpected item found at position %d: got %X; want %X", i, ps.Item, item)
|
return fmt.Errorf("unexpected item found at position %d: got %X; want %X", i, ps.Item, item)
|
||||||
}
|
}
|
||||||
if err := ps.Error(); err != nil {
|
if err := ps.Error(); err != nil {
|
||||||
return fmt.Errorf("unexpected error when searching for items[%d]=%X: %s", i, item, err)
|
return fmt.Errorf("unexpected error when searching for items[%d]=%X: %w", i, item, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -136,7 +136,7 @@ func testPartSearchSerial(p *part, items []string) error {
|
||||||
return fmt.Errorf("unexpected item found at position %d: got %X; want %X", i, ps.Item, item)
|
return fmt.Errorf("unexpected item found at position %d: got %X; want %X", i, ps.Item, item)
|
||||||
}
|
}
|
||||||
if err := ps.Error(); err != nil {
|
if err := ps.Error(); err != nil {
|
||||||
return fmt.Errorf("unexpected error when searching for items[%d]=%X: %s", i, item, err)
|
return fmt.Errorf("unexpected error when searching for items[%d]=%X: %w", i, item, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -151,7 +151,7 @@ func newTestPart(blocksCount, maxItemsPerBlock int) (*part, []string, error) {
|
||||||
var bsw blockStreamWriter
|
var bsw blockStreamWriter
|
||||||
bsw.InitFromInmemoryPart(&ip)
|
bsw.InitFromInmemoryPart(&ip)
|
||||||
if err := mergeBlockStreams(&ip.ph, &bsw, bsrs, nil, nil, &itemsMerged); err != nil {
|
if err := mergeBlockStreams(&ip.ph, &bsw, bsrs, nil, nil, &itemsMerged); err != nil {
|
||||||
return nil, nil, fmt.Errorf("cannot merge blocks: %s", err)
|
return nil, nil, fmt.Errorf("cannot merge blocks: %w", err)
|
||||||
}
|
}
|
||||||
if itemsMerged != uint64(len(items)) {
|
if itemsMerged != uint64(len(items)) {
|
||||||
return nil, nil, fmt.Errorf("unexpected itemsMerged; got %d; want %d", itemsMerged, len(items))
|
return nil, nil, fmt.Errorf("unexpected itemsMerged; got %d; want %d", itemsMerged, len(items))
|
||||||
|
@ -159,7 +159,7 @@ func newTestPart(blocksCount, maxItemsPerBlock int) (*part, []string, error) {
|
||||||
size := ip.size()
|
size := ip.size()
|
||||||
p, err := newPart(&ip.ph, "partName", size, ip.metaindexData.NewReader(), &ip.indexData, &ip.itemsData, &ip.lensData)
|
p, err := newPart(&ip.ph, "partName", size, ip.metaindexData.NewReader(), &ip.indexData, &ip.itemsData, &ip.lensData)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, fmt.Errorf("cannot create part: %s", err)
|
return nil, nil, fmt.Errorf("cannot create part: %w", err)
|
||||||
}
|
}
|
||||||
return p, items, nil
|
return p, items, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -169,7 +169,7 @@ func OpenTable(path string, flushCallback func(), prepareBlock PrepareBlockCallb
|
||||||
|
|
||||||
// Create a directory for the table if it doesn't exist yet.
|
// Create a directory for the table if it doesn't exist yet.
|
||||||
if err := fs.MkdirAllIfNotExist(path); err != nil {
|
if err := fs.MkdirAllIfNotExist(path); err != nil {
|
||||||
return nil, fmt.Errorf("cannot create directory %q: %s", path, err)
|
return nil, fmt.Errorf("cannot create directory %q: %w", path, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Protect from concurrent opens.
|
// Protect from concurrent opens.
|
||||||
|
@ -181,7 +181,7 @@ func OpenTable(path string, flushCallback func(), prepareBlock PrepareBlockCallb
|
||||||
// Open table parts.
|
// Open table parts.
|
||||||
pws, err := openParts(path)
|
pws, err := openParts(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot open table parts at %q: %s", path, err)
|
return nil, fmt.Errorf("cannot open table parts at %q: %w", path, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
tb := &Table{
|
tb := &Table{
|
||||||
|
@ -481,13 +481,13 @@ func (tb *Table) convertToV1280() {
|
||||||
func (tb *Table) mergePartsOptimal(pws []*partWrapper, stopCh <-chan struct{}) error {
|
func (tb *Table) mergePartsOptimal(pws []*partWrapper, stopCh <-chan struct{}) error {
|
||||||
for len(pws) > defaultPartsToMerge {
|
for len(pws) > defaultPartsToMerge {
|
||||||
if err := tb.mergeParts(pws[:defaultPartsToMerge], stopCh, false); err != nil {
|
if err := tb.mergeParts(pws[:defaultPartsToMerge], stopCh, false); err != nil {
|
||||||
return fmt.Errorf("cannot merge %d parts: %s", defaultPartsToMerge, err)
|
return fmt.Errorf("cannot merge %d parts: %w", defaultPartsToMerge, err)
|
||||||
}
|
}
|
||||||
pws = pws[defaultPartsToMerge:]
|
pws = pws[defaultPartsToMerge:]
|
||||||
}
|
}
|
||||||
if len(pws) > 0 {
|
if len(pws) > 0 {
|
||||||
if err := tb.mergeParts(pws, stopCh, false); err != nil {
|
if err := tb.mergeParts(pws, stopCh, false); err != nil {
|
||||||
return fmt.Errorf("cannot merge %d parts: %s", len(pws), err)
|
return fmt.Errorf("cannot merge %d parts: %w", len(pws), err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -761,7 +761,7 @@ func (tb *Table) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isOuterP
|
||||||
bsr.InitFromInmemoryPart(pw.mp)
|
bsr.InitFromInmemoryPart(pw.mp)
|
||||||
} else {
|
} else {
|
||||||
if err := bsr.InitFromFilePart(pw.p.path); err != nil {
|
if err := bsr.InitFromFilePart(pw.p.path); err != nil {
|
||||||
return fmt.Errorf("cannot open source part for merging: %s", err)
|
return fmt.Errorf("cannot open source part for merging: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
bsrs = append(bsrs, bsr)
|
bsrs = append(bsrs, bsr)
|
||||||
|
@ -786,7 +786,7 @@ func (tb *Table) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isOuterP
|
||||||
bsw := getBlockStreamWriter()
|
bsw := getBlockStreamWriter()
|
||||||
compressLevel := getCompressLevelForPartItems(outItemsCount, outBlocksCount)
|
compressLevel := getCompressLevelForPartItems(outItemsCount, outBlocksCount)
|
||||||
if err := bsw.InitFromFilePart(tmpPartPath, nocache, compressLevel); err != nil {
|
if err := bsw.InitFromFilePart(tmpPartPath, nocache, compressLevel); err != nil {
|
||||||
return fmt.Errorf("cannot create destination part %q: %s", tmpPartPath, err)
|
return fmt.Errorf("cannot create destination part %q: %w", tmpPartPath, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Merge parts into a temporary location.
|
// Merge parts into a temporary location.
|
||||||
|
@ -797,10 +797,10 @@ func (tb *Table) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isOuterP
|
||||||
if err == errForciblyStopped {
|
if err == errForciblyStopped {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return fmt.Errorf("error when merging parts to %q: %s", tmpPartPath, err)
|
return fmt.Errorf("error when merging parts to %q: %w", tmpPartPath, err)
|
||||||
}
|
}
|
||||||
if err := ph.WriteMetadata(tmpPartPath); err != nil {
|
if err := ph.WriteMetadata(tmpPartPath); err != nil {
|
||||||
return fmt.Errorf("cannot write metadata to destination part %q: %s", tmpPartPath, err)
|
return fmt.Errorf("cannot write metadata to destination part %q: %w", tmpPartPath, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Close bsrs (aka source parts).
|
// Close bsrs (aka source parts).
|
||||||
|
@ -821,18 +821,18 @@ func (tb *Table) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isOuterP
|
||||||
fmt.Fprintf(&bb, "%s -> %s\n", tmpPartPath, dstPartPath)
|
fmt.Fprintf(&bb, "%s -> %s\n", tmpPartPath, dstPartPath)
|
||||||
txnPath := fmt.Sprintf("%s/txn/%016X", tb.path, mergeIdx)
|
txnPath := fmt.Sprintf("%s/txn/%016X", tb.path, mergeIdx)
|
||||||
if err := fs.WriteFileAtomically(txnPath, bb.B); err != nil {
|
if err := fs.WriteFileAtomically(txnPath, bb.B); err != nil {
|
||||||
return fmt.Errorf("cannot create transaction file %q: %s", txnPath, err)
|
return fmt.Errorf("cannot create transaction file %q: %w", txnPath, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Run the created transaction.
|
// Run the created transaction.
|
||||||
if err := runTransaction(&tb.snapshotLock, tb.path, txnPath); err != nil {
|
if err := runTransaction(&tb.snapshotLock, tb.path, txnPath); err != nil {
|
||||||
return fmt.Errorf("cannot execute transaction %q: %s", txnPath, err)
|
return fmt.Errorf("cannot execute transaction %q: %w", txnPath, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Open the merged part.
|
// Open the merged part.
|
||||||
newP, err := openFilePart(dstPartPath)
|
newP, err := openFilePart(dstPartPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot open merged part %q: %s", dstPartPath, err)
|
return fmt.Errorf("cannot open merged part %q: %w", dstPartPath, err)
|
||||||
}
|
}
|
||||||
newPSize := newP.size
|
newPSize := newP.size
|
||||||
newPW := &partWrapper{
|
newPW := &partWrapper{
|
||||||
|
@ -950,7 +950,7 @@ func openParts(path string) ([]*partWrapper, error) {
|
||||||
}
|
}
|
||||||
d, err := os.Open(path)
|
d, err := os.Open(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot open difrectory: %s", err)
|
return nil, fmt.Errorf("cannot open difrectory: %w", err)
|
||||||
}
|
}
|
||||||
defer fs.MustClose(d)
|
defer fs.MustClose(d)
|
||||||
|
|
||||||
|
@ -958,19 +958,19 @@ func openParts(path string) ([]*partWrapper, error) {
|
||||||
// Snapshots cannot be created yet, so use fakeSnapshotLock.
|
// Snapshots cannot be created yet, so use fakeSnapshotLock.
|
||||||
var fakeSnapshotLock sync.RWMutex
|
var fakeSnapshotLock sync.RWMutex
|
||||||
if err := runTransactions(&fakeSnapshotLock, path); err != nil {
|
if err := runTransactions(&fakeSnapshotLock, path); err != nil {
|
||||||
return nil, fmt.Errorf("cannot run transactions: %s", err)
|
return nil, fmt.Errorf("cannot run transactions: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
txnDir := path + "/txn"
|
txnDir := path + "/txn"
|
||||||
fs.MustRemoveAll(txnDir)
|
fs.MustRemoveAll(txnDir)
|
||||||
if err := fs.MkdirAllFailIfExist(txnDir); err != nil {
|
if err := fs.MkdirAllFailIfExist(txnDir); err != nil {
|
||||||
return nil, fmt.Errorf("cannot create %q: %s", txnDir, err)
|
return nil, fmt.Errorf("cannot create %q: %w", txnDir, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
tmpDir := path + "/tmp"
|
tmpDir := path + "/tmp"
|
||||||
fs.MustRemoveAll(tmpDir)
|
fs.MustRemoveAll(tmpDir)
|
||||||
if err := fs.MkdirAllFailIfExist(tmpDir); err != nil {
|
if err := fs.MkdirAllFailIfExist(tmpDir); err != nil {
|
||||||
return nil, fmt.Errorf("cannot create %q: %s", tmpDir, err)
|
return nil, fmt.Errorf("cannot create %q: %w", tmpDir, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
fs.MustSyncPath(path)
|
fs.MustSyncPath(path)
|
||||||
|
@ -978,7 +978,7 @@ func openParts(path string) ([]*partWrapper, error) {
|
||||||
// Open parts.
|
// Open parts.
|
||||||
fis, err := d.Readdir(-1)
|
fis, err := d.Readdir(-1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot read directory: %s", err)
|
return nil, fmt.Errorf("cannot read directory: %w", err)
|
||||||
}
|
}
|
||||||
var pws []*partWrapper
|
var pws []*partWrapper
|
||||||
for _, fi := range fis {
|
for _, fi := range fis {
|
||||||
|
@ -995,7 +995,7 @@ func openParts(path string) ([]*partWrapper, error) {
|
||||||
p, err := openFilePart(partPath)
|
p, err := openFilePart(partPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
mustCloseParts(pws)
|
mustCloseParts(pws)
|
||||||
return nil, fmt.Errorf("cannot open part %q: %s", partPath, err)
|
return nil, fmt.Errorf("cannot open part %q: %w", partPath, err)
|
||||||
}
|
}
|
||||||
pw := &partWrapper{
|
pw := &partWrapper{
|
||||||
p: p,
|
p: p,
|
||||||
|
@ -1028,11 +1028,11 @@ func (tb *Table) CreateSnapshotAt(dstDir string) error {
|
||||||
srcDir := tb.path
|
srcDir := tb.path
|
||||||
srcDir, err = filepath.Abs(srcDir)
|
srcDir, err = filepath.Abs(srcDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot obtain absolute dir for %q: %s", srcDir, err)
|
return fmt.Errorf("cannot obtain absolute dir for %q: %w", srcDir, err)
|
||||||
}
|
}
|
||||||
dstDir, err = filepath.Abs(dstDir)
|
dstDir, err = filepath.Abs(dstDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot obtain absolute dir for %q: %s", dstDir, err)
|
return fmt.Errorf("cannot obtain absolute dir for %q: %w", dstDir, err)
|
||||||
}
|
}
|
||||||
if strings.HasPrefix(dstDir, srcDir+"/") {
|
if strings.HasPrefix(dstDir, srcDir+"/") {
|
||||||
return fmt.Errorf("cannot create snapshot %q inside the data dir %q", dstDir, srcDir)
|
return fmt.Errorf("cannot create snapshot %q inside the data dir %q", dstDir, srcDir)
|
||||||
|
@ -1047,18 +1047,18 @@ func (tb *Table) CreateSnapshotAt(dstDir string) error {
|
||||||
defer tb.snapshotLock.Unlock()
|
defer tb.snapshotLock.Unlock()
|
||||||
|
|
||||||
if err := fs.MkdirAllFailIfExist(dstDir); err != nil {
|
if err := fs.MkdirAllFailIfExist(dstDir); err != nil {
|
||||||
return fmt.Errorf("cannot create snapshot dir %q: %s", dstDir, err)
|
return fmt.Errorf("cannot create snapshot dir %q: %w", dstDir, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
d, err := os.Open(srcDir)
|
d, err := os.Open(srcDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot open difrectory: %s", err)
|
return fmt.Errorf("cannot open difrectory: %w", err)
|
||||||
}
|
}
|
||||||
defer fs.MustClose(d)
|
defer fs.MustClose(d)
|
||||||
|
|
||||||
fis, err := d.Readdir(-1)
|
fis, err := d.Readdir(-1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot read directory: %s", err)
|
return fmt.Errorf("cannot read directory: %w", err)
|
||||||
}
|
}
|
||||||
for _, fi := range fis {
|
for _, fi := range fis {
|
||||||
fn := fi.Name()
|
fn := fi.Name()
|
||||||
|
@ -1068,7 +1068,7 @@ func (tb *Table) CreateSnapshotAt(dstDir string) error {
|
||||||
srcPath := srcDir + "/" + fn
|
srcPath := srcDir + "/" + fn
|
||||||
dstPath := dstDir + "/" + fn
|
dstPath := dstDir + "/" + fn
|
||||||
if err := os.Link(srcPath, dstPath); err != nil {
|
if err := os.Link(srcPath, dstPath); err != nil {
|
||||||
return fmt.Errorf("cannot hard link from %q to %q: %s", srcPath, dstPath, err)
|
return fmt.Errorf("cannot hard link from %q to %q: %w", srcPath, dstPath, err)
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
// Skip other non-directories.
|
// Skip other non-directories.
|
||||||
|
@ -1082,7 +1082,7 @@ func (tb *Table) CreateSnapshotAt(dstDir string) error {
|
||||||
srcPartPath := srcDir + "/" + fn
|
srcPartPath := srcDir + "/" + fn
|
||||||
dstPartPath := dstDir + "/" + fn
|
dstPartPath := dstDir + "/" + fn
|
||||||
if err := fs.HardLinkFiles(srcPartPath, dstPartPath); err != nil {
|
if err := fs.HardLinkFiles(srcPartPath, dstPartPath); err != nil {
|
||||||
return fmt.Errorf("cannot create hard links from %q to %q: %s", srcPartPath, dstPartPath, err)
|
return fmt.Errorf("cannot create hard links from %q to %q: %w", srcPartPath, dstPartPath, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1107,13 +1107,13 @@ func runTransactions(txnLock *sync.RWMutex, path string) error {
|
||||||
if os.IsNotExist(err) {
|
if os.IsNotExist(err) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return fmt.Errorf("cannot open %q: %s", txnDir, err)
|
return fmt.Errorf("cannot open %q: %w", txnDir, err)
|
||||||
}
|
}
|
||||||
defer fs.MustClose(d)
|
defer fs.MustClose(d)
|
||||||
|
|
||||||
fis, err := d.Readdir(-1)
|
fis, err := d.Readdir(-1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot read directory %q: %s", d.Name(), err)
|
return fmt.Errorf("cannot read directory %q: %w", d.Name(), err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sort transaction files by id, since transactions must be ordered.
|
// Sort transaction files by id, since transactions must be ordered.
|
||||||
|
@ -1129,7 +1129,7 @@ func runTransactions(txnLock *sync.RWMutex, path string) error {
|
||||||
}
|
}
|
||||||
txnPath := txnDir + "/" + fn
|
txnPath := txnDir + "/" + fn
|
||||||
if err := runTransaction(txnLock, path, txnPath); err != nil {
|
if err := runTransaction(txnLock, path, txnPath); err != nil {
|
||||||
return fmt.Errorf("cannot run transaction from %q: %s", txnPath, err)
|
return fmt.Errorf("cannot run transaction from %q: %w", txnPath, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -1143,7 +1143,7 @@ func runTransaction(txnLock *sync.RWMutex, pathPrefix, txnPath string) error {
|
||||||
|
|
||||||
data, err := ioutil.ReadFile(txnPath)
|
data, err := ioutil.ReadFile(txnPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot read transaction file: %s", err)
|
return fmt.Errorf("cannot read transaction file: %w", err)
|
||||||
}
|
}
|
||||||
if len(data) > 0 && data[len(data)-1] == '\n' {
|
if len(data) > 0 && data[len(data)-1] == '\n' {
|
||||||
data = data[:len(data)-1]
|
data = data[:len(data)-1]
|
||||||
|
@ -1164,7 +1164,7 @@ func runTransaction(txnLock *sync.RWMutex, pathPrefix, txnPath string) error {
|
||||||
for _, path := range rmPaths {
|
for _, path := range rmPaths {
|
||||||
path, err := validatePath(pathPrefix, path)
|
path, err := validatePath(pathPrefix, path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("invalid path to remove: %s", err)
|
return fmt.Errorf("invalid path to remove: %w", err)
|
||||||
}
|
}
|
||||||
removeWG.Add(1)
|
removeWG.Add(1)
|
||||||
fs.MustRemoveAllWithDoneCallback(path, removeWG.Done)
|
fs.MustRemoveAllWithDoneCallback(path, removeWG.Done)
|
||||||
|
@ -1175,15 +1175,15 @@ func runTransaction(txnLock *sync.RWMutex, pathPrefix, txnPath string) error {
|
||||||
dstPath := mvPaths[1]
|
dstPath := mvPaths[1]
|
||||||
srcPath, err = validatePath(pathPrefix, srcPath)
|
srcPath, err = validatePath(pathPrefix, srcPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("invalid source path to rename: %s", err)
|
return fmt.Errorf("invalid source path to rename: %w", err)
|
||||||
}
|
}
|
||||||
dstPath, err = validatePath(pathPrefix, dstPath)
|
dstPath, err = validatePath(pathPrefix, dstPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("invalid destination path to rename: %s", err)
|
return fmt.Errorf("invalid destination path to rename: %w", err)
|
||||||
}
|
}
|
||||||
if fs.IsPathExist(srcPath) {
|
if fs.IsPathExist(srcPath) {
|
||||||
if err := os.Rename(srcPath, dstPath); err != nil {
|
if err := os.Rename(srcPath, dstPath); err != nil {
|
||||||
return fmt.Errorf("cannot rename %q to %q: %s", srcPath, dstPath, err)
|
return fmt.Errorf("cannot rename %q to %q: %w", srcPath, dstPath, err)
|
||||||
}
|
}
|
||||||
} else if !fs.IsPathExist(dstPath) {
|
} else if !fs.IsPathExist(dstPath) {
|
||||||
// Emit info message for the expected condition after unclean shutdown on NFS disk.
|
// Emit info message for the expected condition after unclean shutdown on NFS disk.
|
||||||
|
@ -1217,12 +1217,12 @@ func validatePath(pathPrefix, path string) (string, error) {
|
||||||
|
|
||||||
pathPrefix, err = filepath.Abs(pathPrefix)
|
pathPrefix, err = filepath.Abs(pathPrefix)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return path, fmt.Errorf("cannot determine absolute path for pathPrefix=%q: %s", pathPrefix, err)
|
return path, fmt.Errorf("cannot determine absolute path for pathPrefix=%q: %w", pathPrefix, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
path, err = filepath.Abs(path)
|
path, err = filepath.Abs(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return path, fmt.Errorf("cannot determine absolute path for %q: %s", path, err)
|
return path, fmt.Errorf("cannot determine absolute path for %q: %w", path, err)
|
||||||
}
|
}
|
||||||
if !strings.HasPrefix(path, pathPrefix+"/") {
|
if !strings.HasPrefix(path, pathPrefix+"/") {
|
||||||
return path, fmt.Errorf("invalid path %q; must start with %q", path, pathPrefix+"/")
|
return path, fmt.Errorf("invalid path %q; must start with %q", path, pathPrefix+"/")
|
||||||
|
|
|
@ -104,7 +104,7 @@ func (ts *TableSearch) Seek(k []byte) {
|
||||||
}
|
}
|
||||||
if len(errors) > 0 {
|
if len(errors) > 0 {
|
||||||
// Return only the first error, since it has no sense in returning all errors.
|
// Return only the first error, since it has no sense in returning all errors.
|
||||||
ts.err = fmt.Errorf("cannot seek %q: %s", k, errors[0])
|
ts.err = fmt.Errorf("cannot seek %q: %w", k, errors[0])
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if len(ts.psHeap) == 0 {
|
if len(ts.psHeap) == 0 {
|
||||||
|
@ -149,7 +149,7 @@ func (ts *TableSearch) NextItem() bool {
|
||||||
ts.err = ts.nextBlock()
|
ts.err = ts.nextBlock()
|
||||||
if ts.err != nil {
|
if ts.err != nil {
|
||||||
if ts.err != io.EOF {
|
if ts.err != io.EOF {
|
||||||
ts.err = fmt.Errorf("cannot obtain the next block to search in the table: %s", ts.err)
|
ts.err = fmt.Errorf("cannot obtain the next block to search in the table: %w", ts.err)
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
|
@ -98,7 +98,7 @@ func testTableSearchConcurrent(tb *Table, items []string) error {
|
||||||
select {
|
select {
|
||||||
case err := <-ch:
|
case err := <-ch:
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("unexpected error: %s", err)
|
return fmt.Errorf("unexpected error: %w", err)
|
||||||
}
|
}
|
||||||
case <-time.After(time.Second * 5):
|
case <-time.After(time.Second * 5):
|
||||||
return fmt.Errorf("timeout")
|
return fmt.Errorf("timeout")
|
||||||
|
@ -139,7 +139,7 @@ func testTableSearchSerial(tb *Table, items []string) error {
|
||||||
return fmt.Errorf("superflouos item found at position %d when searching for %q: %q", n, key, ts.Item)
|
return fmt.Errorf("superflouos item found at position %d when searching for %q: %q", n, key, ts.Item)
|
||||||
}
|
}
|
||||||
if err := ts.Error(); err != nil {
|
if err := ts.Error(); err != nil {
|
||||||
return fmt.Errorf("unexpected error when searching for %q: %s", key, err)
|
return fmt.Errorf("unexpected error when searching for %q: %w", key, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
ts.MustClose()
|
ts.MustClose()
|
||||||
|
@ -153,13 +153,13 @@ func newTestTable(path string, itemsCount int) (*Table, []string, error) {
|
||||||
}
|
}
|
||||||
tb, err := OpenTable(path, flushCallback, nil)
|
tb, err := OpenTable(path, flushCallback, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, fmt.Errorf("cannot open table: %s", err)
|
return nil, nil, fmt.Errorf("cannot open table: %w", err)
|
||||||
}
|
}
|
||||||
items := make([]string, itemsCount)
|
items := make([]string, itemsCount)
|
||||||
for i := 0; i < itemsCount; i++ {
|
for i := 0; i < itemsCount; i++ {
|
||||||
item := fmt.Sprintf("%d:%d", rand.Intn(1e9), i)
|
item := fmt.Sprintf("%d:%d", rand.Intn(1e9), i)
|
||||||
if err := tb.AddItems([][]byte{[]byte(item)}); err != nil {
|
if err := tb.AddItems([][]byte{[]byte(item)}); err != nil {
|
||||||
return nil, nil, fmt.Errorf("cannot add item: %s", err)
|
return nil, nil, fmt.Errorf("cannot add item: %w", err)
|
||||||
}
|
}
|
||||||
items[i] = item
|
items[i] = item
|
||||||
}
|
}
|
||||||
|
|
|
@ -27,7 +27,7 @@ func benchmarkTableSearch(b *testing.B, itemsCount int) {
|
||||||
|
|
||||||
tb, items, err := newTestTable(path, itemsCount)
|
tb, items, err := newTestTable(path, itemsCount)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(fmt.Errorf("cannot create test table at %q with %d items: %s", path, itemsCount, err))
|
panic(fmt.Errorf("cannot create test table at %q with %d items: %w", path, itemsCount, err))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Force finishing pending merges
|
// Force finishing pending merges
|
||||||
|
@ -106,7 +106,7 @@ func benchmarkTableSearchKeysExt(b *testing.B, tb *Table, keys [][]byte, stripSu
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err := ts.Error(); err != nil {
|
if err := ts.Error(); err != nil {
|
||||||
panic(fmt.Errorf("BUG: unexpected error for searchKeys[%d]=%q: %s", i, searchKey, err))
|
panic(fmt.Errorf("BUG: unexpected error for searchKeys[%d]=%q: %w", i, searchKey, err))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -177,7 +177,7 @@ func tryOpeningQueue(path, name string, chunkFileSize, maxBlockSize, maxPendingB
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := fs.MkdirAllIfNotExist(path); err != nil {
|
if err := fs.MkdirAllIfNotExist(path); err != nil {
|
||||||
return nil, fmt.Errorf("cannot create directory %q: %s", path, err)
|
return nil, fmt.Errorf("cannot create directory %q: %w", path, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read metainfo.
|
// Read metainfo.
|
||||||
|
@ -193,13 +193,13 @@ func tryOpeningQueue(path, name string, chunkFileSize, maxBlockSize, maxPendingB
|
||||||
mi.Reset()
|
mi.Reset()
|
||||||
mi.Name = q.name
|
mi.Name = q.name
|
||||||
if err := mi.WriteToFile(metainfoPath); err != nil {
|
if err := mi.WriteToFile(metainfoPath); err != nil {
|
||||||
return nil, fmt.Errorf("cannot create %q: %s", metainfoPath, err)
|
return nil, fmt.Errorf("cannot create %q: %w", metainfoPath, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create initial chunk file.
|
// Create initial chunk file.
|
||||||
filepath := q.chunkFilePath(0)
|
filepath := q.chunkFilePath(0)
|
||||||
if err := fs.WriteFileAtomically(filepath, nil); err != nil {
|
if err := fs.WriteFileAtomically(filepath, nil); err != nil {
|
||||||
return nil, fmt.Errorf("cannot create %q: %s", filepath, err)
|
return nil, fmt.Errorf("cannot create %q: %w", filepath, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if mi.Name != q.name {
|
if mi.Name != q.name {
|
||||||
|
@ -209,7 +209,7 @@ func tryOpeningQueue(path, name string, chunkFileSize, maxBlockSize, maxPendingB
|
||||||
// Locate reader and writer chunks in the path.
|
// Locate reader and writer chunks in the path.
|
||||||
fis, err := ioutil.ReadDir(path)
|
fis, err := ioutil.ReadDir(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot read contents of the directory %q: %s", path, err)
|
return nil, fmt.Errorf("cannot read contents of the directory %q: %w", path, err)
|
||||||
}
|
}
|
||||||
for _, fi := range fis {
|
for _, fi := range fis {
|
||||||
fname := fi.Name()
|
fname := fi.Name()
|
||||||
|
@ -406,11 +406,11 @@ func (q *Queue) writeBlockLocked(block []byte) error {
|
||||||
q.writerPath = q.chunkFilePath(q.writerOffset)
|
q.writerPath = q.chunkFilePath(q.writerOffset)
|
||||||
w, err := filestream.Create(q.writerPath, false)
|
w, err := filestream.Create(q.writerPath, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot create chunk file %q: %s", q.writerPath, err)
|
return fmt.Errorf("cannot create chunk file %q: %w", q.writerPath, err)
|
||||||
}
|
}
|
||||||
q.writer = w
|
q.writer = w
|
||||||
if err := q.flushMetainfo(); err != nil {
|
if err := q.flushMetainfo(); err != nil {
|
||||||
return fmt.Errorf("cannot flush metainfo: %s", err)
|
return fmt.Errorf("cannot flush metainfo: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -421,12 +421,12 @@ func (q *Queue) writeBlockLocked(block []byte) error {
|
||||||
err := q.write(header.B)
|
err := q.write(header.B)
|
||||||
headerBufPool.Put(header)
|
headerBufPool.Put(header)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot write header with size 8 bytes to %q: %s", q.writerPath, err)
|
return fmt.Errorf("cannot write header with size 8 bytes to %q: %w", q.writerPath, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Write block contents.
|
// Write block contents.
|
||||||
if err := q.write(block); err != nil {
|
if err := q.write(block); err != nil {
|
||||||
return fmt.Errorf("cannot write block contents with size %d bytes to %q: %s", len(block), q.writerPath, err)
|
return fmt.Errorf("cannot write block contents with size %d bytes to %q: %w", len(block), q.writerPath, err)
|
||||||
}
|
}
|
||||||
q.blocksWritten.Inc()
|
q.blocksWritten.Inc()
|
||||||
q.bytesWritten.Add(len(block))
|
q.bytesWritten.Add(len(block))
|
||||||
|
@ -474,11 +474,11 @@ func (q *Queue) readBlockLocked(dst []byte) ([]byte, error) {
|
||||||
q.readerPath = q.chunkFilePath(q.readerOffset)
|
q.readerPath = q.chunkFilePath(q.readerOffset)
|
||||||
r, err := filestream.Open(q.readerPath, true)
|
r, err := filestream.Open(q.readerPath, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return dst, fmt.Errorf("cannot open chunk file %q: %s", q.readerPath, err)
|
return dst, fmt.Errorf("cannot open chunk file %q: %w", q.readerPath, err)
|
||||||
}
|
}
|
||||||
q.reader = r
|
q.reader = r
|
||||||
if err := q.flushMetainfo(); err != nil {
|
if err := q.flushMetainfo(); err != nil {
|
||||||
return dst, fmt.Errorf("cannot flush metainfo: %s", err)
|
return dst, fmt.Errorf("cannot flush metainfo: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -489,7 +489,7 @@ func (q *Queue) readBlockLocked(dst []byte) ([]byte, error) {
|
||||||
blockLen := encoding.UnmarshalUint64(header.B)
|
blockLen := encoding.UnmarshalUint64(header.B)
|
||||||
headerBufPool.Put(header)
|
headerBufPool.Put(header)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return dst, fmt.Errorf("cannot read header with size 8 bytes from %q: %s", q.readerPath, err)
|
return dst, fmt.Errorf("cannot read header with size 8 bytes from %q: %w", q.readerPath, err)
|
||||||
}
|
}
|
||||||
if blockLen > q.maxBlockSize {
|
if blockLen > q.maxBlockSize {
|
||||||
return dst, fmt.Errorf("too big block size read from %q: %d bytes; cannot exceed %d bytes", q.readerPath, blockLen, q.maxBlockSize)
|
return dst, fmt.Errorf("too big block size read from %q: %d bytes; cannot exceed %d bytes", q.readerPath, blockLen, q.maxBlockSize)
|
||||||
|
@ -499,7 +499,7 @@ func (q *Queue) readBlockLocked(dst []byte) ([]byte, error) {
|
||||||
dstLen := len(dst)
|
dstLen := len(dst)
|
||||||
dst = bytesutil.Resize(dst, dstLen+int(blockLen))
|
dst = bytesutil.Resize(dst, dstLen+int(blockLen))
|
||||||
if err := q.readFull(dst[dstLen:]); err != nil {
|
if err := q.readFull(dst[dstLen:]); err != nil {
|
||||||
return dst, fmt.Errorf("cannot read block contents with size %d bytes from %q: %s", blockLen, q.readerPath, err)
|
return dst, fmt.Errorf("cannot read block contents with size %d bytes from %q: %w", blockLen, q.readerPath, err)
|
||||||
}
|
}
|
||||||
q.blocksRead.Inc()
|
q.blocksRead.Inc()
|
||||||
q.bytesRead.Add(int(blockLen))
|
q.bytesRead.Add(int(blockLen))
|
||||||
|
@ -546,7 +546,7 @@ func (q *Queue) flushMetainfo() error {
|
||||||
}
|
}
|
||||||
metainfoPath := q.metainfoPath()
|
metainfoPath := q.metainfoPath()
|
||||||
if err := mi.WriteToFile(metainfoPath); err != nil {
|
if err := mi.WriteToFile(metainfoPath); err != nil {
|
||||||
return fmt.Errorf("cannot write metainfo to %q: %s", metainfoPath, err)
|
return fmt.Errorf("cannot write metainfo to %q: %w", metainfoPath, err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -567,10 +567,10 @@ func (mi *metainfo) Reset() {
|
||||||
func (mi *metainfo) WriteToFile(path string) error {
|
func (mi *metainfo) WriteToFile(path string) error {
|
||||||
data, err := json.Marshal(mi)
|
data, err := json.Marshal(mi)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot marshal persistent queue metainfo %#v: %s", mi, err)
|
return fmt.Errorf("cannot marshal persistent queue metainfo %#v: %w", mi, err)
|
||||||
}
|
}
|
||||||
if err := ioutil.WriteFile(path, data, 0600); err != nil {
|
if err := ioutil.WriteFile(path, data, 0600); err != nil {
|
||||||
return fmt.Errorf("cannot write persistent queue metainfo to %q: %s", path, err)
|
return fmt.Errorf("cannot write persistent queue metainfo to %q: %w", path, err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -582,10 +582,10 @@ func (mi *metainfo) ReadFromFile(path string) error {
|
||||||
if os.IsNotExist(err) {
|
if os.IsNotExist(err) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return fmt.Errorf("cannot read %q: %s", path, err)
|
return fmt.Errorf("cannot read %q: %w", path, err)
|
||||||
}
|
}
|
||||||
if err := json.Unmarshal(data, mi); err != nil {
|
if err := json.Unmarshal(data, mi); err != nil {
|
||||||
return fmt.Errorf("cannot unmarshal persistent queue metainfo from %q: %s", path, err)
|
return fmt.Errorf("cannot unmarshal persistent queue metainfo from %q: %w", path, err)
|
||||||
}
|
}
|
||||||
if mi.ReaderOffset > mi.WriterOffset {
|
if mi.ReaderOffset > mi.WriterOffset {
|
||||||
return fmt.Errorf("invalid data read from %q: readerOffset=%d cannot exceed writerOffset=%d", path, mi.ReaderOffset, mi.WriterOffset)
|
return fmt.Errorf("invalid data read from %q: readerOffset=%d cannot exceed writerOffset=%d", path, mi.ReaderOffset, mi.WriterOffset)
|
||||||
|
|
|
@ -495,20 +495,20 @@ func TestQueueLimitedSize(t *testing.T) {
|
||||||
|
|
||||||
func mustCreateFile(path, contents string) {
|
func mustCreateFile(path, contents string) {
|
||||||
if err := ioutil.WriteFile(path, []byte(contents), 0600); err != nil {
|
if err := ioutil.WriteFile(path, []byte(contents), 0600); err != nil {
|
||||||
panic(fmt.Errorf("cannot create file %q with %d bytes contents: %s", path, len(contents), err))
|
panic(fmt.Errorf("cannot create file %q with %d bytes contents: %w", path, len(contents), err))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func mustCreateDir(path string) {
|
func mustCreateDir(path string) {
|
||||||
mustDeleteDir(path)
|
mustDeleteDir(path)
|
||||||
if err := os.MkdirAll(path, 0700); err != nil {
|
if err := os.MkdirAll(path, 0700); err != nil {
|
||||||
panic(fmt.Errorf("cannot create dir %q: %s", path, err))
|
panic(fmt.Errorf("cannot create dir %q: %w", path, err))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func mustDeleteDir(path string) {
|
func mustDeleteDir(path string) {
|
||||||
if err := os.RemoveAll(path); err != nil {
|
if err := os.RemoveAll(path); err != nil {
|
||||||
panic(fmt.Errorf("cannot remove dir %q: %s", path, err))
|
panic(fmt.Errorf("cannot remove dir %q: %w", path, err))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -516,6 +516,6 @@ func mustCreateEmptyMetainfo(path, name string) {
|
||||||
var mi metainfo
|
var mi metainfo
|
||||||
mi.Name = name
|
mi.Name = name
|
||||||
if err := mi.WriteToFile(path + "/metainfo.json"); err != nil {
|
if err := mi.WriteToFile(path + "/metainfo.json"); err != nil {
|
||||||
panic(fmt.Errorf("cannot create metainfo: %s", err))
|
panic(fmt.Errorf("cannot create metainfo: %w", err))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -93,7 +93,7 @@ func NewConfig(baseDir string, basicAuth *BasicAuthConfig, bearerToken, bearerTo
|
||||||
path := getFilepath(baseDir, basicAuth.PasswordFile)
|
path := getFilepath(baseDir, basicAuth.PasswordFile)
|
||||||
pass, err := readPasswordFromFile(path)
|
pass, err := readPasswordFromFile(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot read password from `password_file`=%q set in `basic_auth` section: %s", basicAuth.PasswordFile, err)
|
return nil, fmt.Errorf("cannot read password from `password_file`=%q set in `basic_auth` section: %w", basicAuth.PasswordFile, err)
|
||||||
}
|
}
|
||||||
password = pass
|
password = pass
|
||||||
}
|
}
|
||||||
|
@ -109,7 +109,7 @@ func NewConfig(baseDir string, basicAuth *BasicAuthConfig, bearerToken, bearerTo
|
||||||
path := getFilepath(baseDir, bearerTokenFile)
|
path := getFilepath(baseDir, bearerTokenFile)
|
||||||
token, err := readPasswordFromFile(path)
|
token, err := readPasswordFromFile(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot read bearer token from `bearer_token_file`=%q: %s", bearerTokenFile, err)
|
return nil, fmt.Errorf("cannot read bearer token from `bearer_token_file`=%q: %w", bearerTokenFile, err)
|
||||||
}
|
}
|
||||||
bearerToken = token
|
bearerToken = token
|
||||||
}
|
}
|
||||||
|
@ -131,7 +131,7 @@ func NewConfig(baseDir string, basicAuth *BasicAuthConfig, bearerToken, bearerTo
|
||||||
keyPath := getFilepath(baseDir, tlsConfig.KeyFile)
|
keyPath := getFilepath(baseDir, tlsConfig.KeyFile)
|
||||||
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
|
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot load TLS certificate from `cert_file`=%q, `key_file`=%q: %s", tlsConfig.CertFile, tlsConfig.KeyFile, err)
|
return nil, fmt.Errorf("cannot load TLS certificate from `cert_file`=%q, `key_file`=%q: %w", tlsConfig.CertFile, tlsConfig.KeyFile, err)
|
||||||
}
|
}
|
||||||
tlsCertificate = &cert
|
tlsCertificate = &cert
|
||||||
}
|
}
|
||||||
|
@ -139,7 +139,7 @@ func NewConfig(baseDir string, basicAuth *BasicAuthConfig, bearerToken, bearerTo
|
||||||
path := getFilepath(baseDir, tlsConfig.CAFile)
|
path := getFilepath(baseDir, tlsConfig.CAFile)
|
||||||
data, err := ioutil.ReadFile(path)
|
data, err := ioutil.ReadFile(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot read `ca_file` %q: %s", tlsConfig.CAFile, err)
|
return nil, fmt.Errorf("cannot read `ca_file` %q: %w", tlsConfig.CAFile, err)
|
||||||
}
|
}
|
||||||
tlsRootCA = x509.NewCertPool()
|
tlsRootCA = x509.NewCertPool()
|
||||||
if !tlsRootCA.AppendCertsFromPEM(data) {
|
if !tlsRootCA.AppendCertsFromPEM(data) {
|
||||||
|
|
|
@ -14,7 +14,7 @@ func MarshalWriteRequest(dst []byte, wr *WriteRequest) []byte {
|
||||||
dst = dst[:dstLen+size]
|
dst = dst[:dstLen+size]
|
||||||
n, err := wr.MarshalToSizedBuffer(dst[dstLen:])
|
n, err := wr.MarshalToSizedBuffer(dst[dstLen:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(fmt.Errorf("BUG: unexpected error when marshaling WriteRequest: %s", err))
|
panic(fmt.Errorf("BUG: unexpected error when marshaling WriteRequest: %w", err))
|
||||||
}
|
}
|
||||||
return dst[:dstLen+n]
|
return dst[:dstLen+n]
|
||||||
}
|
}
|
||||||
|
|
|
@ -26,11 +26,11 @@ type RelabelConfig struct {
|
||||||
func LoadRelabelConfigs(path string) ([]ParsedRelabelConfig, error) {
|
func LoadRelabelConfigs(path string) ([]ParsedRelabelConfig, error) {
|
||||||
data, err := ioutil.ReadFile(path)
|
data, err := ioutil.ReadFile(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot read `relabel_configs` from %q: %s", path, err)
|
return nil, fmt.Errorf("cannot read `relabel_configs` from %q: %w", path, err)
|
||||||
}
|
}
|
||||||
var rcs []RelabelConfig
|
var rcs []RelabelConfig
|
||||||
if err := yaml.UnmarshalStrict(data, &rcs); err != nil {
|
if err := yaml.UnmarshalStrict(data, &rcs); err != nil {
|
||||||
return nil, fmt.Errorf("cannot unmarshal `relabel_configs` from %q: %s", path, err)
|
return nil, fmt.Errorf("cannot unmarshal `relabel_configs` from %q: %w", path, err)
|
||||||
}
|
}
|
||||||
return ParseRelabelConfigs(nil, rcs)
|
return ParseRelabelConfigs(nil, rcs)
|
||||||
}
|
}
|
||||||
|
@ -44,7 +44,7 @@ func ParseRelabelConfigs(dst []ParsedRelabelConfig, rcs []RelabelConfig) ([]Pars
|
||||||
var err error
|
var err error
|
||||||
dst, err = parseRelabelConfig(dst, &rcs[i])
|
dst, err = parseRelabelConfig(dst, &rcs[i])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return dst, fmt.Errorf("error when parsing `relabel_config` #%d: %s", i+1, err)
|
return dst, fmt.Errorf("error when parsing `relabel_config` #%d: %w", i+1, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return dst, nil
|
return dst, nil
|
||||||
|
@ -67,7 +67,7 @@ func parseRelabelConfig(dst []ParsedRelabelConfig, rc *RelabelConfig) ([]ParsedR
|
||||||
}
|
}
|
||||||
re, err := regexp.Compile(regex)
|
re, err := regexp.Compile(regex)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return dst, fmt.Errorf("cannot parse `regex` %q: %s", regex, err)
|
return dst, fmt.Errorf("cannot parse `regex` %q: %w", regex, err)
|
||||||
}
|
}
|
||||||
regexCompiled = re
|
regexCompiled = re
|
||||||
}
|
}
|
||||||
|
|
|
@ -94,13 +94,13 @@ func (c *client) ReadData(dst []byte) ([]byte, error) {
|
||||||
fasthttp.ReleaseResponse(resp)
|
fasthttp.ReleaseResponse(resp)
|
||||||
if err == fasthttp.ErrTimeout {
|
if err == fasthttp.ErrTimeout {
|
||||||
scrapesTimedout.Inc()
|
scrapesTimedout.Inc()
|
||||||
return dst, fmt.Errorf("error when scraping %q with timeout %s: %s", c.scrapeURL, c.hc.ReadTimeout, err)
|
return dst, fmt.Errorf("error when scraping %q with timeout %s: %w", c.scrapeURL, c.hc.ReadTimeout, err)
|
||||||
}
|
}
|
||||||
if err == fasthttp.ErrBodyTooLarge {
|
if err == fasthttp.ErrBodyTooLarge {
|
||||||
return dst, fmt.Errorf("the response from %q exceeds -promscrape.maxScrapeSize=%d; "+
|
return dst, fmt.Errorf("the response from %q exceeds -promscrape.maxScrapeSize=%d; "+
|
||||||
"either reduce the response size for the target or increase -promscrape.maxScrapeSize", c.scrapeURL, *maxScrapeSize)
|
"either reduce the response size for the target or increase -promscrape.maxScrapeSize", c.scrapeURL, *maxScrapeSize)
|
||||||
}
|
}
|
||||||
return dst, fmt.Errorf("error when scraping %q: %s", c.scrapeURL, err)
|
return dst, fmt.Errorf("error when scraping %q: %w", c.scrapeURL, err)
|
||||||
}
|
}
|
||||||
dstLen := len(dst)
|
dstLen := len(dst)
|
||||||
if ce := resp.Header.Peek("Content-Encoding"); string(ce) == "gzip" {
|
if ce := resp.Header.Peek("Content-Encoding"); string(ce) == "gzip" {
|
||||||
|
@ -109,7 +109,7 @@ func (c *client) ReadData(dst []byte) ([]byte, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fasthttp.ReleaseResponse(resp)
|
fasthttp.ReleaseResponse(resp)
|
||||||
scrapesGunzipFailed.Inc()
|
scrapesGunzipFailed.Inc()
|
||||||
return dst, fmt.Errorf("cannot ungzip response from %q: %s", c.scrapeURL, err)
|
return dst, fmt.Errorf("cannot ungzip response from %q: %w", c.scrapeURL, err)
|
||||||
}
|
}
|
||||||
scrapesGunzipped.Inc()
|
scrapesGunzipped.Inc()
|
||||||
} else {
|
} else {
|
||||||
|
@ -146,7 +146,7 @@ again:
|
||||||
// Retry request if the server closed the keep-alive connection during the first attempt.
|
// Retry request if the server closed the keep-alive connection during the first attempt.
|
||||||
attempts++
|
attempts++
|
||||||
if attempts > 3 {
|
if attempts > 3 {
|
||||||
return fmt.Errorf("the server closed 3 subsequent connections: %s", err)
|
return fmt.Errorf("the server closed 3 subsequent connections: %w", err)
|
||||||
}
|
}
|
||||||
goto again
|
goto again
|
||||||
}
|
}
|
||||||
|
|
|
@ -99,11 +99,11 @@ type StaticConfig struct {
|
||||||
func loadStaticConfigs(path string) ([]StaticConfig, error) {
|
func loadStaticConfigs(path string) ([]StaticConfig, error) {
|
||||||
data, err := ioutil.ReadFile(path)
|
data, err := ioutil.ReadFile(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot read `static_configs` from %q: %s", path, err)
|
return nil, fmt.Errorf("cannot read `static_configs` from %q: %w", path, err)
|
||||||
}
|
}
|
||||||
var stcs []StaticConfig
|
var stcs []StaticConfig
|
||||||
if err := yaml.UnmarshalStrict(data, &stcs); err != nil {
|
if err := yaml.UnmarshalStrict(data, &stcs); err != nil {
|
||||||
return nil, fmt.Errorf("cannot unmarshal `static_configs` from %q: %s", path, err)
|
return nil, fmt.Errorf("cannot unmarshal `static_configs` from %q: %w", path, err)
|
||||||
}
|
}
|
||||||
return stcs, nil
|
return stcs, nil
|
||||||
}
|
}
|
||||||
|
@ -112,11 +112,11 @@ func loadStaticConfigs(path string) ([]StaticConfig, error) {
|
||||||
func loadConfig(path string) (cfg *Config, data []byte, err error) {
|
func loadConfig(path string) (cfg *Config, data []byte, err error) {
|
||||||
data, err = ioutil.ReadFile(path)
|
data, err = ioutil.ReadFile(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, fmt.Errorf("cannot read Prometheus config from %q: %s", path, err)
|
return nil, nil, fmt.Errorf("cannot read Prometheus config from %q: %w", path, err)
|
||||||
}
|
}
|
||||||
var cfgObj Config
|
var cfgObj Config
|
||||||
if err := cfgObj.parse(data, path); err != nil {
|
if err := cfgObj.parse(data, path); err != nil {
|
||||||
return nil, nil, fmt.Errorf("cannot parse Prometheus config from %q: %s", path, err)
|
return nil, nil, fmt.Errorf("cannot parse Prometheus config from %q: %w", path, err)
|
||||||
}
|
}
|
||||||
if *dryRun {
|
if *dryRun {
|
||||||
// This is a dirty hack for checking Prometheus config only.
|
// This is a dirty hack for checking Prometheus config only.
|
||||||
|
@ -130,18 +130,18 @@ func loadConfig(path string) (cfg *Config, data []byte, err error) {
|
||||||
|
|
||||||
func (cfg *Config) parse(data []byte, path string) error {
|
func (cfg *Config) parse(data []byte, path string) error {
|
||||||
if err := unmarshalMaybeStrict(data, cfg); err != nil {
|
if err := unmarshalMaybeStrict(data, cfg); err != nil {
|
||||||
return fmt.Errorf("cannot unmarshal data: %s", err)
|
return fmt.Errorf("cannot unmarshal data: %w", err)
|
||||||
}
|
}
|
||||||
absPath, err := filepath.Abs(path)
|
absPath, err := filepath.Abs(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot obtain abs path for %q: %s", path, err)
|
return fmt.Errorf("cannot obtain abs path for %q: %w", path, err)
|
||||||
}
|
}
|
||||||
cfg.baseDir = filepath.Dir(absPath)
|
cfg.baseDir = filepath.Dir(absPath)
|
||||||
for i := range cfg.ScrapeConfigs {
|
for i := range cfg.ScrapeConfigs {
|
||||||
sc := &cfg.ScrapeConfigs[i]
|
sc := &cfg.ScrapeConfigs[i]
|
||||||
swc, err := getScrapeWorkConfig(sc, cfg.baseDir, &cfg.Global)
|
swc, err := getScrapeWorkConfig(sc, cfg.baseDir, &cfg.Global)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot parse `scrape_config` #%d: %s", i+1, err)
|
return fmt.Errorf("cannot parse `scrape_config` #%d: %w", i+1, err)
|
||||||
}
|
}
|
||||||
sc.swc = swc
|
sc.swc = swc
|
||||||
}
|
}
|
||||||
|
@ -378,17 +378,17 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf
|
||||||
params := sc.Params
|
params := sc.Params
|
||||||
ac, err := promauth.NewConfig(baseDir, sc.BasicAuth, sc.BearerToken, sc.BearerTokenFile, sc.TLSConfig)
|
ac, err := promauth.NewConfig(baseDir, sc.BasicAuth, sc.BearerToken, sc.BearerTokenFile, sc.TLSConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse auth config for `job_name` %q: %s", jobName, err)
|
return nil, fmt.Errorf("cannot parse auth config for `job_name` %q: %w", jobName, err)
|
||||||
}
|
}
|
||||||
var relabelConfigs []promrelabel.ParsedRelabelConfig
|
var relabelConfigs []promrelabel.ParsedRelabelConfig
|
||||||
relabelConfigs, err = promrelabel.ParseRelabelConfigs(relabelConfigs[:0], sc.RelabelConfigs)
|
relabelConfigs, err = promrelabel.ParseRelabelConfigs(relabelConfigs[:0], sc.RelabelConfigs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse `relabel_configs` for `job_name` %q: %s", jobName, err)
|
return nil, fmt.Errorf("cannot parse `relabel_configs` for `job_name` %q: %w", jobName, err)
|
||||||
}
|
}
|
||||||
var metricRelabelConfigs []promrelabel.ParsedRelabelConfig
|
var metricRelabelConfigs []promrelabel.ParsedRelabelConfig
|
||||||
metricRelabelConfigs, err = promrelabel.ParseRelabelConfigs(metricRelabelConfigs[:0], sc.MetricRelabelConfigs)
|
metricRelabelConfigs, err = promrelabel.ParseRelabelConfigs(metricRelabelConfigs[:0], sc.MetricRelabelConfigs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse `metric_relabel_configs` for `job_name` %q: %s", jobName, err)
|
return nil, fmt.Errorf("cannot parse `metric_relabel_configs` for `job_name` %q: %w", jobName, err)
|
||||||
}
|
}
|
||||||
swc := &scrapeWorkConfig{
|
swc := &scrapeWorkConfig{
|
||||||
scrapeInterval: scrapeInterval,
|
scrapeInterval: scrapeInterval,
|
||||||
|
@ -580,7 +580,7 @@ func appendScrapeWork(dst []ScrapeWork, swc *scrapeWorkConfig, target string, ex
|
||||||
paramsStr := url.Values(paramsRelabeled).Encode()
|
paramsStr := url.Values(paramsRelabeled).Encode()
|
||||||
scrapeURL := fmt.Sprintf("%s://%s%s%s%s", schemeRelabeled, addressRelabeled, metricsPathRelabeled, optionalQuestion, paramsStr)
|
scrapeURL := fmt.Sprintf("%s://%s%s%s%s", schemeRelabeled, addressRelabeled, metricsPathRelabeled, optionalQuestion, paramsStr)
|
||||||
if _, err := url.Parse(scrapeURL); err != nil {
|
if _, err := url.Parse(scrapeURL); err != nil {
|
||||||
return dst, fmt.Errorf("invalid url %q for scheme=%q (%q), target=%q (%q), metrics_path=%q (%q) for `job_name` %q: %s",
|
return dst, fmt.Errorf("invalid url %q for scheme=%q (%q), target=%q (%q), metrics_path=%q (%q) for `job_name` %q: %w",
|
||||||
scrapeURL, swc.scheme, schemeRelabeled, target, addressRelabeled, swc.metricsPath, metricsPathRelabeled, swc.jobName, err)
|
scrapeURL, swc.scheme, schemeRelabeled, target, addressRelabeled, swc.metricsPath, metricsPathRelabeled, swc.jobName, err)
|
||||||
}
|
}
|
||||||
// Set missing "instance" label according to https://www.robustperception.io/life-of-a-label
|
// Set missing "instance" label according to https://www.robustperception.io/life-of-a-label
|
||||||
|
|
|
@ -135,7 +135,7 @@ scrape_configs:
|
||||||
func getFileSDScrapeWork(data []byte, path string) ([]ScrapeWork, error) {
|
func getFileSDScrapeWork(data []byte, path string) ([]ScrapeWork, error) {
|
||||||
var cfg Config
|
var cfg Config
|
||||||
if err := cfg.parse(data, path); err != nil {
|
if err := cfg.parse(data, path); err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse data: %s", err)
|
return nil, fmt.Errorf("cannot parse data: %w", err)
|
||||||
}
|
}
|
||||||
return cfg.getFileSDScrapeWork(nil), nil
|
return cfg.getFileSDScrapeWork(nil), nil
|
||||||
}
|
}
|
||||||
|
@ -143,7 +143,7 @@ func getFileSDScrapeWork(data []byte, path string) ([]ScrapeWork, error) {
|
||||||
func getStaticScrapeWork(data []byte, path string) ([]ScrapeWork, error) {
|
func getStaticScrapeWork(data []byte, path string) ([]ScrapeWork, error) {
|
||||||
var cfg Config
|
var cfg Config
|
||||||
if err := cfg.parse(data, path); err != nil {
|
if err := cfg.parse(data, path); err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse data: %s", err)
|
return nil, fmt.Errorf("cannot parse data: %w", err)
|
||||||
}
|
}
|
||||||
return cfg.getStaticScrapeWork(), nil
|
return cfg.getStaticScrapeWork(), nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -22,7 +22,7 @@ type AgentConfig struct {
|
||||||
func parseAgent(data []byte) (*Agent, error) {
|
func parseAgent(data []byte) (*Agent, error) {
|
||||||
var a Agent
|
var a Agent
|
||||||
if err := json.Unmarshal(data, &a); err != nil {
|
if err := json.Unmarshal(data, &a); err != nil {
|
||||||
return nil, fmt.Errorf("cannot unmarshal agent info from %q: %s", data, err)
|
return nil, fmt.Errorf("cannot unmarshal agent info from %q: %w", data, err)
|
||||||
}
|
}
|
||||||
return &a, nil
|
return &a, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -47,7 +47,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
|
||||||
}
|
}
|
||||||
ac, err := promauth.NewConfig(baseDir, ba, token, "", sdc.TLSConfig)
|
ac, err := promauth.NewConfig(baseDir, ba, token, "", sdc.TLSConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse auth config: %s", err)
|
return nil, fmt.Errorf("cannot parse auth config: %w", err)
|
||||||
}
|
}
|
||||||
apiServer := sdc.Server
|
apiServer := sdc.Server
|
||||||
if apiServer == "" {
|
if apiServer == "" {
|
||||||
|
@ -62,7 +62,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
|
||||||
}
|
}
|
||||||
client, err := discoveryutils.NewClient(apiServer, ac)
|
client, err := discoveryutils.NewClient(apiServer, ac)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot create HTTP client for %q: %s", apiServer, err)
|
return nil, fmt.Errorf("cannot create HTTP client for %q: %w", apiServer, err)
|
||||||
}
|
}
|
||||||
tagSeparator := ","
|
tagSeparator := ","
|
||||||
if sdc.TagSeparator != nil {
|
if sdc.TagSeparator != nil {
|
||||||
|
@ -92,7 +92,7 @@ func getToken(token *string) (string, error) {
|
||||||
if tokenFile := os.Getenv("CONSUL_HTTP_TOKEN_FILE"); tokenFile != "" {
|
if tokenFile := os.Getenv("CONSUL_HTTP_TOKEN_FILE"); tokenFile != "" {
|
||||||
data, err := ioutil.ReadFile(tokenFile)
|
data, err := ioutil.ReadFile(tokenFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", fmt.Errorf("cannot read consul token file %q; probably, `token` arg is missing in `consul_sd_config`? error: %s", tokenFile, err)
|
return "", fmt.Errorf("cannot read consul token file %q; probably, `token` arg is missing in `consul_sd_config`? error: %w", tokenFile, err)
|
||||||
}
|
}
|
||||||
return string(data), nil
|
return string(data), nil
|
||||||
}
|
}
|
||||||
|
@ -108,7 +108,7 @@ func getDatacenter(client *discoveryutils.Client, dc string) (string, error) {
|
||||||
// See https://www.consul.io/api/agent.html#read-configuration
|
// See https://www.consul.io/api/agent.html#read-configuration
|
||||||
data, err := client.GetAPIResponse("/v1/agent/self")
|
data, err := client.GetAPIResponse("/v1/agent/self")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", fmt.Errorf("cannot query consul agent info: %s", err)
|
return "", fmt.Errorf("cannot query consul agent info: %w", err)
|
||||||
}
|
}
|
||||||
a, err := parseAgent(data)
|
a, err := parseAgent(data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@ -30,11 +30,11 @@ type SDConfig struct {
|
||||||
func GetLabels(sdc *SDConfig, baseDir string) ([]map[string]string, error) {
|
func GetLabels(sdc *SDConfig, baseDir string) ([]map[string]string, error) {
|
||||||
cfg, err := getAPIConfig(sdc, baseDir)
|
cfg, err := getAPIConfig(sdc, baseDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot get API config: %s", err)
|
return nil, fmt.Errorf("cannot get API config: %w", err)
|
||||||
}
|
}
|
||||||
ms, err := getServiceNodesLabels(cfg)
|
ms, err := getServiceNodesLabels(cfg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error when fetching service nodes data from Consul: %s", err)
|
return nil, fmt.Errorf("error when fetching service nodes data from Consul: %w", err)
|
||||||
}
|
}
|
||||||
return ms, nil
|
return ms, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -28,11 +28,11 @@ func getAllServiceNodes(cfg *apiConfig) ([]ServiceNode, error) {
|
||||||
// See https://www.consul.io/api/catalog.html#list-services
|
// See https://www.consul.io/api/catalog.html#list-services
|
||||||
data, err := getAPIResponse(cfg, "/v1/catalog/services")
|
data, err := getAPIResponse(cfg, "/v1/catalog/services")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot obtain services: %s", err)
|
return nil, fmt.Errorf("cannot obtain services: %w", err)
|
||||||
}
|
}
|
||||||
var m map[string][]string
|
var m map[string][]string
|
||||||
if err := json.Unmarshal(data, &m); err != nil {
|
if err := json.Unmarshal(data, &m); err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse services response %q: %s", data, err)
|
return nil, fmt.Errorf("cannot parse services response %q: %w", data, err)
|
||||||
}
|
}
|
||||||
serviceNames := make(map[string]bool)
|
serviceNames := make(map[string]bool)
|
||||||
for serviceName, tags := range m {
|
for serviceName, tags := range m {
|
||||||
|
@ -125,7 +125,7 @@ func getServiceNodes(cfg *apiConfig, serviceName string) ([]ServiceNode, error)
|
||||||
}
|
}
|
||||||
data, err := getAPIResponse(cfg, path)
|
data, err := getAPIResponse(cfg, path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot obtain instances for serviceName=%q: %s", serviceName, err)
|
return nil, fmt.Errorf("cannot obtain instances for serviceName=%q: %w", serviceName, err)
|
||||||
}
|
}
|
||||||
return parseServiceNodes(data)
|
return parseServiceNodes(data)
|
||||||
}
|
}
|
||||||
|
@ -173,7 +173,7 @@ type Check struct {
|
||||||
func parseServiceNodes(data []byte) ([]ServiceNode, error) {
|
func parseServiceNodes(data []byte) ([]ServiceNode, error) {
|
||||||
var sns []ServiceNode
|
var sns []ServiceNode
|
||||||
if err := json.Unmarshal(data, &sns); err != nil {
|
if err := json.Unmarshal(data, &sns); err != nil {
|
||||||
return nil, fmt.Errorf("cannot unmarshal ServiceNodes from %q: %s", data, err)
|
return nil, fmt.Errorf("cannot unmarshal ServiceNodes from %q: %w", data, err)
|
||||||
}
|
}
|
||||||
return sns, nil
|
return sns, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -36,7 +36,7 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) {
|
||||||
if len(region) == 0 {
|
if len(region) == 0 {
|
||||||
r, err := getDefaultRegion()
|
r, err := getDefaultRegion()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot determine default ec2 region; probably, `region` param in `ec2_sd_configs` is missing; the error: %s", err)
|
return nil, fmt.Errorf("cannot determine default ec2 region; probably, `region` param in `ec2_sd_configs` is missing; the error: %w", err)
|
||||||
}
|
}
|
||||||
region = r
|
region = r
|
||||||
}
|
}
|
||||||
|
@ -88,7 +88,7 @@ func getDefaultRegion() (string, error) {
|
||||||
}
|
}
|
||||||
var id IdentityDocument
|
var id IdentityDocument
|
||||||
if err := json.Unmarshal(data, &id); err != nil {
|
if err := json.Unmarshal(data, &id); err != nil {
|
||||||
return "", fmt.Errorf("cannot parse identity document: %s", err)
|
return "", fmt.Errorf("cannot parse identity document: %w", err)
|
||||||
}
|
}
|
||||||
return id.Region, nil
|
return id.Region, nil
|
||||||
}
|
}
|
||||||
|
@ -109,28 +109,28 @@ func getMetadataByPath(apiPath string) ([]byte, error) {
|
||||||
sessionTokenURL := "http://169.254.169.254/latest/api/token"
|
sessionTokenURL := "http://169.254.169.254/latest/api/token"
|
||||||
req, err := http.NewRequest("PUT", sessionTokenURL, nil)
|
req, err := http.NewRequest("PUT", sessionTokenURL, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot create request for IMDSv2 session token at url %q: %s", sessionTokenURL, err)
|
return nil, fmt.Errorf("cannot create request for IMDSv2 session token at url %q: %w", sessionTokenURL, err)
|
||||||
}
|
}
|
||||||
req.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", "60")
|
req.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", "60")
|
||||||
resp, err := client.Do(req)
|
resp, err := client.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot obtain IMDSv2 session token from %q; probably, `region` is missing in `ec2_sd_config`; error: %s", sessionTokenURL, err)
|
return nil, fmt.Errorf("cannot obtain IMDSv2 session token from %q; probably, `region` is missing in `ec2_sd_config`; error: %w", sessionTokenURL, err)
|
||||||
}
|
}
|
||||||
token, err := readResponseBody(resp, sessionTokenURL)
|
token, err := readResponseBody(resp, sessionTokenURL)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot read IMDSv2 session token from %q; probably, `region` is missing in `ec2_sd_config`; error: %s", sessionTokenURL, err)
|
return nil, fmt.Errorf("cannot read IMDSv2 session token from %q; probably, `region` is missing in `ec2_sd_config`; error: %w", sessionTokenURL, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Use session token in the request.
|
// Use session token in the request.
|
||||||
apiURL := "http://169.254.169.254/latest/" + apiPath
|
apiURL := "http://169.254.169.254/latest/" + apiPath
|
||||||
req, err = http.NewRequest("GET", apiURL, nil)
|
req, err = http.NewRequest("GET", apiURL, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot create request to %q: %s", apiURL, err)
|
return nil, fmt.Errorf("cannot create request to %q: %w", apiURL, err)
|
||||||
}
|
}
|
||||||
req.Header.Set("X-aws-ec2-metadata-token", string(token))
|
req.Header.Set("X-aws-ec2-metadata-token", string(token))
|
||||||
resp, err = client.Do(req)
|
resp, err = client.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot obtain response for %q: %s", apiURL, err)
|
return nil, fmt.Errorf("cannot obtain response for %q: %w", apiURL, err)
|
||||||
}
|
}
|
||||||
return readResponseBody(resp, apiURL)
|
return readResponseBody(resp, apiURL)
|
||||||
}
|
}
|
||||||
|
@ -158,11 +158,11 @@ func getAPIResponse(cfg *apiConfig, action, nextPageToken string) ([]byte, error
|
||||||
apiURL += "&Version=2013-10-15"
|
apiURL += "&Version=2013-10-15"
|
||||||
req, err := newSignedRequest(apiURL, "ec2", cfg.region, cfg.accessKey, cfg.secretKey)
|
req, err := newSignedRequest(apiURL, "ec2", cfg.region, cfg.accessKey, cfg.secretKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot create signed request: %s", err)
|
return nil, fmt.Errorf("cannot create signed request: %w", err)
|
||||||
}
|
}
|
||||||
resp, err := discoveryutils.GetHTTPClient().Do(req)
|
resp, err := discoveryutils.GetHTTPClient().Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot perform http request to %q: %s", apiURL, err)
|
return nil, fmt.Errorf("cannot perform http request to %q: %w", apiURL, err)
|
||||||
}
|
}
|
||||||
return readResponseBody(resp, apiURL)
|
return readResponseBody(resp, apiURL)
|
||||||
}
|
}
|
||||||
|
@ -171,7 +171,7 @@ func readResponseBody(resp *http.Response, apiURL string) ([]byte, error) {
|
||||||
data, err := ioutil.ReadAll(resp.Body)
|
data, err := ioutil.ReadAll(resp.Body)
|
||||||
_ = resp.Body.Close()
|
_ = resp.Body.Close()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot read response from %q: %s", apiURL, err)
|
return nil, fmt.Errorf("cannot read response from %q: %w", apiURL, err)
|
||||||
}
|
}
|
||||||
if resp.StatusCode != http.StatusOK {
|
if resp.StatusCode != http.StatusOK {
|
||||||
return nil, fmt.Errorf("unexpected status code for %q; got %d; want %d; response body: %q",
|
return nil, fmt.Errorf("unexpected status code for %q; got %d; want %d; response body: %q",
|
||||||
|
|
|
@ -34,11 +34,11 @@ type Filter struct {
|
||||||
func GetLabels(sdc *SDConfig) ([]map[string]string, error) {
|
func GetLabels(sdc *SDConfig) ([]map[string]string, error) {
|
||||||
cfg, err := getAPIConfig(sdc)
|
cfg, err := getAPIConfig(sdc)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot get API config: %s", err)
|
return nil, fmt.Errorf("cannot get API config: %w", err)
|
||||||
}
|
}
|
||||||
ms, err := getInstancesLabels(cfg)
|
ms, err := getInstancesLabels(cfg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error when fetching instances data from EC2: %s", err)
|
return nil, fmt.Errorf("error when fetching instances data from EC2: %w", err)
|
||||||
}
|
}
|
||||||
return ms, nil
|
return ms, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -31,11 +31,11 @@ func getReservations(cfg *apiConfig) ([]Reservation, error) {
|
||||||
for {
|
for {
|
||||||
data, err := getAPIResponse(cfg, action, pageToken)
|
data, err := getAPIResponse(cfg, action, pageToken)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot obtain instances: %s", err)
|
return nil, fmt.Errorf("cannot obtain instances: %w", err)
|
||||||
}
|
}
|
||||||
ir, err := parseInstancesResponse(data)
|
ir, err := parseInstancesResponse(data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse instance list: %s", err)
|
return nil, fmt.Errorf("cannot parse instance list: %w", err)
|
||||||
}
|
}
|
||||||
rs = append(rs, ir.ReservationSet.Items...)
|
rs = append(rs, ir.ReservationSet.Items...)
|
||||||
if len(ir.NextPageToken) == 0 {
|
if len(ir.NextPageToken) == 0 {
|
||||||
|
@ -121,7 +121,7 @@ type Tag struct {
|
||||||
func parseInstancesResponse(data []byte) (*InstancesResponse, error) {
|
func parseInstancesResponse(data []byte) (*InstancesResponse, error) {
|
||||||
var v InstancesResponse
|
var v InstancesResponse
|
||||||
if err := xml.Unmarshal(data, &v); err != nil {
|
if err := xml.Unmarshal(data, &v); err != nil {
|
||||||
return nil, fmt.Errorf("cannot unmarshal InstancesResponse from %q: %s", data, err)
|
return nil, fmt.Errorf("cannot unmarshal InstancesResponse from %q: %w", data, err)
|
||||||
}
|
}
|
||||||
return &v, nil
|
return &v, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -24,7 +24,7 @@ func newSignedRequest(apiURL, service, region, accessKey, secretKey string) (*ht
|
||||||
func newSignedRequestWithTime(apiURL, service, region, accessKey, secretKey string, t time.Time) (*http.Request, error) {
|
func newSignedRequestWithTime(apiURL, service, region, accessKey, secretKey string, t time.Time) (*http.Request, error) {
|
||||||
uri, err := url.Parse(apiURL)
|
uri, err := url.Parse(apiURL)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse %q: %s", apiURL, err)
|
return nil, fmt.Errorf("cannot parse %q: %w", apiURL, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create canonicalRequest
|
// Create canonicalRequest
|
||||||
|
@ -65,7 +65,7 @@ func newSignedRequestWithTime(apiURL, service, region, accessKey, secretKey stri
|
||||||
|
|
||||||
req, err := http.NewRequest("GET", apiURL, nil)
|
req, err := http.NewRequest("GET", apiURL, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot create request from %q: %s", apiURL, err)
|
return nil, fmt.Errorf("cannot create request from %q: %w", apiURL, err)
|
||||||
}
|
}
|
||||||
req.Header.Set("x-amz-date", amzdate)
|
req.Header.Set("x-amz-date", amzdate)
|
||||||
req.Header.Set("Authorization", authHeader)
|
req.Header.Set("Authorization", authHeader)
|
||||||
|
|
|
@ -36,13 +36,13 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
client, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/compute.readonly")
|
client, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/compute.readonly")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot create oauth2 client for gce: %s", err)
|
return nil, fmt.Errorf("cannot create oauth2 client for gce: %w", err)
|
||||||
}
|
}
|
||||||
project := sdc.Project
|
project := sdc.Project
|
||||||
if len(project) == 0 {
|
if len(project) == 0 {
|
||||||
proj, err := getCurrentProject()
|
proj, err := getCurrentProject()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot determine the current project; make sure `vmagent` runs inside GCE; error: %s", err)
|
return nil, fmt.Errorf("cannot determine the current project; make sure `vmagent` runs inside GCE; error: %w", err)
|
||||||
}
|
}
|
||||||
project = proj
|
project = proj
|
||||||
logger.Infof("autodetected the current GCE project: %q", project)
|
logger.Infof("autodetected the current GCE project: %q", project)
|
||||||
|
@ -52,7 +52,7 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) {
|
||||||
// Autodetect the current zone.
|
// Autodetect the current zone.
|
||||||
zone, err := getCurrentZone()
|
zone, err := getCurrentZone()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot determine the current zone; make sure `vmagent` runs inside GCE; error: %s", err)
|
return nil, fmt.Errorf("cannot determine the current zone; make sure `vmagent` runs inside GCE; error: %w", err)
|
||||||
}
|
}
|
||||||
zones = append(zones, zone)
|
zones = append(zones, zone)
|
||||||
logger.Infof("autodetected the current GCE zone: %q", zone)
|
logger.Infof("autodetected the current GCE zone: %q", zone)
|
||||||
|
@ -60,7 +60,7 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) {
|
||||||
// Autodetect zones for project.
|
// Autodetect zones for project.
|
||||||
zs, err := getZonesForProject(client, project, sdc.Filter)
|
zs, err := getZonesForProject(client, project, sdc.Filter)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot obtain zones for project %q: %s", project, err)
|
return nil, fmt.Errorf("cannot obtain zones for project %q: %w", project, err)
|
||||||
}
|
}
|
||||||
zones = zs
|
zones = zs
|
||||||
logger.Infof("autodetected all the zones for the GCE project %q: %q", project, zones)
|
logger.Infof("autodetected all the zones for the GCE project %q: %q", project, zones)
|
||||||
|
@ -88,7 +88,7 @@ func getAPIResponse(client *http.Client, apiURL, filter, pageToken string) ([]by
|
||||||
apiURL = appendNonEmptyQueryArg(apiURL, "pageToken", pageToken)
|
apiURL = appendNonEmptyQueryArg(apiURL, "pageToken", pageToken)
|
||||||
resp, err := client.Get(apiURL)
|
resp, err := client.Get(apiURL)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot query %q: %s", apiURL, err)
|
return nil, fmt.Errorf("cannot query %q: %w", apiURL, err)
|
||||||
}
|
}
|
||||||
return readResponseBody(resp, apiURL)
|
return readResponseBody(resp, apiURL)
|
||||||
}
|
}
|
||||||
|
@ -97,7 +97,7 @@ func readResponseBody(resp *http.Response, apiURL string) ([]byte, error) {
|
||||||
data, err := ioutil.ReadAll(resp.Body)
|
data, err := ioutil.ReadAll(resp.Body)
|
||||||
_ = resp.Body.Close()
|
_ = resp.Body.Close()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot read response from %q: %s", apiURL, err)
|
return nil, fmt.Errorf("cannot read response from %q: %w", apiURL, err)
|
||||||
}
|
}
|
||||||
if resp.StatusCode != http.StatusOK {
|
if resp.StatusCode != http.StatusOK {
|
||||||
return nil, fmt.Errorf("unexpected status code for %q; got %d; want %d; response body: %q",
|
return nil, fmt.Errorf("unexpected status code for %q; got %d; want %d; response body: %q",
|
||||||
|
@ -144,12 +144,12 @@ func getGCEMetadata(path string) ([]byte, error) {
|
||||||
metadataURL := "http://metadata.google.internal/computeMetadata/v1/" + path
|
metadataURL := "http://metadata.google.internal/computeMetadata/v1/" + path
|
||||||
req, err := http.NewRequest("GET", metadataURL, nil)
|
req, err := http.NewRequest("GET", metadataURL, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot create http request for %q: %s", metadataURL, err)
|
return nil, fmt.Errorf("cannot create http request for %q: %w", metadataURL, err)
|
||||||
}
|
}
|
||||||
req.Header.Set("Metadata-Flavor", "Google")
|
req.Header.Set("Metadata-Flavor", "Google")
|
||||||
resp, err := discoveryutils.GetHTTPClient().Do(req)
|
resp, err := discoveryutils.GetHTTPClient().Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot obtain response to %q: %s", metadataURL, err)
|
return nil, fmt.Errorf("cannot obtain response to %q: %w", metadataURL, err)
|
||||||
}
|
}
|
||||||
return readResponseBody(resp, metadataURL)
|
return readResponseBody(resp, metadataURL)
|
||||||
}
|
}
|
||||||
|
|
|
@ -51,7 +51,7 @@ func (z *ZoneYAML) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
func GetLabels(sdc *SDConfig) ([]map[string]string, error) {
|
func GetLabels(sdc *SDConfig) ([]map[string]string, error) {
|
||||||
cfg, err := getAPIConfig(sdc)
|
cfg, err := getAPIConfig(sdc)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot get API config: %s", err)
|
return nil, fmt.Errorf("cannot get API config: %w", err)
|
||||||
}
|
}
|
||||||
ms := getInstancesLabels(cfg)
|
ms := getInstancesLabels(cfg)
|
||||||
return ms, nil
|
return ms, nil
|
||||||
|
|
|
@ -58,11 +58,11 @@ func getInstancesForProjectAndZone(client *http.Client, project, zone, filter st
|
||||||
for {
|
for {
|
||||||
data, err := getAPIResponse(client, instsURL, filter, pageToken)
|
data, err := getAPIResponse(client, instsURL, filter, pageToken)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot obtain instances: %s", err)
|
return nil, fmt.Errorf("cannot obtain instances: %w", err)
|
||||||
}
|
}
|
||||||
il, err := parseInstanceList(data)
|
il, err := parseInstanceList(data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse instance list from %q: %s", instsURL, err)
|
return nil, fmt.Errorf("cannot parse instance list from %q: %w", instsURL, err)
|
||||||
}
|
}
|
||||||
insts = append(insts, il.Items...)
|
insts = append(insts, il.Items...)
|
||||||
if len(il.NextPageToken) == 0 {
|
if len(il.NextPageToken) == 0 {
|
||||||
|
@ -125,7 +125,7 @@ type MetadataEntry struct {
|
||||||
func parseInstanceList(data []byte) (*InstanceList, error) {
|
func parseInstanceList(data []byte) (*InstanceList, error) {
|
||||||
var il InstanceList
|
var il InstanceList
|
||||||
if err := json.Unmarshal(data, &il); err != nil {
|
if err := json.Unmarshal(data, &il); err != nil {
|
||||||
return nil, fmt.Errorf("cannot unmarshal InstanceList from %q: %s", data, err)
|
return nil, fmt.Errorf("cannot unmarshal InstanceList from %q: %w", data, err)
|
||||||
}
|
}
|
||||||
return &il, nil
|
return &il, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -14,11 +14,11 @@ func getZonesForProject(client *http.Client, project, filter string) ([]string,
|
||||||
for {
|
for {
|
||||||
data, err := getAPIResponse(client, zonesURL, filter, pageToken)
|
data, err := getAPIResponse(client, zonesURL, filter, pageToken)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot obtain zones: %s", err)
|
return nil, fmt.Errorf("cannot obtain zones: %w", err)
|
||||||
}
|
}
|
||||||
zl, err := parseZoneList(data)
|
zl, err := parseZoneList(data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse zone list from %q: %s", zonesURL, err)
|
return nil, fmt.Errorf("cannot parse zone list from %q: %w", zonesURL, err)
|
||||||
}
|
}
|
||||||
for _, z := range zl.Items {
|
for _, z := range zl.Items {
|
||||||
zones = append(zones, z.Name)
|
zones = append(zones, z.Name)
|
||||||
|
@ -45,7 +45,7 @@ type Zone struct {
|
||||||
func parseZoneList(data []byte) (*ZoneList, error) {
|
func parseZoneList(data []byte) (*ZoneList, error) {
|
||||||
var zl ZoneList
|
var zl ZoneList
|
||||||
if err := json.Unmarshal(data, &zl); err != nil {
|
if err := json.Unmarshal(data, &zl); err != nil {
|
||||||
return nil, fmt.Errorf("cannot unmarshal ZoneList from %q: %s", data, err)
|
return nil, fmt.Errorf("cannot unmarshal ZoneList from %q: %w", data, err)
|
||||||
}
|
}
|
||||||
return &zl, nil
|
return &zl, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -29,7 +29,7 @@ func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
|
||||||
func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
|
func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
|
||||||
ac, err := promauth.NewConfig(baseDir, sdc.BasicAuth, sdc.BearerToken, sdc.BearerTokenFile, sdc.TLSConfig)
|
ac, err := promauth.NewConfig(baseDir, sdc.BasicAuth, sdc.BearerToken, sdc.BearerTokenFile, sdc.TLSConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse auth config: %s", err)
|
return nil, fmt.Errorf("cannot parse auth config: %w", err)
|
||||||
}
|
}
|
||||||
apiServer := sdc.APIServer
|
apiServer := sdc.APIServer
|
||||||
if len(apiServer) == 0 {
|
if len(apiServer) == 0 {
|
||||||
|
@ -52,13 +52,13 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
|
||||||
}
|
}
|
||||||
acNew, err := promauth.NewConfig(".", nil, "", "/var/run/secrets/kubernetes.io/serviceaccount/token", &tlsConfig)
|
acNew, err := promauth.NewConfig(".", nil, "", "/var/run/secrets/kubernetes.io/serviceaccount/token", &tlsConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot initialize service account auth: %s; probably, `kubernetes_sd_config->api_server` is missing in Prometheus configs?", err)
|
return nil, fmt.Errorf("cannot initialize service account auth: %w; probably, `kubernetes_sd_config->api_server` is missing in Prometheus configs?", err)
|
||||||
}
|
}
|
||||||
ac = acNew
|
ac = acNew
|
||||||
}
|
}
|
||||||
client, err := discoveryutils.NewClient(apiServer, ac)
|
client, err := discoveryutils.NewClient(apiServer, ac)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot create HTTP client for %q: %s", apiServer, err)
|
return nil, fmt.Errorf("cannot create HTTP client for %q: %w", apiServer, err)
|
||||||
}
|
}
|
||||||
cfg := &apiConfig{
|
cfg := &apiConfig{
|
||||||
client: client,
|
client: client,
|
||||||
|
|
|
@ -53,11 +53,11 @@ func getEndpoints(cfg *apiConfig) ([]Endpoints, error) {
|
||||||
func getEndpointsByPath(cfg *apiConfig, path string) ([]Endpoints, error) {
|
func getEndpointsByPath(cfg *apiConfig, path string) ([]Endpoints, error) {
|
||||||
data, err := getAPIResponse(cfg, "endpoints", path)
|
data, err := getAPIResponse(cfg, "endpoints", path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot obtain endpoints data from API server: %s", err)
|
return nil, fmt.Errorf("cannot obtain endpoints data from API server: %w", err)
|
||||||
}
|
}
|
||||||
epl, err := parseEndpointsList(data)
|
epl, err := parseEndpointsList(data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse endpoints response from API server: %s", err)
|
return nil, fmt.Errorf("cannot parse endpoints response from API server: %w", err)
|
||||||
}
|
}
|
||||||
return epl.Items, nil
|
return epl.Items, nil
|
||||||
}
|
}
|
||||||
|
@ -119,7 +119,7 @@ type EndpointPort struct {
|
||||||
func parseEndpointsList(data []byte) (*EndpointsList, error) {
|
func parseEndpointsList(data []byte) (*EndpointsList, error) {
|
||||||
var esl EndpointsList
|
var esl EndpointsList
|
||||||
if err := json.Unmarshal(data, &esl); err != nil {
|
if err := json.Unmarshal(data, &esl); err != nil {
|
||||||
return nil, fmt.Errorf("cannot unmarshal EndpointsList from %q: %s", data, err)
|
return nil, fmt.Errorf("cannot unmarshal EndpointsList from %q: %w", data, err)
|
||||||
}
|
}
|
||||||
return &esl, nil
|
return &esl, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -43,11 +43,11 @@ func getIngresses(cfg *apiConfig) ([]Ingress, error) {
|
||||||
func getIngressesByPath(cfg *apiConfig, path string) ([]Ingress, error) {
|
func getIngressesByPath(cfg *apiConfig, path string) ([]Ingress, error) {
|
||||||
data, err := getAPIResponse(cfg, "ingress", path)
|
data, err := getAPIResponse(cfg, "ingress", path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot obtain ingresses data from API server: %s", err)
|
return nil, fmt.Errorf("cannot obtain ingresses data from API server: %w", err)
|
||||||
}
|
}
|
||||||
igl, err := parseIngressList(data)
|
igl, err := parseIngressList(data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse ingresses response from API server: %s", err)
|
return nil, fmt.Errorf("cannot parse ingresses response from API server: %w", err)
|
||||||
}
|
}
|
||||||
return igl.Items, nil
|
return igl.Items, nil
|
||||||
}
|
}
|
||||||
|
@ -108,7 +108,7 @@ type HTTPIngressPath struct {
|
||||||
func parseIngressList(data []byte) (*IngressList, error) {
|
func parseIngressList(data []byte) (*IngressList, error) {
|
||||||
var il IngressList
|
var il IngressList
|
||||||
if err := json.Unmarshal(data, &il); err != nil {
|
if err := json.Unmarshal(data, &il); err != nil {
|
||||||
return nil, fmt.Errorf("cannot unmarshal IngressList from %q: %s", data, err)
|
return nil, fmt.Errorf("cannot unmarshal IngressList from %q: %w", data, err)
|
||||||
}
|
}
|
||||||
return &il, nil
|
return &il, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -39,7 +39,7 @@ type Selector struct {
|
||||||
func GetLabels(sdc *SDConfig, baseDir string) ([]map[string]string, error) {
|
func GetLabels(sdc *SDConfig, baseDir string) ([]map[string]string, error) {
|
||||||
cfg, err := getAPIConfig(sdc, baseDir)
|
cfg, err := getAPIConfig(sdc, baseDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot create API config: %s", err)
|
return nil, fmt.Errorf("cannot create API config: %w", err)
|
||||||
}
|
}
|
||||||
switch sdc.Role {
|
switch sdc.Role {
|
||||||
case "node":
|
case "node":
|
||||||
|
|
|
@ -11,11 +11,11 @@ import (
|
||||||
func getNodesLabels(cfg *apiConfig) ([]map[string]string, error) {
|
func getNodesLabels(cfg *apiConfig) ([]map[string]string, error) {
|
||||||
data, err := getAPIResponse(cfg, "node", "/api/v1/nodes")
|
data, err := getAPIResponse(cfg, "node", "/api/v1/nodes")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot obtain nodes data from API server: %s", err)
|
return nil, fmt.Errorf("cannot obtain nodes data from API server: %w", err)
|
||||||
}
|
}
|
||||||
nl, err := parseNodeList(data)
|
nl, err := parseNodeList(data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse nodes response from API server: %s", err)
|
return nil, fmt.Errorf("cannot parse nodes response from API server: %w", err)
|
||||||
}
|
}
|
||||||
var ms []map[string]string
|
var ms []map[string]string
|
||||||
for _, n := range nl.Items {
|
for _, n := range nl.Items {
|
||||||
|
@ -67,7 +67,7 @@ type NodeDaemonEndpoints struct {
|
||||||
func parseNodeList(data []byte) (*NodeList, error) {
|
func parseNodeList(data []byte) (*NodeList, error) {
|
||||||
var nl NodeList
|
var nl NodeList
|
||||||
if err := json.Unmarshal(data, &nl); err != nil {
|
if err := json.Unmarshal(data, &nl); err != nil {
|
||||||
return nil, fmt.Errorf("cannot unmarshal NodeList from %q: %s", data, err)
|
return nil, fmt.Errorf("cannot unmarshal NodeList from %q: %w", data, err)
|
||||||
}
|
}
|
||||||
return &nl, nil
|
return &nl, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -47,11 +47,11 @@ func getPods(cfg *apiConfig) ([]Pod, error) {
|
||||||
func getPodsByPath(cfg *apiConfig, path string) ([]Pod, error) {
|
func getPodsByPath(cfg *apiConfig, path string) ([]Pod, error) {
|
||||||
data, err := getAPIResponse(cfg, "pod", path)
|
data, err := getAPIResponse(cfg, "pod", path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot obtain pods data from API server: %s", err)
|
return nil, fmt.Errorf("cannot obtain pods data from API server: %w", err)
|
||||||
}
|
}
|
||||||
pl, err := parsePodList(data)
|
pl, err := parsePodList(data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse pods response from API server: %s", err)
|
return nil, fmt.Errorf("cannot parse pods response from API server: %w", err)
|
||||||
}
|
}
|
||||||
return pl.Items, nil
|
return pl.Items, nil
|
||||||
}
|
}
|
||||||
|
@ -118,7 +118,7 @@ type PodCondition struct {
|
||||||
func parsePodList(data []byte) (*PodList, error) {
|
func parsePodList(data []byte) (*PodList, error) {
|
||||||
var pl PodList
|
var pl PodList
|
||||||
if err := json.Unmarshal(data, &pl); err != nil {
|
if err := json.Unmarshal(data, &pl); err != nil {
|
||||||
return nil, fmt.Errorf("cannot unmarshal PodList from %q: %s", data, err)
|
return nil, fmt.Errorf("cannot unmarshal PodList from %q: %w", data, err)
|
||||||
}
|
}
|
||||||
return &pl, nil
|
return &pl, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -45,11 +45,11 @@ func getServices(cfg *apiConfig) ([]Service, error) {
|
||||||
func getServicesByPath(cfg *apiConfig, path string) ([]Service, error) {
|
func getServicesByPath(cfg *apiConfig, path string) ([]Service, error) {
|
||||||
data, err := getAPIResponse(cfg, "service", path)
|
data, err := getAPIResponse(cfg, "service", path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot obtain services data from API server: %s", err)
|
return nil, fmt.Errorf("cannot obtain services data from API server: %w", err)
|
||||||
}
|
}
|
||||||
sl, err := parseServiceList(data)
|
sl, err := parseServiceList(data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse services response from API server: %s", err)
|
return nil, fmt.Errorf("cannot parse services response from API server: %w", err)
|
||||||
}
|
}
|
||||||
return sl.Items, nil
|
return sl.Items, nil
|
||||||
}
|
}
|
||||||
|
@ -92,7 +92,7 @@ type ServicePort struct {
|
||||||
func parseServiceList(data []byte) (*ServiceList, error) {
|
func parseServiceList(data []byte) (*ServiceList, error) {
|
||||||
var sl ServiceList
|
var sl ServiceList
|
||||||
if err := json.Unmarshal(data, &sl); err != nil {
|
if err := json.Unmarshal(data, &sl); err != nil {
|
||||||
return nil, fmt.Errorf("cannot unmarshal ServiceList from %q: %s", data, err)
|
return nil, fmt.Errorf("cannot unmarshal ServiceList from %q: %w", data, err)
|
||||||
}
|
}
|
||||||
return &sl, nil
|
return &sl, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -112,13 +112,13 @@ func (c *Client) GetAPIResponse(path string) ([]byte, error) {
|
||||||
var resp fasthttp.Response
|
var resp fasthttp.Response
|
||||||
// There is no need in calling DoTimeout, since the timeout is already set in c.hc.ReadTimeout above.
|
// There is no need in calling DoTimeout, since the timeout is already set in c.hc.ReadTimeout above.
|
||||||
if err := c.hc.Do(&req, &resp); err != nil {
|
if err := c.hc.Do(&req, &resp); err != nil {
|
||||||
return nil, fmt.Errorf("cannot fetch %q: %s", requestURL, err)
|
return nil, fmt.Errorf("cannot fetch %q: %w", requestURL, err)
|
||||||
}
|
}
|
||||||
var data []byte
|
var data []byte
|
||||||
if ce := resp.Header.Peek("Content-Encoding"); string(ce) == "gzip" {
|
if ce := resp.Header.Peek("Content-Encoding"); string(ce) == "gzip" {
|
||||||
dst, err := fasthttp.AppendGunzipBytes(nil, resp.Body())
|
dst, err := fasthttp.AppendGunzipBytes(nil, resp.Body())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot ungzip response from %q: %s", requestURL, err)
|
return nil, fmt.Errorf("cannot ungzip response from %q: %w", requestURL, err)
|
||||||
}
|
}
|
||||||
data = dst
|
data = dst
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@ -32,7 +32,7 @@ func TestScrapeWorkScrapeInternalFailure(t *testing.T) {
|
||||||
var pushDataErr error
|
var pushDataErr error
|
||||||
sw.PushData = func(wr *prompbmarshal.WriteRequest) {
|
sw.PushData = func(wr *prompbmarshal.WriteRequest) {
|
||||||
if err := expectEqualTimeseries(wr.Timeseries, timeseriesExpected); err != nil {
|
if err := expectEqualTimeseries(wr.Timeseries, timeseriesExpected); err != nil {
|
||||||
pushDataErr = fmt.Errorf("unexpected data pushed: %s\ngot\n%#v\nwant\n%#v", err, wr.Timeseries, timeseriesExpected)
|
pushDataErr = fmt.Errorf("unexpected data pushed: %w\ngot\n%#v\nwant\n%#v", err, wr.Timeseries, timeseriesExpected)
|
||||||
}
|
}
|
||||||
pushDataCalls++
|
pushDataCalls++
|
||||||
}
|
}
|
||||||
|
@ -72,7 +72,7 @@ func TestScrapeWorkScrapeInternalSuccess(t *testing.T) {
|
||||||
var pushDataErr error
|
var pushDataErr error
|
||||||
sw.PushData = func(wr *prompbmarshal.WriteRequest) {
|
sw.PushData = func(wr *prompbmarshal.WriteRequest) {
|
||||||
if err := expectEqualTimeseries(wr.Timeseries, timeseriesExpected); err != nil {
|
if err := expectEqualTimeseries(wr.Timeseries, timeseriesExpected); err != nil {
|
||||||
pushDataErr = fmt.Errorf("unexpected data pushed: %s\ngot\n%#v\nwant\n%#v", err, wr.Timeseries, timeseriesExpected)
|
pushDataErr = fmt.Errorf("unexpected data pushed: %w\ngot\n%#v\nwant\n%#v", err, wr.Timeseries, timeseriesExpected)
|
||||||
}
|
}
|
||||||
pushDataCalls++
|
pushDataCalls++
|
||||||
}
|
}
|
||||||
|
@ -336,11 +336,11 @@ func parseData(data string) []prompbmarshal.TimeSeries {
|
||||||
func expectEqualTimeseries(tss, tssExpected []prompbmarshal.TimeSeries) error {
|
func expectEqualTimeseries(tss, tssExpected []prompbmarshal.TimeSeries) error {
|
||||||
m, err := timeseriesToMap(tss)
|
m, err := timeseriesToMap(tss)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("invalid generated timeseries: %s", err)
|
return fmt.Errorf("invalid generated timeseries: %w", err)
|
||||||
}
|
}
|
||||||
mExpected, err := timeseriesToMap(tssExpected)
|
mExpected, err := timeseriesToMap(tssExpected)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("invalid expected timeseries: %s", err)
|
return fmt.Errorf("invalid expected timeseries: %w", err)
|
||||||
}
|
}
|
||||||
if len(m) != len(mExpected) {
|
if len(m) != len(mExpected) {
|
||||||
return fmt.Errorf("unexpected time series len; got %d; want %d", len(m), len(mExpected))
|
return fmt.Errorf("unexpected time series len; got %d; want %d", len(m), len(mExpected))
|
||||||
|
|
|
@ -42,7 +42,7 @@ vm_tcplistener_write_calls_total{name="https", addr=":443"} 132356
|
||||||
timestamp := int64(0)
|
timestamp := int64(0)
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
if err := sw.scrapeInternal(timestamp); err != nil {
|
if err := sw.scrapeInternal(timestamp); err != nil {
|
||||||
panic(fmt.Errorf("unexpected error: %s", err))
|
panic(fmt.Errorf("unexpected error: %w", err))
|
||||||
}
|
}
|
||||||
timestamp++
|
timestamp++
|
||||||
}
|
}
|
||||||
|
|
|
@ -63,7 +63,7 @@ func ParseColumnDescriptors(s string) ([]ColumnDescriptor, error) {
|
||||||
}
|
}
|
||||||
pos, err := strconv.Atoi(a[0])
|
pos, err := strconv.Atoi(a[0])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse <column_pos> part from the entry #%d %q: %s", i+1, col, err)
|
return nil, fmt.Errorf("cannot parse <column_pos> part from the entry #%d %q: %w", i+1, col, err)
|
||||||
}
|
}
|
||||||
if pos <= 0 {
|
if pos <= 0 {
|
||||||
return nil, fmt.Errorf("<column_pos> cannot be smaller than 1; got %d for entry #%d %q", pos, i+1, col)
|
return nil, fmt.Errorf("<column_pos> cannot be smaller than 1; got %d for entry #%d %q", pos, i+1, col)
|
||||||
|
@ -82,7 +82,7 @@ func ParseColumnDescriptors(s string) ([]ColumnDescriptor, error) {
|
||||||
}
|
}
|
||||||
parseTimestamp, err := parseTimeFormat(a[2])
|
parseTimestamp, err := parseTimeFormat(a[2])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("cannot parse time format from the entry #%d %q: %s", i+1, col, err)
|
return nil, fmt.Errorf("cannot parse time format from the entry #%d %q: %w", i+1, col, err)
|
||||||
}
|
}
|
||||||
cd.ParseTimestamp = parseTimestamp
|
cd.ParseTimestamp = parseTimestamp
|
||||||
hasTimeCol = true
|
hasTimeCol = true
|
||||||
|
@ -156,7 +156,7 @@ func parseUnixTimestampNanoseconds(s string) (int64, error) {
|
||||||
func parseRFC3339(s string) (int64, error) {
|
func parseRFC3339(s string) (int64, error) {
|
||||||
t, err := time.Parse(time.RFC3339, s)
|
t, err := time.Parse(time.RFC3339, s)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, fmt.Errorf("cannot parse time in RFC3339 from %q: %s", s, err)
|
return 0, fmt.Errorf("cannot parse time in RFC3339 from %q: %w", s, err)
|
||||||
}
|
}
|
||||||
return t.UnixNano() / 1e6, nil
|
return t.UnixNano() / 1e6, nil
|
||||||
}
|
}
|
||||||
|
@ -165,7 +165,7 @@ func newParseCustomTimeFunc(format string) func(s string) (int64, error) {
|
||||||
return func(s string) (int64, error) {
|
return func(s string) (int64, error) {
|
||||||
t, err := time.Parse(format, s)
|
t, err := time.Parse(format, s)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, fmt.Errorf("cannot parse time in custom format %q from %q: %s", format, s, err)
|
return 0, fmt.Errorf("cannot parse time in custom format %q from %q: %w", format, s, err)
|
||||||
}
|
}
|
||||||
return t.UnixNano() / 1e6, nil
|
return t.UnixNano() / 1e6, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -91,7 +91,7 @@ func parseRows(sc *scanner, dst []Row, tags []Tag, metrics []metric, cds []Colum
|
||||||
if parseTimestamp := cd.ParseTimestamp; parseTimestamp != nil {
|
if parseTimestamp := cd.ParseTimestamp; parseTimestamp != nil {
|
||||||
timestamp, err := parseTimestamp(sc.Column)
|
timestamp, err := parseTimestamp(sc.Column)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
sc.Error = fmt.Errorf("cannot parse timestamp from %q: %s", sc.Column, err)
|
sc.Error = fmt.Errorf("cannot parse timestamp from %q: %w", sc.Column, err)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
r.Timestamp = timestamp
|
r.Timestamp = timestamp
|
||||||
|
|
|
@ -30,13 +30,13 @@ func ParseStream(req *http.Request, callback func(rows []Row) error) error {
|
||||||
format := q.Get("format")
|
format := q.Get("format")
|
||||||
cds, err := ParseColumnDescriptors(format)
|
cds, err := ParseColumnDescriptors(format)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot parse the provided csv format: %s", err)
|
return fmt.Errorf("cannot parse the provided csv format: %w", err)
|
||||||
}
|
}
|
||||||
r := req.Body
|
r := req.Body
|
||||||
if req.Header.Get("Content-Encoding") == "gzip" {
|
if req.Header.Get("Content-Encoding") == "gzip" {
|
||||||
zr, err := common.GetGzipReader(r)
|
zr, err := common.GetGzipReader(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("cannot read gzipped csv data: %s", err)
|
return fmt.Errorf("cannot read gzipped csv data: %w", err)
|
||||||
}
|
}
|
||||||
defer common.PutGzipReader(zr)
|
defer common.PutGzipReader(zr)
|
||||||
r = zr
|
r = zr
|
||||||
|
@ -60,7 +60,7 @@ func (ctx *streamContext) Read(r io.Reader, cds []ColumnDescriptor) bool {
|
||||||
if ctx.err != nil {
|
if ctx.err != nil {
|
||||||
if ctx.err != io.EOF {
|
if ctx.err != io.EOF {
|
||||||
readErrors.Inc()
|
readErrors.Inc()
|
||||||
ctx.err = fmt.Errorf("cannot read csv data: %s", ctx.err)
|
ctx.err = fmt.Errorf("cannot read csv data: %w", ctx.err)
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
|
@ -75,7 +75,7 @@ func (r *Row) unmarshal(s string, tagsPool []Tag) ([]Tag, error) {
|
||||||
var err error
|
var err error
|
||||||
tagsPool, err = unmarshalTags(tagsPool, metricAndTags[n+1:])
|
tagsPool, err = unmarshalTags(tagsPool, metricAndTags[n+1:])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return tagsPool, fmt.Errorf("cannot umarshal tags: %s", err)
|
return tagsPool, fmt.Errorf("cannot umarshal tags: %w", err)
|
||||||
}
|
}
|
||||||
tags := tagsPool[tagsStart:]
|
tags := tagsPool[tagsStart:]
|
||||||
r.Tags = tags[:len(tags):len(tags)]
|
r.Tags = tags[:len(tags):len(tags)]
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue