Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

Aliaksandr Valialkin 2022-02-02 23:58:11 +02:00
commit 78b028064f
GPG key ID: A72BEC6CD3D0DED1
128 changed files with 6910 additions and 2532 deletions


@ -267,7 +267,7 @@ golangci-lint: install-golangci-lint
golangci-lint run --exclude '(SA4003|SA1019|SA5011):' -D errcheck -D structcheck --timeout 2m
install-golangci-lint:
which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.43.0
which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.44.0
install-wwhrd:
which wwhrd || GO111MODULE=off go get github.com/frapposelli/wwhrd


@ -70,6 +70,13 @@ run-vmalert: vmalert
-evaluationInterval=3s \
-rule.configCheckInterval=10s
run-vmalert-sd: vmalert
./bin/vmalert -rule=app/vmalert/config/testdata/rules2-good.rules \
-datasource.url=http://localhost:8428 \
-remoteWrite.url=http://localhost:8428 \
-notifier.config=app/vmalert/notifier/testdata/consul.good.yaml \
-configCheckInterval=10s
replay-vmalert: vmalert
./bin/vmalert -rule=app/vmalert/config/testdata/rules-replay-good.rules \
-datasource.url=http://localhost:8428 \


@ -43,7 +43,8 @@ To start using `vmalert` you will need the following things:
* list of rules - PromQL/MetricsQL expressions to execute;
* datasource address - reachable MetricsQL endpoint to run queries against;
* notifier address [optional] - reachable [Alert Manager](https://github.com/prometheus/alertmanager) instance for processing,
aggregating alerts, and sending notifications.
aggregating alerts, and sending notifications. Note that the notifier address also supports Consul Service Discovery via
[config file](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmalert/notifier/config.go).
* remote write address [optional] - [remote write](https://prometheus.io/docs/prometheus/latest/storage/#remote-storage-integrations)
compatible storage to persist rules and alerts state info;
* remote read address [optional] - MetricsQL compatible datasource to restore alerts state from.
@ -587,6 +588,9 @@ The shortlist of configuration flags is the following:
-notifier.basicAuth.password array
Optional basic auth password for -notifier.url
Supports an array of values separated by comma or specified via multiple flags.
-notifier.basicAuth.passwordFile array
Optional path to basic auth password file for -notifier.url
Supports an array of values separated by comma or specified via multiple flags.
-notifier.basicAuth.username array
Optional basic auth username for -notifier.url
Supports an array of values separated by comma or specified via multiple flags.
@ -689,8 +693,8 @@ The shortlist of configuration flags is the following:
absolute path to all .yaml files in root.
Rule files may contain %{ENV_VAR} placeholders, which are substituted by the corresponding env vars.
Supports an array of values separated by comma or specified via multiple flags.
-rule.configCheckInterval duration
Interval for checking for changes in '-rule' files. By default the checking is disabled. Send SIGHUP signal in order to force config check for changes
-configCheckInterval duration
Interval for checking for changes in '-rule' or '-notifier.config' files. By default the checking is disabled. Send SIGHUP signal in order to force config check for changes
-rule.maxResolveDuration duration
Limits the maximum duration for automatic alert expiration, which is by default equal to 3 evaluation intervals of the parent group.
-rule.validateExpressions
@ -703,6 +707,14 @@ The shortlist of configuration flags is the following:
Path to file with TLS certificate. Used only if -tls is set. Prefer ECDSA certs instead of RSA certs as RSA certs are slower
-tlsKeyFile string
Path to file with TLS key. Used only if -tls is set
-promscrape.consul.waitTime duration
Wait time used by Consul service discovery. Default value is used if not set
-promscrape.consulSDCheckInterval duration
Interval for checking for changes in Consul. This works only if consul_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config for details (default 30s)
-promscrape.discovery.concurrency int
The maximum number of concurrent requests to Prometheus autodiscovery API (Consul, Kubernetes, etc.) (default 100)
-promscrape.discovery.concurrentWaitTime duration
The maximum duration for waiting to perform API requests if more than -promscrape.discovery.concurrency requests are simultaneously performed (default 1m0s)
-version
Show VictoriaMetrics version
```
@ -711,7 +723,7 @@ The shortlist of configuration flags is the following:
`vmalert` supports "hot" config reload via the following methods:
* send SIGHUP signal to `vmalert` process;
* send GET request to `/-/reload` endpoint;
* configure `-rule.configCheckInterval` flag for periodic reload
* configure `-configCheckInterval` flag for periodic reload
on config change.
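For illustration, a minimal sketch of such a reload loop in Go (mirroring the shape of `configReload` in `app/vmalert/main.go`; the interval and log messages here are illustrative):
```
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	sighupCh := make(chan os.Signal, 1)
	signal.Notify(sighupCh, syscall.SIGHUP)

	// Corresponds to -configCheckInterval=10s; vmalert leaves the
	// channel nil when the interval is zero, so the case never fires.
	t := time.NewTicker(10 * time.Second)
	defer t.Stop()

	for {
		select {
		case <-sighupCh:
			log.Println("SIGHUP received; reloading configs")
		case <-t.C:
			log.Println("checking '-rule' and '-notifier.config' files for changes")
		}
		// Re-parse rule files and the notifier config here,
		// keeping the previous config if parsing fails.
	}
}
```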
### URL params
@ -732,6 +744,88 @@ Please note, `params` are used only for executing rules expressions (requests to
If there is a conflict between URL params set in the `datasource.url` flag and params in the group definition,
the latter will have higher priority.
### Notifier configuration file
The notifier also supports configuration via a file specified with the `-notifier.config` flag:
```
./bin/vmalert -rule=app/vmalert/config/testdata/rules.good.rules \
-datasource.url=http://localhost:8428 \
-notifier.config=app/vmalert/notifier/testdata/consul.good.yaml
```
The configuration file allows configuring static notifiers or discovering notifiers via
[Consul](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config).
For example:
```
static_configs:
- targets:
- localhost:9093
- localhost:9095
consul_sd_configs:
- server: localhost:8500
services:
- alertmanager
```
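For reference, a minimal sketch of reading such a file with `gopkg.in/yaml.v2` into simplified stand-in structs (the real types live in `app/vmalert/notifier/config.go`):
```
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"gopkg.in/yaml.v2"
)

// Simplified stand-ins for vmalert's notifier.Config and StaticConfig.
type staticConfig struct {
	Targets []string `yaml:"targets"`
}

type config struct {
	Scheme        string         `yaml:"scheme,omitempty"`
	PathPrefix    string         `yaml:"path_prefix,omitempty"`
	StaticConfigs []staticConfig `yaml:"static_configs,omitempty"`
}

func main() {
	data, err := ioutil.ReadFile("notifier.yaml")
	if err != nil {
		log.Fatalf("error reading config file: %s", err)
	}
	var cfg config
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		log.Fatalf("cannot parse config: %s", err)
	}
	for _, sc := range cfg.StaticConfigs {
		fmt.Println("static targets:", sc.Targets)
	}
}
```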
The list of configured or discovered Notifiers can be explored via [UI](#Web).
The configuration file [specification](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmalert/notifier/config.go)
is the following:
```
# Per-target Notifier timeout when pushing alerts.
[ timeout: <duration> | default = 10s ]
# Prefix for the HTTP path alerts are pushed to.
[ path_prefix: <path> | default = / ]
# Configures the protocol scheme used for requests.
[ scheme: <scheme> | default = http ]
# Sets the `Authorization` header on every request with the
# configured username and password.
# password and password_file are mutually exclusive.
basic_auth:
[ username: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
# Optional `Authorization` header configuration.
authorization:
# Sets the authentication type.
[ type: <string> | default: Bearer ]
# Sets the credentials. It is mutually exclusive with
# `credentials_file`.
[ credentials: <secret> ]
# Sets the credentials to the credentials read from the configured file.
# It is mutually exclusive with `credentials`.
[ credentials_file: <filename> ]
# Configures the scrape request's TLS settings.
# see https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tls_config
tls_config:
[ <tls_config> ]
# List of labeled statically configured Notifiers.
static_configs:
targets:
[ - '<host>' ]
# List of Consul service discovery configurations.
# See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config
consul_sd_configs:
[ - <consul_sd_config> ... ]
# List of relabel configurations.
# Supports the same relabeling features as the rest of VictoriaMetrics components.
# See https://docs.victoriametrics.com/vmagent.html#relabeling
relabel_configs:
[ - <relabel_config> ... ]
```
The configuration file can be [hot-reloaded](#hot-config-reload).
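Reloads stay cheap because the watcher hashes the marshaled config and skips re-applying it when the hash is unchanged (see the `Checksum` field in the notifier `Config` further below). A minimal sketch of that idea:
```
package main

import (
	"crypto/md5"
	"fmt"

	"gopkg.in/yaml.v2"
)

// checksum hashes the YAML-marshaled config, as the notifier Config
// does, so an unchanged file can be skipped on reload.
func checksum(cfg interface{}) (string, error) {
	b, err := yaml.Marshal(cfg)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", md5.Sum(b)), nil
}

func main() {
	prev, _ := checksum(map[string]string{"scheme": "http"})
	cur, _ := checksum(map[string]string{"scheme": "http"})
	fmt.Println("config changed:", cur != prev) // false
}
```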
## Contributing


@ -12,9 +12,9 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/metrics"
)
// AlertingRule is basic alert entity
@ -50,10 +50,10 @@ type AlertingRule struct {
}
type alertingRuleMetrics struct {
errors *gauge
pending *gauge
active *gauge
samples *gauge
errors *utils.Gauge
pending *utils.Gauge
active *utils.Gauge
samples *utils.Gauge
}
func newAlertingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rule) *AlertingRule {
@ -78,7 +78,7 @@ func newAlertingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rule
}
labels := fmt.Sprintf(`alertname=%q, group=%q, id="%d"`, ar.Name, group.Name, ar.ID())
ar.metrics.pending = getOrCreateGauge(fmt.Sprintf(`vmalert_alerts_pending{%s}`, labels),
ar.metrics.pending = utils.GetOrCreateGauge(fmt.Sprintf(`vmalert_alerts_pending{%s}`, labels),
func() float64 {
ar.mu.RLock()
defer ar.mu.RUnlock()
@ -90,7 +90,7 @@ func newAlertingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rule
}
return float64(num)
})
ar.metrics.active = getOrCreateGauge(fmt.Sprintf(`vmalert_alerts_firing{%s}`, labels),
ar.metrics.active = utils.GetOrCreateGauge(fmt.Sprintf(`vmalert_alerts_firing{%s}`, labels),
func() float64 {
ar.mu.RLock()
defer ar.mu.RUnlock()
@ -102,7 +102,7 @@ func newAlertingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rule
}
return float64(num)
})
ar.metrics.errors = getOrCreateGauge(fmt.Sprintf(`vmalert_alerting_rules_error{%s}`, labels),
ar.metrics.errors = utils.GetOrCreateGauge(fmt.Sprintf(`vmalert_alerting_rules_error{%s}`, labels),
func() float64 {
ar.mu.RLock()
defer ar.mu.RUnlock()
@ -111,7 +111,7 @@ func newAlertingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rule
}
return 1
})
ar.metrics.samples = getOrCreateGauge(fmt.Sprintf(`vmalert_alerting_rules_last_evaluation_samples{%s}`, labels),
ar.metrics.samples = utils.GetOrCreateGauge(fmt.Sprintf(`vmalert_alerting_rules_last_evaluation_samples{%s}`, labels),
func() float64 {
ar.mu.RLock()
defer ar.mu.RUnlock()
@ -122,10 +122,10 @@ func newAlertingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rule
// Close unregisters rule metrics
func (ar *AlertingRule) Close() {
metrics.UnregisterMetric(ar.metrics.active.name)
metrics.UnregisterMetric(ar.metrics.pending.name)
metrics.UnregisterMetric(ar.metrics.errors.name)
metrics.UnregisterMetric(ar.metrics.samples.name)
ar.metrics.active.Unregister()
ar.metrics.pending.Unregister()
ar.metrics.errors.Unregister()
ar.metrics.samples.Unregister()
}
// String implements Stringer interface
@ -153,7 +153,7 @@ func (ar *AlertingRule) ExecRange(ctx context.Context, start, end time.Time) ([]
return nil, fmt.Errorf("`query` template isn't supported in replay mode")
}
for _, s := range series {
// set additional labels to identify group and rule name
// set additional labels to identify group and rule Name
if ar.Name != "" {
s.SetLabel(alertNameLabel, ar.Name)
}


@ -41,15 +41,15 @@ type Group struct {
}
type groupMetrics struct {
iterationTotal *counter
iterationDuration *summary
iterationTotal *utils.Counter
iterationDuration *utils.Summary
}
func newGroupMetrics(name, file string) *groupMetrics {
m := &groupMetrics{}
labels := fmt.Sprintf(`group=%q, file=%q`, name, file)
m.iterationTotal = getOrCreateCounter(fmt.Sprintf(`vmalert_iteration_total{%s}`, labels))
m.iterationDuration = getOrCreateSummary(fmt.Sprintf(`vmalert_iteration_duration_seconds{%s}`, labels))
m.iterationTotal = utils.GetOrCreateCounter(fmt.Sprintf(`vmalert_iteration_total{%s}`, labels))
m.iterationDuration = utils.GetOrCreateSummary(fmt.Sprintf(`vmalert_iteration_duration_seconds{%s}`, labels))
return m
}
@ -122,7 +122,7 @@ func (g *Group) newRule(qb datasource.QuerierBuilder, rule config.Rule) Rule {
}
// ID return unique group ID that consists of
// rules file and group name
// rules file and group Name
func (g *Group) ID() uint64 {
g.mu.RLock()
defer g.mu.RUnlock()
@ -213,8 +213,8 @@ func (g *Group) close() {
close(g.doneCh)
<-g.finishedCh
metrics.UnregisterMetric(g.metrics.iterationDuration.name)
metrics.UnregisterMetric(g.metrics.iterationTotal.name)
g.metrics.iterationDuration.Unregister()
g.metrics.iterationTotal.Unregister()
for _, rule := range g.Rules {
rule.Close()
}
@ -222,7 +222,7 @@ func (g *Group) close() {
var skipRandSleepOnGroupStart bool
func (g *Group) start(ctx context.Context, nts []notifier.Notifier, rw *remotewrite.Client) {
func (g *Group) start(ctx context.Context, nts func() []notifier.Notifier, rw *remotewrite.Client) {
defer func() { close(g.finishedCh) }()
// Spread group rules evaluation over time in order to reduce load on VictoriaMetrics.
@ -246,16 +246,7 @@ func (g *Group) start(ctx context.Context, nts []notifier.Notifier, rw *remotewr
}
logger.Infof("group %q started; interval=%v; concurrency=%d", g.Name, g.Interval, g.Concurrency)
e := &executor{rw: rw}
for _, nt := range nts {
ent := eNotifier{
Notifier: nt,
alertsSent: getOrCreateCounter(fmt.Sprintf("vmalert_alerts_sent_total{addr=%q}", nt.Addr())),
alertsSendErrors: getOrCreateCounter(fmt.Sprintf("vmalert_alerts_send_errors_total{addr=%q}", nt.Addr())),
}
e.notifiers = append(e.notifiers, ent)
}
e := &executor{rw: rw, notifiers: nts}
t := time.NewTicker(g.Interval)
defer t.Stop()
for {
@ -310,16 +301,10 @@ func getResolveDuration(groupInterval time.Duration) time.Duration {
}
type executor struct {
notifiers []eNotifier
notifiers func() []notifier.Notifier
rw *remotewrite.Client
}
type eNotifier struct {
notifier.Notifier
alertsSent *counter
alertsSendErrors *counter
}
func (e *executor) execConcurrently(ctx context.Context, rules []Rule, concurrency int, resolveDuration time.Duration) chan error {
res := make(chan error, len(rules))
if concurrency == 1 {
@ -400,11 +385,9 @@ func (e *executor) exec(ctx context.Context, rule Rule, resolveDuration time.Dur
}
errGr := new(utils.ErrGroup)
for _, nt := range e.notifiers {
nt.alertsSent.Add(len(alerts))
for _, nt := range e.notifiers() {
if err := nt.Send(ctx, alerts); err != nil {
nt.alertsSendErrors.Inc()
errGr.Add(fmt.Errorf("rule %q: failed to send alerts: %w", rule, err))
errGr.Add(fmt.Errorf("rule %q: failed to send alerts to addr %q: %w", rule, nt.Addr(), err))
}
}
return errGr.Err()
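The diff above replaces the static `[]eNotifier` slice with a `func() []notifier.Notifier` closure, so each evaluation fetches the current notifier set instead of capturing it once at startup, which matters now that targets may appear and disappear via Consul. A minimal sketch of the pattern with illustrative types (not vmalert's own):
```
package main

import (
	"fmt"
	"sync"
)

type notifierSet struct {
	mu    sync.RWMutex
	addrs []string
}

// Notifiers returns a snapshot of the current targets; callers invoke
// it on every evaluation instead of holding on to a fixed list.
func (s *notifierSet) Notifiers() []string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return append([]string(nil), s.addrs...)
}

func (s *notifierSet) Update(addrs []string) {
	s.mu.Lock()
	s.addrs = addrs
	s.mu.Unlock()
}

func main() {
	s := &notifierSet{}
	getNotifiers := s.Notifiers // the closure handed to each group

	s.Update([]string{"http://am1:9093"})
	fmt.Println(getNotifiers())

	s.Update([]string{"http://am1:9093", "http://am2:9093"})
	fmt.Println(getNotifiers()) // picks up the newly discovered target
}
```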


@ -212,7 +212,7 @@ func TestGroupStart(t *testing.T) {
fs.add(m1)
fs.add(m2)
go func() {
g.start(context.Background(), []notifier.Notifier{fn}, nil)
g.start(context.Background(), func() []notifier.Notifier { return []notifier.Notifier{fn} }, nil)
close(finished)
}()


@ -63,6 +63,7 @@ type fakeNotifier struct {
alerts []notifier.Alert
}
func (*fakeNotifier) Close() {}
func (*fakeNotifier) Addr() string { return "" }
func (fn *fakeNotifier) Send(_ context.Context, alerts []notifier.Alert) error {
fn.Lock()


@ -35,7 +35,10 @@ absolute path to all .yaml files in root.
Rule files may contain %{ENV_VAR} placeholders, which are substituted by the corresponding env vars.`)
rulesCheckInterval = flag.Duration("rule.configCheckInterval", 0, "Interval for checking for changes in '-rule' files. "+
"By default the checking is disabled. Send SIGHUP signal in order to force config check for changes")
"By default the checking is disabled. Send SIGHUP signal in order to force config check for changes. DEPRECATED - see '-configCheckInterval' instead")
configCheckInterval = flag.Duration("configCheckInterval", 0, "Interval for checking for changes in '-rule' or '-notifier.config' files. "+
"By default the checking is disabled. Send SIGHUP signal in order to force config check for changes.")
httpListenAddr = flag.String("httpListenAddr", ":8880", "Address to listen for http connections")
evaluationInterval = flag.Duration("evaluationInterval", time.Minute, "How often to evaluate the rules")
@ -47,14 +50,14 @@ Rule files may contain %{ENV_VAR} placeholders, which are substituted by the cor
externalURL = flag.String("external.url", "", "External URL is used as alert's source for sent alerts to the notifier")
externalAlertSource = flag.String("external.alert.source", "", `External Alert Source allows to override the Source link for alerts sent to AlertManager for cases where you want to build a custom link to Grafana, Prometheus or any other service.
eg. 'explore?orgId=1&left=[\"now-1h\",\"now\",\"VictoriaMetrics\",{\"expr\": \"{{$expr|quotesEscape|crlfEscape|queryEscape}}\"},{\"mode\":\"Metrics\"},{\"ui\":[true,true,true,\"none\"]}]'.If empty '/api/v1/:groupID/alertID/status' is used`)
externalLabels = flagutil.NewArray("external.label", "Optional label in the form 'name=value' to add to all generated recording rules and alerts. "+
externalLabels = flagutil.NewArray("external.label", "Optional label in the form 'Name=value' to add to all generated recording rules and alerts. "+
"Pass multiple -label flags in order to add multiple label sets.")
remoteReadLookBack = flag.Duration("remoteRead.lookback", time.Hour, "Lookback defines how far to look into past for alerts timeseries."+
" For example, if lookback=1h then range from now() to now()-1h will be scanned.")
remoteReadIgnoreRestoreErrors = flag.Bool("remoteRead.ignoreRestoreErrors", true, "Whether to ignore errors from remote storage when restoring alerts state on startup.")
disableAlertGroupLabel = flag.Bool("disableAlertgroupLabel", false, "Whether to disable adding group's name as label to generated alerts and time series.")
disableAlertGroupLabel = flag.Bool("disableAlertgroupLabel", false, "Whether to disable adding group's Name as label to generated alerts and time series.")
dryRun = flag.Bool("dryRun", false, "Whether to check only config files without running vmalert. The rules file are validated. The `-rule` flag must be specified.")
)
@ -192,7 +195,7 @@ func newManager(ctx context.Context) (*manager, error) {
}
n := strings.IndexByte(s, '=')
if n < 0 {
return nil, fmt.Errorf("missing '=' in `-label`. It must contain label in the form `name=value`; got %q", s)
return nil, fmt.Errorf("missing '=' in `-label`. It must contain label in the form `Name=value`; got %q", s)
}
manager.labels[s[:n]] = s[n+1:]
}
@ -254,8 +257,13 @@ See the docs at https://docs.victoriametrics.com/vmalert.html .
func configReload(ctx context.Context, m *manager, groupsCfg []config.Group, sighupCh <-chan os.Signal) {
var configCheckCh <-chan time.Time
if *rulesCheckInterval > 0 {
ticker := time.NewTicker(*rulesCheckInterval)
checkInterval := *configCheckInterval
if checkInterval == 0 && *rulesCheckInterval > 0 {
logger.Warnf("flag `rule.configCheckInterval` is deprecated - use `configCheckInterval` instead")
checkInterval = *rulesCheckInterval
}
if checkInterval > 0 {
ticker := time.NewTicker(checkInterval)
configCheckCh = ticker.C
defer ticker.Stop()
}
@ -272,6 +280,12 @@ func configReload(ctx context.Context, m *manager, groupsCfg []config.Group, sig
configReloads.Inc()
case <-configCheckCh:
}
if err := notifier.Reload(); err != nil {
configReloadErrors.Inc()
configSuccess.Set(0)
logger.Errorf("failed to reload notifier config: %s", err)
continue
}
newGroupsCfg, err := config.Parse(*rulePath, *validateTemplates, *validateExpressions)
if err != nil {
configReloadErrors.Inc()


@ -100,7 +100,7 @@ groups:
querierBuilder: &fakeQuerier{},
groups: make(map[uint64]*Group),
labels: map[string]string{},
notifiers: []notifier.Notifier{&fakeNotifier{}},
notifiers: func() []notifier.Notifier { return []notifier.Notifier{&fakeNotifier{}} },
rw: &remotewrite.Client{},
}


@ -17,7 +17,7 @@ import (
// manager controls group states
type manager struct {
querierBuilder datasource.QuerierBuilder
notifiers []notifier.Notifier
notifiers func() []notifier.Notifier
rw *remotewrite.Client
// remote read builder.
@ -109,7 +109,7 @@ func (m *manager) update(ctx context.Context, groupsCfg []config.Group, restore
return fmt.Errorf("config contains recording rules but `-remoteWrite.url` isn't set")
}
if arPresent && m.notifiers == nil {
return fmt.Errorf("config contains alerting rules but `-notifier.url` isn't set")
return fmt.Errorf("config contains alerting rules but neither `-notifier.url` nor `-notifier.config` aren't set")
}
type updateItem struct {


@ -40,7 +40,7 @@ func TestManagerUpdateConcurrent(t *testing.T) {
m := &manager{
groups: make(map[uint64]*Group),
querierBuilder: &fakeQuerier{},
notifiers: []notifier.Notifier{&fakeNotifier{}},
notifiers: func() []notifier.Notifier { return []notifier.Notifier{&fakeNotifier{}} },
}
paths := []string{
"config/testdata/dir/rules0-good.rules",
@ -223,7 +223,7 @@ func TestManagerUpdate(t *testing.T) {
m := &manager{
groups: make(map[uint64]*Group),
querierBuilder: &fakeQuerier{},
notifiers: []notifier.Notifier{&fakeNotifier{}},
notifiers: func() []notifier.Notifier { return []notifier.Notifier{&fakeNotifier{}} },
}
cfgInit := loadCfg(t, []string{tc.initPath}, true, true)
@ -311,9 +311,11 @@ func TestManagerUpdateNegative(t *testing.T) {
m := &manager{
groups: make(map[uint64]*Group),
querierBuilder: &fakeQuerier{},
notifiers: tc.notifiers,
rw: tc.rw,
}
if tc.notifiers != nil {
m.notifiers = func() []notifier.Notifier { return tc.notifiers }
}
err := m.update(context.Background(), []config.Group{tc.cfg}, false)
if err == nil {
t.Fatalf("expected to get error; got nil")


@ -1,39 +0,0 @@
package main
import "github.com/VictoriaMetrics/metrics"
type gauge struct {
name string
*metrics.Gauge
}
func getOrCreateGauge(name string, f func() float64) *gauge {
return &gauge{
name: name,
Gauge: metrics.GetOrCreateGauge(name, f),
}
}
type counter struct {
name string
*metrics.Counter
}
func getOrCreateCounter(name string) *counter {
return &counter{
name: name,
Counter: metrics.GetOrCreateCounter(name),
}
}
type summary struct {
name string
*metrics.Summary
}
func getOrCreateSummary(name string) *summary {
return &summary{
name: name,
Summary: metrics.GetOrCreateSummary(name),
}
}


@ -6,18 +6,41 @@ import (
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
)
// AlertManager represents integration provider with Prometheus alert manager
// https://github.com/prometheus/alertmanager
type AlertManager struct {
addr string
alertURL string
basicAuthUser string
basicAuthPass string
argFunc AlertURLGenerator
client *http.Client
timeout time.Duration
authCfg *promauth.Config
metrics *metrics
}
type metrics struct {
alertsSent *utils.Counter
alertsSendErrors *utils.Counter
}
func newMetrics(addr string) *metrics {
return &metrics{
alertsSent: utils.GetOrCreateCounter(fmt.Sprintf("vmalert_alerts_sent_total{addr=%q}", addr)),
alertsSendErrors: utils.GetOrCreateCounter(fmt.Sprintf("vmalert_alerts_send_errors_total{addr=%q}", addr)),
}
}
// Close is a destructor method for AlertManager
func (am *AlertManager) Close() {
am.metrics.alertsSent.Unregister()
am.metrics.alertsSendErrors.Unregister()
}
// Addr returns address where alerts are sent.
@ -25,17 +48,36 @@ func (am AlertManager) Addr() string { return am.addr }
// Send an alert or resolve message
func (am *AlertManager) Send(ctx context.Context, alerts []Alert) error {
am.metrics.alertsSent.Add(len(alerts))
err := am.send(ctx, alerts)
if err != nil {
am.metrics.alertsSendErrors.Add(len(alerts))
}
return err
}
func (am *AlertManager) send(ctx context.Context, alerts []Alert) error {
b := &bytes.Buffer{}
writeamRequest(b, alerts, am.argFunc)
req, err := http.NewRequest("POST", am.alertURL, b)
req, err := http.NewRequest("POST", am.addr, b)
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
if am.timeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, am.timeout)
defer cancel()
}
req = req.WithContext(ctx)
if am.basicAuthPass != "" {
req.SetBasicAuth(am.basicAuthUser, am.basicAuthPass)
if am.authCfg != nil {
if auth := am.authCfg.GetAuthHeader(); auth != "" {
req.Header.Set("Authorization", auth)
}
}
resp, err := am.client.Do(req)
if err != nil {
@ -47,9 +89,9 @@ func (am *AlertManager) Send(ctx context.Context, alerts []Alert) error {
if resp.StatusCode != http.StatusOK {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("failed to read response from %q: %w", am.alertURL, err)
return fmt.Errorf("failed to read response from %q: %w", am.addr, err)
}
return fmt.Errorf("invalid SC %d from %q; response body: %s", resp.StatusCode, am.alertURL, string(body))
return fmt.Errorf("invalid SC %d from %q; response body: %s", resp.StatusCode, am.addr, string(body))
}
return nil
}
@ -60,14 +102,31 @@ type AlertURLGenerator func(Alert) string
const alertManagerPath = "/api/v2/alerts"
// NewAlertManager is a constructor for AlertManager
func NewAlertManager(alertManagerURL, user, pass string, fn AlertURLGenerator, c *http.Client) *AlertManager {
url := strings.TrimSuffix(alertManagerURL, "/") + alertManagerPath
func NewAlertManager(alertManagerURL string, fn AlertURLGenerator, authCfg promauth.HTTPClientConfig, timeout time.Duration) (*AlertManager, error) {
tls := &promauth.TLSConfig{}
if authCfg.TLSConfig != nil {
tls = authCfg.TLSConfig
}
tr, err := utils.Transport(alertManagerURL, tls.CertFile, tls.KeyFile, tls.CAFile, tls.ServerName, tls.InsecureSkipVerify)
if err != nil {
return nil, fmt.Errorf("failed to create transport: %w", err)
}
ba := &promauth.BasicAuthConfig{}
if authCfg.BasicAuth != nil {
ba = authCfg.BasicAuth
}
aCfg, err := utils.AuthConfig(ba.Username, ba.Password.String(), ba.PasswordFile, authCfg.BearerToken.String(), authCfg.BearerTokenFile)
if err != nil {
return nil, fmt.Errorf("failed to configure auth: %w", err)
}
return &AlertManager{
addr: alertManagerURL,
alertURL: url,
argFunc: fn,
client: c,
basicAuthUser: user,
basicAuthPass: pass,
}
authCfg: aCfg,
client: &http.Client{Transport: tr},
timeout: timeout,
metrics: newMetrics(alertManagerURL),
}, nil
}


@ -8,11 +8,16 @@ import (
"strconv"
"testing"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
)
func TestAlertManager_Addr(t *testing.T) {
const addr = "http://localhost"
am := NewAlertManager(addr, "", "", nil, nil)
am, err := NewAlertManager(addr, nil, promauth.HTTPClientConfig{}, 0)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
if am.Addr() != addr {
t.Errorf("expected to have %q; got %q", addr, am.Addr())
}
@ -75,9 +80,19 @@ func TestAlertManager_Send(t *testing.T) {
})
srv := httptest.NewServer(mux)
defer srv.Close()
am := NewAlertManager(srv.URL, baUser, baPass, func(alert Alert) string {
aCfg := promauth.HTTPClientConfig{
BasicAuth: &promauth.BasicAuthConfig{
Username: baUser,
Password: promauth.NewSecret(baPass),
},
}
am, err := NewAlertManager(srv.URL+alertManagerPath, func(alert Alert) string {
return strconv.FormatUint(alert.GroupID, 10) + "/" + strconv.FormatUint(alert.ID, 10)
}, srv.Client())
}, aCfg, 0)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
if err := am.Send(context.Background(), []Alert{{}, {}}); err == nil {
t.Error("expected connection error got nil")
}


@ -0,0 +1,182 @@
package notifier
import (
"crypto/md5"
"fmt"
"gopkg.in/yaml.v2"
"io/ioutil"
"net/url"
"path"
"path/filepath"
"strings"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/consul"
)
// Config contains list of supported configuration settings
// for Notifier
type Config struct {
// Scheme defines the HTTP scheme for Notifier address
Scheme string `yaml:"scheme,omitempty"`
// PathPrefix is added to URL path before adding alertManagerPath value
PathPrefix string `yaml:"path_prefix,omitempty"`
// ConsulSDConfigs contains list of settings for service discovery via Consul
// see https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config
ConsulSDConfigs []consul.SDConfig `yaml:"consul_sd_configs,omitempty"`
// StaticConfigs contains list of static targets
StaticConfigs []StaticConfig `yaml:"static_configs,omitempty"`
// HTTPClientConfig contains HTTP configuration for Notifier clients
HTTPClientConfig promauth.HTTPClientConfig `yaml:",inline"`
// RelabelConfigs contains list of relabeling rules
RelabelConfigs []promrelabel.RelabelConfig `yaml:"relabel_configs,omitempty"`
// The timeout used when sending alerts.
Timeout utils.PromDuration `yaml:"timeout,omitempty"`
// Checksum stores the hash of yaml definition for the config.
// May be used to detect any changes to the config file.
Checksum string
// Catches all undefined fields and must be empty after parsing.
XXX map[string]interface{} `yaml:",inline"`
// This is set to the directory from where the config has been loaded.
baseDir string
// stores already parsed RelabelConfigs object
parsedRelabelConfigs *promrelabel.ParsedConfigs
}
// StaticConfig contains list of static targets in the following form:
// targets:
// [ - '<host>' ]
type StaticConfig struct {
Targets []string `yaml:"targets"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (cfg *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
type config Config
if err := unmarshal((*config)(cfg)); err != nil {
return err
}
if cfg.Scheme == "" {
cfg.Scheme = "http"
}
if cfg.Timeout.Duration() == 0 {
cfg.Timeout = utils.NewPromDuration(time.Second * 10)
}
rCfg, err := promrelabel.ParseRelabelConfigs(cfg.RelabelConfigs, false)
if err != nil {
return fmt.Errorf("failed to parse relabeling config: %w", err)
}
cfg.parsedRelabelConfigs = rCfg
b, err := yaml.Marshal(cfg)
if err != nil {
return fmt.Errorf("failed to marshal configuration for checksum: %w", err)
}
h := md5.New()
h.Write(b)
cfg.Checksum = fmt.Sprintf("%x", h.Sum(nil))
return nil
}
func parseConfig(path string) (*Config, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("error reading config file: %w", err)
}
var cfg *Config
err = yaml.Unmarshal(data, &cfg)
if err != nil {
return nil, err
}
if len(cfg.XXX) > 0 {
var keys []string
for k := range cfg.XXX {
keys = append(keys, k)
}
return nil, fmt.Errorf("unknown fields in %s", strings.Join(keys, ", "))
}
absPath, err := filepath.Abs(path)
if err != nil {
return nil, fmt.Errorf("cannot obtain abs path for %q: %w", path, err)
}
cfg.baseDir = filepath.Dir(absPath)
return cfg, nil
}
func parseLabels(target string, metaLabels map[string]string, cfg *Config) (string, []prompbmarshal.Label, error) {
labels := mergeLabels(target, metaLabels, cfg)
labels = cfg.parsedRelabelConfigs.Apply(labels, 0, false)
labels = promrelabel.RemoveMetaLabels(labels[:0], labels)
// Remove references to already deleted labels, so GC could clean strings for label name and label value past len(labels).
// This should reduce memory usage when relabeling creates big number of temporary labels with long names and/or values.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825 for details.
labels = append([]prompbmarshal.Label{}, labels...)
if len(labels) == 0 {
return "", nil, nil
}
schemeRelabeled := promrelabel.GetLabelValueByName(labels, "__scheme__")
if len(schemeRelabeled) == 0 {
schemeRelabeled = "http"
}
addressRelabeled := promrelabel.GetLabelValueByName(labels, "__address__")
if len(addressRelabeled) == 0 {
return "", nil, nil
}
if strings.Contains(addressRelabeled, "/") {
return "", nil, nil
}
addressRelabeled = addMissingPort(schemeRelabeled, addressRelabeled)
alertsPathRelabeled := promrelabel.GetLabelValueByName(labels, "__alerts_path__")
if !strings.HasPrefix(alertsPathRelabeled, "/") {
alertsPathRelabeled = "/" + alertsPathRelabeled
}
u := fmt.Sprintf("%s://%s%s", schemeRelabeled, addressRelabeled, alertsPathRelabeled)
if _, err := url.Parse(u); err != nil {
return "", nil, fmt.Errorf("invalid url %q for scheme=%q (%q), target=%q, metrics_path=%q (%q): %w",
u, cfg.Scheme, schemeRelabeled, target, addressRelabeled, alertsPathRelabeled, err)
}
return u, labels, nil
}
func addMissingPort(scheme, target string) string {
if strings.Contains(target, ":") {
return target
}
if scheme == "https" {
target += ":443"
} else {
target += ":80"
}
return target
}
func mergeLabels(target string, metaLabels map[string]string, cfg *Config) []prompbmarshal.Label {
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
m := make(map[string]string)
m["__address__"] = target
m["__scheme__"] = cfg.Scheme
m["__alerts_path__"] = path.Join("/", cfg.PathPrefix, alertManagerPath)
for k, v := range metaLabels {
m[k] = v
}
result := make([]prompbmarshal.Label, 0, len(m))
for k, v := range m {
result = append(result, prompbmarshal.Label{
Name: k,
Value: v,
})
}
return result
}
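Tracing `mergeLabels`, `parseLabels` and `addMissingPort` above: a static target `localhost:9093` with the default scheme and an empty `path_prefix` resolves to `http://localhost:9093/api/v2/alerts`, matching the addresses asserted in the tests below. A standalone sketch of the derivation:
```
package main

import (
	"fmt"
	"path"
	"strings"
)

// addMissingPort is copied from the config above; it appends the
// default port for the scheme when the target has none.
func addMissingPort(scheme, target string) string {
	if strings.Contains(target, ":") {
		return target
	}
	if scheme == "https" {
		return target + ":443"
	}
	return target + ":80"
}

func main() {
	const alertManagerPath = "/api/v2/alerts"
	scheme, pathPrefix, target := "http", "", "localhost:9093"

	addr := addMissingPort(scheme, target)
	alertsPath := path.Join("/", pathPrefix, alertManagerPath)
	fmt.Printf("%s://%s%s\n", scheme, addr, alertsPath)
	// prints: http://localhost:9093/api/v2/alerts
}
```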


@ -0,0 +1,31 @@
package notifier
import (
"strings"
"testing"
)
func TestConfigParseGood(t *testing.T) {
f := func(path string) {
_, err := parseConfig(path)
checkErr(t, err)
}
f("testdata/mixed.good.yaml")
f("testdata/consul.good.yaml")
f("testdata/static.good.yaml")
}
func TestConfigParseBad(t *testing.T) {
f := func(path, expErr string) {
_, err := parseConfig(path)
if err == nil {
t.Fatalf("expected to get non-nil err for config %q", path)
}
if !strings.Contains(err.Error(), expErr) {
t.Errorf("expected err to contain %q; got %q instead", expErr, err)
}
}
f("testdata/unknownFields.bad.yaml", "unknown field")
f("non-existing-file", "error reading")
}


@ -0,0 +1,244 @@
package notifier
import (
"fmt"
"sync"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/consul"
)
// configWatcher supports dynamic reload of Notifier objects
// from static configuration and service discovery.
// Use newWatcher to create a new object.
type configWatcher struct {
cfg *Config
genFn AlertURLGenerator
wg sync.WaitGroup
reloadCh chan struct{}
syncCh chan struct{}
targetsMu sync.RWMutex
targets map[TargetType][]Target
}
func newWatcher(path string, gen AlertURLGenerator) (*configWatcher, error) {
cfg, err := parseConfig(path)
if err != nil {
return nil, err
}
cw := &configWatcher{
cfg: cfg,
wg: sync.WaitGroup{},
reloadCh: make(chan struct{}, 1),
syncCh: make(chan struct{}),
genFn: gen,
targetsMu: sync.RWMutex{},
targets: make(map[TargetType][]Target),
}
return cw, cw.start()
}
func (cw *configWatcher) notifiers() []Notifier {
cw.targetsMu.RLock()
defer cw.targetsMu.RUnlock()
var notifiers []Notifier
for _, ns := range cw.targets {
for _, n := range ns {
notifiers = append(notifiers, n.Notifier)
}
}
return notifiers
}
func (cw *configWatcher) reload(path string) error {
select {
case cw.reloadCh <- struct{}{}:
default:
return nil
}
defer func() { <-cw.reloadCh }()
cfg, err := parseConfig(path)
if err != nil {
return err
}
if cfg.Checksum == cw.cfg.Checksum {
return nil
}
// stop existing discovery
close(cw.syncCh)
cw.wg.Wait()
// re-start cw with new config
cw.syncCh = make(chan struct{})
cw.cfg = cfg
cw.resetTargets()
return cw.start()
}
const (
addRetryBackoff = time.Millisecond * 100
addRetryCount = 2
)
func (cw *configWatcher) add(typeK TargetType, interval time.Duration, labelsFn getLabels) error {
var targets []Target
var errors []error
var count int
for { // retry addRetryCount times if first discovery attempts gave no results
targets, errors = targetsFromLabels(labelsFn, cw.cfg, cw.genFn)
for _, err := range errors {
return fmt.Errorf("failed to init notifier for %q: %s", typeK, err)
}
if len(targets) > 0 || count >= addRetryCount {
break
}
time.Sleep(addRetryBackoff)
}
cw.setTargets(typeK, targets)
cw.wg.Add(1)
go func() {
defer cw.wg.Done()
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
select {
case <-cw.syncCh:
return
case <-ticker.C:
}
updateTargets, errors := targetsFromLabels(labelsFn, cw.cfg, cw.genFn)
for _, err := range errors {
logger.Errorf("failed to init notifier for %q: %s", typeK, err)
}
cw.setTargets(typeK, updateTargets)
}
}()
return nil
}
func targetsFromLabels(labelsFn getLabels, cfg *Config, genFn AlertURLGenerator) ([]Target, []error) {
metaLabels, err := labelsFn()
if err != nil {
return nil, []error{fmt.Errorf("failed to get labels: %s", err)}
}
var targets []Target
var errors []error
duplicates := make(map[string]struct{})
for _, labels := range metaLabels {
target := labels["__address__"]
u, processedLabels, err := parseLabels(target, labels, cfg)
if err != nil {
errors = append(errors, err)
continue
}
if len(u) == 0 {
continue
}
if _, ok := duplicates[u]; ok { // check for duplicates
if !*suppressDuplicateTargetErrors {
logger.Errorf("skipping duplicate target with identical address %q; "+
"make sure service discovery and relabeling is set up properly; "+
"original labels: %s; resulting labels: %s",
u, labels, processedLabels)
}
continue
}
duplicates[u] = struct{}{}
am, err := NewAlertManager(u, genFn, cfg.HTTPClientConfig, cfg.Timeout.Duration())
if err != nil {
errors = append(errors, err)
continue
}
targets = append(targets, Target{
Notifier: am,
Labels: processedLabels,
})
}
return targets, errors
}
type getLabels func() ([]map[string]string, error)
func (cw *configWatcher) start() error {
if len(cw.cfg.StaticConfigs) > 0 {
var targets []Target
for _, cfg := range cw.cfg.StaticConfigs {
for _, target := range cfg.Targets {
address, labels, err := parseLabels(target, nil, cw.cfg)
if err != nil {
return fmt.Errorf("failed to parse labels for target %q: %s", target, err)
}
notifier, err := NewAlertManager(address, cw.genFn, cw.cfg.HTTPClientConfig, cw.cfg.Timeout.Duration())
if err != nil {
return fmt.Errorf("failed to init alertmanager for addr %q: %s", address, err)
}
targets = append(targets, Target{
Notifier: notifier,
Labels: labels,
})
}
}
cw.setTargets(TargetStatic, targets)
}
if len(cw.cfg.ConsulSDConfigs) > 0 {
err := cw.add(TargetConsul, *consul.SDCheckInterval, func() ([]map[string]string, error) {
var labels []map[string]string
for i := range cw.cfg.ConsulSDConfigs {
sdc := &cw.cfg.ConsulSDConfigs[i]
targetLabels, err := sdc.GetLabels(cw.cfg.baseDir)
if err != nil {
return nil, fmt.Errorf("got labels err: %s", err)
}
labels = append(labels, targetLabels...)
}
return labels, nil
})
if err != nil {
return fmt.Errorf("failed to start consulSD discovery: %s", err)
}
}
return nil
}
func (cw *configWatcher) resetTargets() {
cw.targetsMu.Lock()
for _, targets := range cw.targets {
for _, t := range targets {
t.Close()
}
}
cw.targets = make(map[TargetType][]Target)
cw.targetsMu.Unlock()
}
func (cw *configWatcher) setTargets(key TargetType, targets []Target) {
cw.targetsMu.Lock()
newT := make(map[string]Target)
for _, t := range targets {
newT[t.Addr()] = t
}
oldT := cw.targets[key]
for _, ot := range oldT {
if _, ok := newT[ot.Addr()]; !ok {
ot.Notifier.Close()
}
}
cw.targets[key] = targets
cw.targetsMu.Unlock()
}
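One detail worth noting in `reload` above: the one-slot buffered `reloadCh` acts as a try-lock, so concurrent reload requests collapse into a single run instead of queuing up. The pattern in isolation (an illustrative sketch):
```
package main

import (
	"fmt"
	"sync"
	"time"
)

var reloadCh = make(chan struct{}, 1)

func reload(id int) {
	select {
	case reloadCh <- struct{}{}: // acquired the slot
	default: // someone else is already reloading
		fmt.Printf("reload %d skipped\n", id)
		return
	}
	defer func() { <-reloadCh }() // release the slot

	fmt.Printf("reload %d running\n", id)
	time.Sleep(10 * time.Millisecond) // simulate re-parsing the config
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) { defer wg.Done(); reload(id) }(i)
	}
	wg.Wait()
}
```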


@ -0,0 +1,307 @@
package notifier
import (
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"net/http/httptest"
"os"
"sync"
"testing"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/consul"
)
func TestConfigWatcherReload(t *testing.T) {
f, err := ioutil.TempFile("", "")
if err != nil {
t.Fatal(err)
}
defer func() { _ = os.Remove(f.Name()) }()
writeToFile(t, f.Name(), `
static_configs:
- targets:
- localhost:9093
- localhost:9094
`)
cw, err := newWatcher(f.Name(), nil)
if err != nil {
t.Fatalf("failed to start config watcher: %s", err)
}
ns := cw.notifiers()
if len(ns) != 2 {
t.Fatalf("expected to have 2 notifiers; got %d %#v", len(ns), ns)
}
f2, err := ioutil.TempFile("", "")
if err != nil {
t.Fatal(err)
}
defer func() { _ = os.Remove(f2.Name()) }()
writeToFile(t, f2.Name(), `
static_configs:
- targets:
- 127.0.0.1:9093
`)
checkErr(t, cw.reload(f2.Name()))
ns = cw.notifiers()
if len(ns) != 1 {
t.Fatalf("expected to have 1 notifier; got %d", len(ns))
}
expAddr := "http://127.0.0.1:9093/api/v2/alerts"
if ns[0].Addr() != expAddr {
t.Fatalf("expected to get %q; got %q instead", expAddr, ns[0].Addr())
}
}
func TestConfigWatcherStart(t *testing.T) {
consulSDServer := newFakeConsulServer()
defer consulSDServer.Close()
consulSDFile, err := ioutil.TempFile("", "")
if err != nil {
t.Fatal(err)
}
defer func() { _ = os.Remove(consulSDFile.Name()) }()
writeToFile(t, consulSDFile.Name(), fmt.Sprintf(`
scheme: https
path_prefix: proxy
consul_sd_configs:
- server: %s
services:
- alertmanager
`, consulSDServer.URL))
prevCheckInterval := *consul.SDCheckInterval
defer func() { *consul.SDCheckInterval = prevCheckInterval }()
*consul.SDCheckInterval = time.Millisecond * 100
cw, err := newWatcher(consulSDFile.Name(), nil)
if err != nil {
t.Fatalf("failed to start config watcher: %s", err)
}
time.Sleep(*consul.SDCheckInterval * 2)
if len(cw.notifiers()) != 2 {
t.Fatalf("expected to get 2 notifiers; got %d", len(cw.notifiers()))
}
expAddr1 := fmt.Sprintf("https://%s/proxy/api/v2/alerts", fakeConsulService1)
expAddr2 := fmt.Sprintf("https://%s/proxy/api/v2/alerts", fakeConsulService2)
n1, n2 := cw.notifiers()[0], cw.notifiers()[1]
if n1.Addr() != expAddr1 {
t.Fatalf("exp address %q; got %q", expAddr1, n1.Addr())
}
if n2.Addr() != expAddr2 {
t.Fatalf("exp address %q; got %q", expAddr2, n2.Addr())
}
}
// TestConfigWatcherReloadConcurrent is supposed to test concurrent
// execution of configuration updates.
// Should be executed with the -race flag
func TestConfigWatcherReloadConcurrent(t *testing.T) {
consulSDServer1 := newFakeConsulServer()
defer consulSDServer1.Close()
consulSDServer2 := newFakeConsulServer()
defer consulSDServer2.Close()
consulSDFile, err := ioutil.TempFile("", "")
if err != nil {
t.Fatal(err)
}
defer func() { _ = os.Remove(consulSDFile.Name()) }()
writeToFile(t, consulSDFile.Name(), fmt.Sprintf(`
consul_sd_configs:
- server: %s
services:
- alertmanager
- server: %s
services:
- consul
`, consulSDServer1.URL, consulSDServer2.URL))
staticAndConsulSDFile, err := ioutil.TempFile("", "")
if err != nil {
t.Fatal(err)
}
defer func() { _ = os.Remove(staticAndConsulSDFile.Name()) }()
writeToFile(t, staticAndConsulSDFile.Name(), fmt.Sprintf(`
static_configs:
- targets:
- localhost:9093
- localhost:9095
consul_sd_configs:
- server: %s
services:
- alertmanager
- server: %s
services:
- consul
`, consulSDServer1.URL, consulSDServer2.URL))
paths := []string{
staticAndConsulSDFile.Name(),
consulSDFile.Name(),
"testdata/static.good.yaml",
"unknownFields.bad.yaml",
}
cw, err := newWatcher(paths[0], nil)
if err != nil {
t.Fatalf("failed to start config watcher: %s", err)
}
const workers = 500
const iterations = 10
wg := sync.WaitGroup{}
wg.Add(workers)
for i := 0; i < workers; i++ {
go func() {
defer wg.Done()
for i := 0; i < iterations; i++ {
rnd := rand.Intn(len(paths))
_ = cw.reload(paths[rnd]) // update can fail and this is expected
_ = cw.notifiers()
}
}()
}
wg.Wait()
}
func writeToFile(t *testing.T, file, b string) {
t.Helper()
checkErr(t, ioutil.WriteFile(file, []byte(b), 0644))
}
func checkErr(t *testing.T, err error) {
t.Helper()
if err != nil {
t.Fatalf("unexpected err: %s", err)
}
}
const (
fakeConsulService1 = "127.0.0.1:9093"
fakeConsulService2 = "127.0.0.1:9095"
)
func newFakeConsulServer() *httptest.Server {
mux := http.NewServeMux()
mux.HandleFunc("/v1/agent/self", func(rw http.ResponseWriter, _ *http.Request) {
rw.Write([]byte(`{"Config": {"Datacenter": "dc1"}}`))
})
mux.HandleFunc("/v1/catalog/services", func(rw http.ResponseWriter, _ *http.Request) {
rw.Header().Set("X-Consul-Index", "1")
rw.Write([]byte(`{
"alertmanager": [
"alertmanager",
"__scheme__=http"
]
}`))
})
mux.HandleFunc("/v1/health/service/alertmanager", func(rw http.ResponseWriter, _ *http.Request) {
rw.Header().Set("X-Consul-Index", "1")
rw.Write([]byte(`
[
{
"Node": {
"ID": "e8e3629a-3f50-9d6e-aaf8-f173b5b05c72",
"Node": "machine",
"Address": "127.0.0.1",
"Datacenter": "dc1",
"TaggedAddresses": {
"lan": "127.0.0.1",
"lan_ipv4": "127.0.0.1",
"wan": "127.0.0.1",
"wan_ipv4": "127.0.0.1"
},
"Meta": {
"consul-network-segment": ""
},
"CreateIndex": 13,
"ModifyIndex": 14
},
"Service": {
"ID": "am1",
"Service": "alertmanager",
"Tags": [
"alertmanager",
"__scheme__=http"
],
"Address": "",
"Meta": null,
"Port": 9093,
"Weights": {
"Passing": 1,
"Warning": 1
},
"EnableTagOverride": false,
"Proxy": {
"Mode": "",
"MeshGateway": {},
"Expose": {}
},
"Connect": {},
"CreateIndex": 16,
"ModifyIndex": 16
}
},
{
"Node": {
"ID": "e8e3629a-3f50-9d6e-aaf8-f173b5b05c72",
"Node": "machine",
"Address": "127.0.0.1",
"Datacenter": "dc1",
"TaggedAddresses": {
"lan": "127.0.0.1",
"lan_ipv4": "127.0.0.1",
"wan": "127.0.0.1",
"wan_ipv4": "127.0.0.1"
},
"Meta": {
"consul-network-segment": ""
},
"CreateIndex": 13,
"ModifyIndex": 14
},
"Service": {
"ID": "am2",
"Service": "alertmanager",
"Tags": [
"alertmanager",
"bad-node"
],
"Address": "",
"Meta": null,
"Port": 9095,
"Weights": {
"Passing": 1,
"Warning": 1
},
"EnableTagOverride": false,
"Proxy": {
"Mode": "",
"MeshGateway": {},
"Expose": {}
},
"Connect": {},
"CreateIndex": 15,
"ModifyIndex": 15
}
}
]`))
})
return httptest.NewServer(mux)
}


@ -1,17 +1,24 @@
package notifier
import (
"flag"
"fmt"
"net/http"
"strings"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
)
var (
configPath = flag.String("notifier.config", "", "Path to configuration file for notifiers")
suppressDuplicateTargetErrors = flag.Bool("notifier.suppressDuplicateTargetErrors", false, "Whether to suppress 'duplicate target' errors during discovery")
addrs = flagutil.NewArray("notifier.url", "Prometheus alertmanager URL, e.g. http://127.0.0.1:9093")
basicAuthUsername = flagutil.NewArray("notifier.basicAuth.username", "Optional basic auth username for -notifier.url")
basicAuthPassword = flagutil.NewArray("notifier.basicAuth.password", "Optional basic auth password for -notifier.url")
basicAuthPasswordFile = flagutil.NewArray("notifier.basicAuth.passwordFile", "Optional path to basic auth password file for -notifier.url")
tlsInsecureSkipVerify = flagutil.NewArrayBool("notifier.tlsInsecureSkipVerify", "Whether to skip tls verification when connecting to -notifier.url")
tlsCertFile = flagutil.NewArray("notifier.tlsCertFile", "Optional path to client-side TLS certificate file to use when connecting to -notifier.url")
@ -22,20 +29,119 @@ var (
"By default the server name from -notifier.url is used")
)
// Init creates a Notifier object based on provided flags.
func Init(gen AlertURLGenerator) ([]Notifier, error) {
var notifiers []Notifier
for i, addr := range *addrs {
cert, key := tlsCertFile.GetOptionalArg(i), tlsKeyFile.GetOptionalArg(i)
ca, serverName := tlsCAFile.GetOptionalArg(i), tlsServerName.GetOptionalArg(i)
tr, err := utils.Transport(addr, cert, key, ca, serverName, tlsInsecureSkipVerify.GetOptionalArg(i))
if err != nil {
return nil, fmt.Errorf("failed to create transport: %w", err)
// cw holds a configWatcher for the configPath configuration file.
// configWatcher provides a list of Notifier objects discovered
// from static config or via service discovery.
// cw is not nil only if configPath is provided.
var cw *configWatcher
// Reload checks for changes in the configPath configuration file
// and applies them if any.
func Reload() error {
if cw == nil {
return nil
}
user, pass := basicAuthUsername.GetOptionalArg(i), basicAuthPassword.GetOptionalArg(i)
am := NewAlertManager(addr, user, pass, gen, &http.Client{Transport: tr})
notifiers = append(notifiers, am)
return cw.reload(*configPath)
}
var staticNotifiersFn func() []Notifier
// Init returns a function for retrieving the actual list of Notifier objects.
// Init works in two modes:
// * configuration via flags (for backward compatibility). It is always static
// and doesn't support live reloads.
// * configuration via file. Supports live reloads and service discovery.
// Init returns an error if both modes are used.
func Init(gen AlertURLGenerator) (func() []Notifier, error) {
if *configPath == "" && len(*addrs) == 0 {
return nil, nil
}
if *configPath != "" && len(*addrs) > 0 {
return nil, fmt.Errorf("only one of -notifier.config or -notifier.url flags must be specified")
}
if len(*addrs) > 0 {
notifiers, err := notifiersFromFlags(gen)
if err != nil {
return nil, fmt.Errorf("failed to create notifier from flag values: %s", err)
}
staticNotifiersFn = func() []Notifier {
return notifiers
}
return staticNotifiersFn, nil
}
var err error
cw, err = newWatcher(*configPath, gen)
if err != nil {
return nil, fmt.Errorf("failed to init config watcher: %s", err)
}
return cw.notifiers, nil
}
func notifiersFromFlags(gen AlertURLGenerator) ([]Notifier, error) {
var notifiers []Notifier
for i, addr := range *addrs {
authCfg := promauth.HTTPClientConfig{
TLSConfig: &promauth.TLSConfig{
CAFile: tlsCAFile.GetOptionalArg(i),
CertFile: tlsCertFile.GetOptionalArg(i),
KeyFile: tlsKeyFile.GetOptionalArg(i),
ServerName: tlsServerName.GetOptionalArg(i),
InsecureSkipVerify: tlsInsecureSkipVerify.GetOptionalArg(i),
},
BasicAuth: &promauth.BasicAuthConfig{
Username: basicAuthUsername.GetOptionalArg(i),
Password: promauth.NewSecret(basicAuthPassword.GetOptionalArg(i)),
PasswordFile: basicAuthPasswordFile.GetOptionalArg(i),
},
}
addr = strings.TrimSuffix(addr, "/")
am, err := NewAlertManager(addr+alertManagerPath, gen, authCfg, time.Minute)
if err != nil {
return nil, err
}
notifiers = append(notifiers, am)
}
return notifiers, nil
}
// Target represents a Notifier and optional
// list of labels added during discovery.
type Target struct {
Notifier
Labels []prompbmarshal.Label
}
// TargetType defines how the Target was discovered
type TargetType string
const (
// TargetStatic is for targets configured statically
TargetStatic TargetType = "static"
// TargetConsul is for targets discovered via Consul
TargetConsul TargetType = "consulSD"
)
// GetTargets returns list of static or discovered targets
// via notifier configuration.
func GetTargets() map[TargetType][]Target {
var targets = make(map[TargetType][]Target)
if staticNotifiersFn != nil {
for _, ns := range staticNotifiersFn() {
targets[TargetStatic] = append(targets[TargetStatic], Target{
Notifier: ns,
})
}
}
if cw != nil {
cw.targetsMu.RLock()
for key, ns := range cw.targets {
targets[key] = append(targets[key], ns...)
}
cw.targetsMu.RUnlock()
}
return targets
}
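A sketch of how the new `Init` contract is consumed, based on the usage in `main.go` and the manager (the `nil` URL generator just keeps the example short):
```
package main

import (
	"log"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
)

func main() {
	nts, err := notifier.Init(nil)
	if err != nil {
		log.Fatalf("failed to init notifiers: %s", err)
	}
	if nts == nil {
		// neither -notifier.url nor -notifier.config is set
		return
	}
	for _, n := range nts() { // fetch the current set on every use
		log.Printf("sending alerts to %s", n.Addr())
	}
}
```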


@ -10,4 +10,6 @@ type Notifier interface {
Send(ctx context.Context, alerts []Alert) error
// Addr returns address where alerts are sent.
Addr() string
// Close is a destructor for the Notifier
Close()
}
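Since `Close()` is now part of the interface, every implementation, including test fakes such as `fakeNotifier` earlier in this diff, must provide a destructor. A minimal conforming implementation (illustrative only, not part of vmalert):
```
package notifier

import "context"

// logNotifier is a toy Notifier used to illustrate the extended interface.
type logNotifier struct {
	addr string
}

// Send would POST the alerts to ln.addr in a real implementation.
func (ln *logNotifier) Send(_ context.Context, _ []Alert) error { return nil }

// Addr returns the address where alerts are sent.
func (ln *logNotifier) Addr() string { return ln.addr }

// Close releases resources such as registered metrics or HTTP clients.
func (ln *logNotifier) Close() {}
```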


@ -0,0 +1,13 @@
consul_sd_configs:
- server: localhost:8500
scheme: http
services:
- alertmanager
- server: localhost:8500
services:
- consul
relabel_configs:
- source_labels: [__meta_consul_tags]
regex: .*,__scheme__=([^,]+),.*
replacement: '${1}'
target_label: __scheme__


@ -0,0 +1,18 @@
static_configs:
- targets:
- localhost:9093
- localhost:9095
consul_sd_configs:
- server: localhost:8500
scheme: http
services:
- alertmanager
- server: localhost:8500
services:
- consul
relabel_configs:
- source_labels: [__meta_consul_tags]
regex: .*,__scheme__=([^,]+),.*
replacement: '${1}'
target_label: __scheme__


@ -0,0 +1,4 @@
static_configs:
- targets:
- localhost:9093
- localhost:9095


@ -0,0 +1,5 @@
scheme: https
unknown: field
static_configs:
- targets:
- localhost:9093


@ -10,8 +10,8 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/metrics"
)
// RecordingRule is a Rule that supposed
@ -43,8 +43,8 @@ type RecordingRule struct {
}
type recordingRuleMetrics struct {
errors *gauge
samples *gauge
errors *utils.Gauge
samples *utils.Gauge
}
// String implements Stringer interface
@ -75,7 +75,7 @@ func newRecordingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rul
}
labels := fmt.Sprintf(`recording=%q, group=%q, id="%d"`, rr.Name, group.Name, rr.ID())
rr.metrics.errors = getOrCreateGauge(fmt.Sprintf(`vmalert_recording_rules_error{%s}`, labels),
rr.metrics.errors = utils.GetOrCreateGauge(fmt.Sprintf(`vmalert_recording_rules_error{%s}`, labels),
func() float64 {
rr.mu.RLock()
defer rr.mu.RUnlock()
@ -84,7 +84,7 @@ func newRecordingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rul
}
return 1
})
rr.metrics.samples = getOrCreateGauge(fmt.Sprintf(`vmalert_recording_rules_last_evaluation_samples{%s}`, labels),
rr.metrics.samples = utils.GetOrCreateGauge(fmt.Sprintf(`vmalert_recording_rules_last_evaluation_samples{%s}`, labels),
func() float64 {
rr.mu.RLock()
defer rr.mu.RUnlock()
@ -95,8 +95,8 @@ func newRecordingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rul
// Close unregisters rule metrics
func (rr *RecordingRule) Close() {
metrics.UnregisterMetric(rr.metrics.errors.name)
metrics.UnregisterMetric(rr.metrics.samples.name)
rr.metrics.errors.Unregister()
rr.metrics.samples.Unregister()
}
// ExecRange executes recording rule on the given time range similarly to Exec.


@ -0,0 +1,54 @@
package utils
import "github.com/VictoriaMetrics/metrics"
type namedMetric struct {
Name string
}
// Unregister removes the metric by name from default registry
func (nm namedMetric) Unregister() {
metrics.UnregisterMetric(nm.Name)
}
// Gauge is a metrics.Gauge with Name
type Gauge struct {
namedMetric
*metrics.Gauge
}
// GetOrCreateGauge creates a new Gauge with the given name
func GetOrCreateGauge(name string, f func() float64) *Gauge {
return &Gauge{
namedMetric: namedMetric{Name: name},
Gauge: metrics.GetOrCreateGauge(name, f),
}
}
// Counter is a metrics.Counter with Name
type Counter struct {
namedMetric
*metrics.Counter
}
// GetOrCreateCounter creates a new Counter with the given name
func GetOrCreateCounter(name string) *Counter {
return &Counter{
namedMetric: namedMetric{Name: name},
Counter: metrics.GetOrCreateCounter(name),
}
}
// Summary is a metrics.Summary with Name
type Summary struct {
namedMetric
*metrics.Summary
}
// GetOrCreateSummary creates a new Summary with the given name
func GetOrCreateSummary(name string) *Summary {
return &Summary{
namedMetric: namedMetric{Name: name},
Summary: metrics.GetOrCreateSummary(name),
}
}
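Usage of these wrappers is symmetric: create a metric by name, keep the wrapper, and call `Unregister` on teardown, which is exactly what `AlertingRule.Close` and `Group.close` now do. A short sketch:
```
package main

import "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"

func main() {
	// The wrapper remembers its name, so it can later remove itself
	// from the default registry without the caller rebuilding the name.
	g := utils.GetOrCreateGauge(`vmalert_alerts_firing{group="demo"}`, func() float64 {
		return 1
	})
	c := utils.GetOrCreateCounter(`vmalert_alerts_sent_total{addr="http://am:9093"}`)
	c.Inc()

	// On teardown:
	g.Unregister()
	c.Unregister()
}
```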


@ -10,6 +10,7 @@ import (
"strings"
"sync"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/tpl"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
@ -33,9 +34,10 @@ func initLinks() {
{path.Join(pathPrefix, "-/reload"), "reload configuration"},
}
navItems = []tpl.NavItem{
{Name: "vmalert", Url: pathPrefix},
{Name: "vmalert", Url: path.Join(pathPrefix, "/")},
{Name: "Groups", Url: path.Join(pathPrefix, "groups")},
{Name: "Alerts", Url: path.Join(pathPrefix, "alerts")},
{Name: "Notifiers", Url: path.Join(pathPrefix, "notifiers")},
{Name: "Docs", Url: "https://docs.victoriametrics.com/vmalert.html"},
}
}
@ -62,6 +64,9 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
case "/groups":
WriteListGroups(w, rh.groups())
return true
case "/notifiers":
WriteListTargets(w, notifier.GetTargets())
return true
case "/api/v1/groups":
data, err := rh.listGroups()
if err != nil {


@ -5,6 +5,7 @@
"sort"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/tpl"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
) %}
@ -205,6 +206,62 @@
{% endfunc %}
{% func ListTargets(targets map[notifier.TargetType][]notifier.Target) %}
{%= tpl.Header("Notifiers", navItems) %}
{% if len(targets) > 0 %}
<a class="btn btn-primary" role="button" onclick="collapseAll()">Collapse All</a>
<a class="btn btn-primary" role="button" onclick="expandAll()">Expand All</a>
{%code
var keys []string
for key := range targets {
keys = append(keys, string(key))
}
sort.Strings(keys)
%}
{% for i := range keys %}
{%code typeK, ns := keys[i], targets[notifier.TargetType(keys[i])]
count := len(ns)
%}
<div class="group-heading data-bs-target="rules-{%s typeK %}">
<span class="anchor" id="notifiers-{%s typeK %}"></span>
<a href="#notifiers-{%s typeK %}">{%s typeK %} ({%d count %})</a>
</div>
<div class="collapse show" id="notifiers-{%s typeK %}">
<table class="table table-striped table-hover table-sm">
<thead>
<tr>
<th scope="col">Labels</th>
<th scope="col">Address</th>
</tr>
</thead>
<tbody>
{% for _, n := range ns %}
<tr>
<td>
{% for _, l := range n.Labels %}
<span class="ms-1 badge bg-primary">{%s l.Name %}={%s l.Value %}</span>
{% endfor %}
</td>
<td>{%s n.Notifier.Addr() %}</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% endfor %}
{% else %}
<div>
<p>No items...</p>
</div>
{% endif %}
{%= tpl.Footer() %}
{% endfunc %}
{% func Alert(alert *APIAlert) %}
{%= tpl.Header("", navItems) %}
{%code

File diff suppressed because it is too large Load diff

View file

@ -124,6 +124,8 @@ This may be useful for passing secrets to the config.
## Security
It is expected that all the backend services protected by `vmauth` are located in an isolated private network, so they can be accessed by external users only via `vmauth`.
Do not transfer Basic Auth headers in plaintext over untrusted networks. Enable HTTPS. This can be done by passing the following `-tls*` command-line flags to `vmauth`:
```

View file

@ -149,7 +149,7 @@ func (tbf *tmpBlocksFile) MustReadBlockRefAt(partRef storage.PartRef, addr tmpBl
} else {
bb := tmpBufPool.Get()
defer tmpBufPool.Put(bb)
bb.B = bytesutil.ResizeNoCopy(bb.B, addr.size)
bb.B = bytesutil.ResizeNoCopyMayOverallocate(bb.B, addr.size)
tbf.r.MustReadAt(bb.B, int64(addr.offset))
buf = bb.B
}
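The switch from `ResizeNoCopy` to `ResizeNoCopyMayOverallocate` is safe here because `MustReadAt` overwrites the whole buffer anyway, and letting the helper round the capacity up reduces reallocations when consecutive reads need slightly different sizes. A small sketch of the contract this call site relies on (behavior inferred from the helper's name and usage, not a spec):
```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
)

func main() {
	var buf []byte
	// Resize to 1000 bytes without preserving old contents ("NoCopy");
	// the capacity may be rounded up ("MayOverallocate") so that a later
	// resize to, say, 1100 bytes can reuse the same allocation.
	buf = bytesutil.ResizeNoCopyMayOverallocate(buf, 1000)
	fmt.Println(len(buf) == 1000, cap(buf) >= 1000) // true true
}
```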

View file

@ -4,6 +4,8 @@ import (
"flag"
"fmt"
"math"
"regexp"
"sort"
"strings"
"sync"
@ -275,55 +277,29 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
return rv, nil
}
if be, ok := e.(*metricsql.BinaryOpExpr); ok {
// Execute left and right sides of the binary operation in parallel.
// This should reduce execution times for heavy queries.
// On the other side this can increase CPU and RAM usage when executing heavy queries.
// TODO: think on how to limit CPU and RAM usage while leaving short execution times.
var left, right []*timeseries
var mu sync.Mutex
var wg sync.WaitGroup
var errGlobal error
wg.Add(1)
go func() {
defer wg.Done()
ecCopy := newEvalConfig(ec)
tss, err := evalExpr(ecCopy, be.Left)
mu.Lock()
if err != nil {
if errGlobal == nil {
errGlobal = err
}
}
left = tss
mu.Unlock()
}()
wg.Add(1)
go func() {
defer wg.Done()
ecCopy := newEvalConfig(ec)
tss, err := evalExpr(ecCopy, be.Right)
mu.Lock()
if err != nil {
if errGlobal == nil {
errGlobal = err
}
}
right = tss
mu.Unlock()
}()
wg.Wait()
if errGlobal != nil {
return nil, errGlobal
}
bf := getBinaryOpFunc(be.Op)
if bf == nil {
return nil, fmt.Errorf(`unknown binary op %q`, be.Op)
}
var err error
var tssLeft, tssRight []*timeseries
switch strings.ToLower(be.Op) {
case "and", "if":
// Fetch right-side series at first, since the left side of `and` and `if` operator
usually contains a lower number of time series. This should produce more specific label filters
// for the left side of the query. This, in turn, should reduce the time to select series
// for the left side of the query.
tssRight, tssLeft, err = execBinaryOpArgs(ec, be.Right, be.Left, be)
default:
tssLeft, tssRight, err = execBinaryOpArgs(ec, be.Left, be.Right, be)
}
if err != nil {
return nil, fmt.Errorf("cannot execute %q: %w", be.AppendString(nil), err)
}
bfa := &binaryOpFuncArg{
be: be,
left: left,
right: right,
left: tssLeft,
right: tssRight,
}
rv, err := bf(bfa)
if err != nil {
@ -348,6 +324,111 @@ func evalExpr(ec *EvalConfig, e metricsql.Expr) ([]*timeseries, error) {
return nil, fmt.Errorf("unexpected expression %q", e.AppendString(nil))
}
func execBinaryOpArgs(ec *EvalConfig, exprFirst, exprSecond metricsql.Expr, be *metricsql.BinaryOpExpr) ([]*timeseries, []*timeseries, error) {
// Execute binary operation in the following way:
//
// 1) execute the exprFirst
// 2) get common label filters for series returned at step 1
// 3) push down the found common label filters to exprSecond. This filters out unneeded series
during exprSecond execution instead of spending compute resources on extracting and processing these series
// before they are dropped later when matching time series according to https://prometheus.io/docs/prometheus/latest/querying/operators/#vector-matching
// 4) execute the exprSecond with possible additional filters found at step 3
//
// Typical use cases:
// - Kubernetes-related: show pod creation time with the node name:
//
// kube_pod_created{namespace="prod"} * on (uid) group_left(node) kube_pod_info
//
// Without the optimization `kube_pod_info` would select and spend compute resources
// for more time series than needed. The selected time series would be dropped later
when matching time series on the right and left sides of the binary operation.
//
// - Generic alerting queries, which rely on `info` metrics.
// See https://grafana.com/blog/2021/08/04/how-to-use-promql-joins-for-more-effective-queries-of-prometheus-metrics-at-scale/
//
// - Queries, which get additional labels from `info` metrics.
// See https://www.robustperception.io/exposing-the-software-version-to-prometheus
tssFirst, err := evalExpr(ec, exprFirst)
if err != nil {
return nil, nil, err
}
switch strings.ToLower(be.Op) {
case "or":
// Do not pushdown common label filters from tssFirst for `or` operation, since this can filter out the needed time series from tssSecond.
// See https://prometheus.io/docs/prometheus/latest/querying/operators/#logical-set-binary-operators for details.
default:
lfs := getCommonLabelFilters(tssFirst)
lfs = metricsql.TrimFiltersByGroupModifier(lfs, be)
exprSecond = metricsql.PushdownBinaryOpFilters(exprSecond, lfs)
}
tssSecond, err := evalExpr(ec, exprSecond)
if err != nil {
return nil, nil, err
}
return tssFirst, tssSecond, nil
}
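The pushdown itself is done with the public `metricsql` helpers used above. A simplified sketch that skips the `TrimFiltersByGroupModifier` step (so it ignores `on()`/`ignoring()` modifiers) and hard-codes the filter that `getCommonLabelFilters` would normally derive from the left-hand series:
```go
package main

import (
	"fmt"
	"log"

	"github.com/VictoriaMetrics/metricsql"
)

func main() {
	// Right-hand side of the example query from the comment above.
	expr, err := metricsql.Parse(`kube_pod_info`)
	if err != nil {
		log.Fatalf("cannot parse expression: %s", err)
	}
	// Pretend the left-hand side returned only series with namespace="prod".
	lfs := []metricsql.LabelFilter{{Label: "namespace", Value: "prod"}}
	optimized := metricsql.PushdownBinaryOpFilters(expr, lfs)
	// Should print: kube_pod_info{namespace="prod"}
	fmt.Println(string(optimized.AppendString(nil)))
}
```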
func getCommonLabelFilters(tss []*timeseries) []metricsql.LabelFilter {
m := make(map[string][]string)
for _, ts := range tss {
for _, tag := range ts.MetricName.Tags {
m[string(tag.Key)] = append(m[string(tag.Key)], string(tag.Value))
}
}
lfs := make([]metricsql.LabelFilter, 0, len(m))
for key, values := range m {
if len(values) != len(tss) {
// Skip the tag, since it doesn't belong to all the time series.
continue
}
values = getUniqueValues(values)
if len(values) > 10000 {
// Skip the filter on the given tag, since it needs to enumerate too many unique values.
// This may slow down the search for matching time series.
continue
}
lf := metricsql.LabelFilter{
Label: key,
}
if len(values) == 1 {
lf.Value = values[0]
} else {
sort.Strings(values)
lf.Value = joinRegexpValues(values)
lf.IsRegexp = true
}
lfs = append(lfs, lf)
}
sort.Slice(lfs, func(i, j int) bool {
return lfs[i].Label < lfs[j].Label
})
return lfs
}
func getUniqueValues(a []string) []string {
m := make(map[string]struct{}, len(a))
results := make([]string, 0, len(a))
for _, s := range a {
if _, ok := m[s]; !ok {
results = append(results, s)
m[s] = struct{}{}
}
}
return results
}
func joinRegexpValues(a []string) string {
var b []byte
for i, s := range a {
sQuoted := regexp.QuoteMeta(s)
b = append(b, sQuoted...)
if i < len(a)-1 {
b = append(b, '|')
}
}
return string(b)
}
func tryGetArgRollupFuncWithMetricExpr(ae *metricsql.AggrFuncExpr) (*metricsql.FuncExpr, newRollupFunc) {
if len(ae.Args) != 1 {
return nil, nil

View file

@ -0,0 +1,50 @@
package promql
import (
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/metricsql"
)
func TestGetCommonLabelFilters(t *testing.T) {
f := func(metrics string, lfsExpected string) {
t.Helper()
var tss []*timeseries
var rows prometheus.Rows
rows.UnmarshalWithErrLogger(metrics, func(errStr string) {
t.Fatalf("unexpected error when parsing %s: %s", metrics, errStr)
})
for _, row := range rows.Rows {
var tags []storage.Tag
for _, tag := range row.Tags {
tags = append(tags, storage.Tag{
Key: []byte(tag.Key),
Value: []byte(tag.Value),
})
}
var ts timeseries
ts.MetricName.Tags = tags
tss = append(tss, &ts)
}
lfs := getCommonLabelFilters(tss)
me := &metricsql.MetricExpr{
LabelFilters: lfs,
}
lfsMarshaled := me.AppendString(nil)
if string(lfsMarshaled) != lfsExpected {
t.Fatalf("unexpected common label filters;\ngot\n%s\nwant\n%s", lfsMarshaled, lfsExpected)
}
}
f(``, `{}`)
f(`m 1`, `{}`)
f(`m{a="b"} 1`, `{a="b"}`)
f(`m{c="d",a="b"} 1`, `{a="b", c="d"}`)
f(`m1{a="foo"} 1
m2{a="bar"} 1`, `{a=~"bar|foo"}`)
f(`m1{a="foo"} 1
m2{b="bar"} 1`, `{}`)
f(`m1{a="foo",b="bar"} 1
m2{b="bar",c="x"} 1`, `{b="bar"}`)
}

View file

@ -98,7 +98,7 @@ func marshalTimeseriesFast(dst []byte, tss []*timeseries, maxSize int, step int6
// Allocate the buffer for the marshaled tss before its marshaling.
// This should reduce memory fragmentation and memory usage.
dst = bytesutil.ResizeNoCopy(dst, size)
dst = bytesutil.ResizeNoCopyMayOverallocate(dst, size)
dst = marshalFastTimestamps(dst[:0], tss[0].Timestamps)
for _, ts := range tss {
dst = ts.marshalFastNoTimestamps(dst)

View file

@ -1,12 +1,12 @@
{
"files": {
"main.css": "./static/css/main.098d452b.css",
"main.js": "./static/js/main.2fe3eeab.js",
"main.js": "./static/js/main.7750d578.js",
"static/js/27.939f971b.chunk.js": "./static/js/27.939f971b.chunk.js",
"index.html": "./index.html"
},
"entrypoints": [
"static/css/main.098d452b.css",
"static/js/main.2fe3eeab.js"
"static/js/main.7750d578.js"
]
}

View file

@ -1 +1 @@
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><script defer="defer" src="./static/js/main.2fe3eeab.js"></script><link href="./static/css/main.098d452b.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><script defer="defer" src="./static/js/main.7750d578.js"></script><link href="./static/css/main.098d452b.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View file

@ -10,9 +10,9 @@
"dependencies": {
"@date-io/dayjs": "^2.11.0",
"@emotion/styled": "^11.6.0",
"@mui/icons-material": "^5.3.0",
"@mui/lab": "^5.0.0-alpha.65",
"@mui/material": "^5.3.0",
"@mui/icons-material": "^5.3.1",
"@mui/lab": "^5.0.0-alpha.66",
"@mui/material": "^5.3.1",
"@mui/styles": "^5.3.0",
"@testing-library/jest-dom": "^5.16.1",
"@testing-library/react": "^12.1.2",
@ -21,7 +21,7 @@
"@types/lodash.debounce": "^4.0.6",
"@types/lodash.get": "^4.4.6",
"@types/lodash.throttle": "^4.1.6",
"@types/node": "^17.0.10",
"@types/node": "^17.0.13",
"@types/numeral": "^2.0.2",
"@types/qs": "^6.9.7",
"@types/react": "^17.0.38",
@ -32,7 +32,7 @@
"lodash.get": "^4.4.2",
"lodash.throttle": "^4.1.1",
"numeral": "^2.0.6",
"preact": "^10.6.4",
"preact": "^10.6.5",
"qs": "^6.10.3",
"typescript": "~4.5.5",
"uplot": "^1.6.18",
@ -40,8 +40,8 @@
},
"devDependencies": {
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.16.7",
"@typescript-eslint/eslint-plugin": "^5.10.0",
"@typescript-eslint/parser": "^5.10.0",
"@typescript-eslint/eslint-plugin": "^5.10.1",
"@typescript-eslint/parser": "^5.10.1",
"customize-cra": "^1.0.0",
"eslint-plugin-react": "^7.28.0",
"react-app-rewired": "^2.1.11"
@ -3090,9 +3090,9 @@
}
},
"node_modules/@mui/base": {
"version": "5.0.0-alpha.65",
"resolved": "https://registry.npmjs.org/@mui/base/-/base-5.0.0-alpha.65.tgz",
"integrity": "sha512-6LCTWVoSnEoQuWdxA+Z1qqmlNK4aZj7LvCuLJzq3RVS2PskRuo1O3caVFxAzRu7xrY3zsyL/sUsJI+rdcjhuXw==",
"version": "5.0.0-alpha.66",
"resolved": "https://registry.npmjs.org/@mui/base/-/base-5.0.0-alpha.66.tgz",
"integrity": "sha512-LARfVx0HmGV5YwU2pdIqEApQwz/CtEnYtKkV856hlY0cgi5NQL2htzZ/9ujKz0j3LFUaMYiYuJ2AOwrNtGFGrw==",
"dependencies": {
"@babel/runtime": "^7.16.7",
"@emotion/is-prop-valid": "^1.1.1",
@ -3121,9 +3121,9 @@
}
},
"node_modules/@mui/icons-material": {
"version": "5.3.0",
"resolved": "https://registry.npmjs.org/@mui/icons-material/-/icons-material-5.3.0.tgz",
"integrity": "sha512-1+dN2N8BgozmdMeHXQLrvSr1G/7Xc0NmAMLSvu8XA9RxhcTos/p66vrvpPASw2qvt14dkfeqyHwvbLRgAU9slw==",
"version": "5.3.1",
"resolved": "https://registry.npmjs.org/@mui/icons-material/-/icons-material-5.3.1.tgz",
"integrity": "sha512-8zBWCaE8DHjIGZhGgMod92p6Rm38EhXrS+cZtaV0+jOTMeWh7z+mvswXzb/rVKc0ZYqw6mQYBcn2uEs2yclI9w==",
"dependencies": {
"@babel/runtime": "^7.16.7"
},
@ -3146,16 +3146,16 @@
}
},
"node_modules/@mui/lab": {
"version": "5.0.0-alpha.65",
"resolved": "https://registry.npmjs.org/@mui/lab/-/lab-5.0.0-alpha.65.tgz",
"integrity": "sha512-YiZvUGK/GbwgR4WU/JgdYrjF9AC9C4qn+mM3ShGsX0xPzTkwFG28uyKoNy2PN+/r10aQxdkkUsVQk3BCLC8/Sg==",
"version": "5.0.0-alpha.66",
"resolved": "https://registry.npmjs.org/@mui/lab/-/lab-5.0.0-alpha.66.tgz",
"integrity": "sha512-C/WB5vo+arkdbx3r1gRRKh4BcZJ763+ePcPHYj3pShDA1vG0jecp1RWO8MJ07SnsdfDHiKWtlUDJh+DpieR0Og==",
"dependencies": {
"@babel/runtime": "^7.16.7",
"@date-io/date-fns": "^2.11.0",
"@date-io/dayjs": "^2.11.0",
"@date-io/luxon": "^2.11.1",
"@date-io/moment": "^2.11.0",
"@mui/base": "5.0.0-alpha.65",
"@mui/base": "5.0.0-alpha.66",
"@mui/system": "^5.3.0",
"@mui/utils": "^5.3.0",
"clsx": "^1.1.1",
@ -3200,12 +3200,12 @@
}
},
"node_modules/@mui/material": {
"version": "5.3.0",
"resolved": "https://registry.npmjs.org/@mui/material/-/material-5.3.0.tgz",
"integrity": "sha512-zLdlweBHrKpOwHFoMXA6FFgKQOONuX4sQhODcj9dk5uPeScuMDE26svcsrPam5Y1PKXZX78YEGHB5Jr5PHGpTA==",
"version": "5.3.1",
"resolved": "https://registry.npmjs.org/@mui/material/-/material-5.3.1.tgz",
"integrity": "sha512-XWPsJ2jet2zfnKojth5d2IaHIJPpJnHq1ACCSlNf898BjYh1j50gRWsPpIHiptQ0oc0pdWmMcmrXbdANKR1ybw==",
"dependencies": {
"@babel/runtime": "^7.16.7",
"@mui/base": "5.0.0-alpha.65",
"@mui/base": "5.0.0-alpha.66",
"@mui/system": "^5.3.0",
"@mui/types": "^7.1.0",
"@mui/utils": "^5.3.0",
@ -4385,9 +4385,9 @@
"peer": true
},
"node_modules/@types/node": {
"version": "17.0.10",
"resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.10.tgz",
"integrity": "sha512-S/3xB4KzyFxYGCppyDt68yzBU9ysL88lSdIah4D6cptdcltc4NCPCAMc0+PCpg/lLIyC7IPvj2Z52OJWeIUkog=="
"version": "17.0.13",
"resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.13.tgz",
"integrity": "sha512-Y86MAxASe25hNzlDbsviXl8jQHb0RDvKt4c40ZJQ1Don0AAL0STLZSs4N+6gLEO55pedy7r2cLwS+ZDxPm/2Bw=="
},
"node_modules/@types/numeral": {
"version": "2.0.2",
@ -4575,14 +4575,14 @@
"peer": true
},
"node_modules/@typescript-eslint/eslint-plugin": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.10.0.tgz",
"integrity": "sha512-XXVKnMsq2fuu9K2KsIxPUGqb6xAImz8MEChClbXmE3VbveFtBUU5bzM6IPVWqzyADIgdkS2Ws/6Xo7W2TeZWjQ==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.10.1.tgz",
"integrity": "sha512-xN3CYqFlyE/qOcy978/L0xLR2HlcAGIyIK5sMOasxaaAPfQRj/MmMV6OC3I7NZO84oEUdWCOju34Z9W8E0pFDQ==",
"dev": true,
"dependencies": {
"@typescript-eslint/scope-manager": "5.10.0",
"@typescript-eslint/type-utils": "5.10.0",
"@typescript-eslint/utils": "5.10.0",
"@typescript-eslint/scope-manager": "5.10.1",
"@typescript-eslint/type-utils": "5.10.1",
"@typescript-eslint/utils": "5.10.1",
"debug": "^4.3.2",
"functional-red-black-tree": "^1.0.1",
"ignore": "^5.1.8",
@ -4608,13 +4608,13 @@
}
},
"node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/scope-manager": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.10.0.tgz",
"integrity": "sha512-tgNgUgb4MhqK6DoKn3RBhyZ9aJga7EQrw+2/OiDk5hKf3pTVZWyqBi7ukP+Z0iEEDMF5FDa64LqODzlfE4O/Dg==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.10.1.tgz",
"integrity": "sha512-Lyvi559Gvpn94k7+ElXNMEnXu/iundV5uFmCUNnftbFrUbAJ1WBoaGgkbOBm07jVZa682oaBU37ao/NGGX4ZDg==",
"dev": true,
"dependencies": {
"@typescript-eslint/types": "5.10.0",
"@typescript-eslint/visitor-keys": "5.10.0"
"@typescript-eslint/types": "5.10.1",
"@typescript-eslint/visitor-keys": "5.10.1"
},
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
@ -4625,9 +4625,9 @@
}
},
"node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/types": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.10.0.tgz",
"integrity": "sha512-wUljCgkqHsMZbw60IbOqT/puLfyqqD5PquGiBo1u1IS3PLxdi3RDGlyf032IJyh+eQoGhz9kzhtZa+VC4eWTlQ==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.10.1.tgz",
"integrity": "sha512-ZvxQ2QMy49bIIBpTqFiOenucqUyjTQ0WNLhBM6X1fh1NNlYAC6Kxsx8bRTY3jdYsYg44a0Z/uEgQkohbR0H87Q==",
"dev": true,
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
@ -4638,12 +4638,12 @@
}
},
"node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/visitor-keys": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.10.0.tgz",
"integrity": "sha512-GMxj0K1uyrFLPKASLmZzCuSddmjZVbVj3Ouy5QVuIGKZopxvOr24JsS7gruz6C3GExE01mublZ3mIBOaon9zuQ==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.10.1.tgz",
"integrity": "sha512-NjQ0Xinhy9IL979tpoTRuLKxMc0zJC7QVSdeerXs2/QvOy2yRkzX5dRb10X5woNUdJgU8G3nYRDlI33sq1K4YQ==",
"dev": true,
"dependencies": {
"@typescript-eslint/types": "5.10.0",
"@typescript-eslint/types": "5.10.1",
"eslint-visitor-keys": "^3.0.0"
},
"engines": {
@ -4695,14 +4695,14 @@
}
},
"node_modules/@typescript-eslint/parser": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.10.0.tgz",
"integrity": "sha512-pJB2CCeHWtwOAeIxv8CHVGJhI5FNyJAIpx5Pt72YkK3QfEzt6qAlXZuyaBmyfOdM62qU0rbxJzNToPTVeJGrQw==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.10.1.tgz",
"integrity": "sha512-GReo3tjNBwR5RnRO0K2wDIDN31cM3MmDtgyQ85oAxAmC5K3j/g85IjP+cDfcqDsDDBf1HNKQAD0WqOYL8jXqUA==",
"dev": true,
"dependencies": {
"@typescript-eslint/scope-manager": "5.10.0",
"@typescript-eslint/types": "5.10.0",
"@typescript-eslint/typescript-estree": "5.10.0",
"@typescript-eslint/scope-manager": "5.10.1",
"@typescript-eslint/types": "5.10.1",
"@typescript-eslint/typescript-estree": "5.10.1",
"debug": "^4.3.2"
},
"engines": {
@ -4722,13 +4722,13 @@
}
},
"node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/scope-manager": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.10.0.tgz",
"integrity": "sha512-tgNgUgb4MhqK6DoKn3RBhyZ9aJga7EQrw+2/OiDk5hKf3pTVZWyqBi7ukP+Z0iEEDMF5FDa64LqODzlfE4O/Dg==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.10.1.tgz",
"integrity": "sha512-Lyvi559Gvpn94k7+ElXNMEnXu/iundV5uFmCUNnftbFrUbAJ1WBoaGgkbOBm07jVZa682oaBU37ao/NGGX4ZDg==",
"dev": true,
"dependencies": {
"@typescript-eslint/types": "5.10.0",
"@typescript-eslint/visitor-keys": "5.10.0"
"@typescript-eslint/types": "5.10.1",
"@typescript-eslint/visitor-keys": "5.10.1"
},
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
@ -4739,9 +4739,9 @@
}
},
"node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/types": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.10.0.tgz",
"integrity": "sha512-wUljCgkqHsMZbw60IbOqT/puLfyqqD5PquGiBo1u1IS3PLxdi3RDGlyf032IJyh+eQoGhz9kzhtZa+VC4eWTlQ==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.10.1.tgz",
"integrity": "sha512-ZvxQ2QMy49bIIBpTqFiOenucqUyjTQ0WNLhBM6X1fh1NNlYAC6Kxsx8bRTY3jdYsYg44a0Z/uEgQkohbR0H87Q==",
"dev": true,
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
@ -4752,13 +4752,13 @@
}
},
"node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/typescript-estree": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.10.0.tgz",
"integrity": "sha512-x+7e5IqfwLwsxTdliHRtlIYkgdtYXzE0CkFeV6ytAqq431ZyxCFzNMNR5sr3WOlIG/ihVZr9K/y71VHTF/DUQA==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.10.1.tgz",
"integrity": "sha512-PwIGnH7jIueXv4opcwEbVGDATjGPO1dx9RkUl5LlHDSe+FXxPwFL5W/qYd5/NHr7f6lo/vvTrAzd0KlQtRusJQ==",
"dev": true,
"dependencies": {
"@typescript-eslint/types": "5.10.0",
"@typescript-eslint/visitor-keys": "5.10.0",
"@typescript-eslint/types": "5.10.1",
"@typescript-eslint/visitor-keys": "5.10.1",
"debug": "^4.3.2",
"globby": "^11.0.4",
"is-glob": "^4.0.3",
@ -4779,12 +4779,12 @@
}
},
"node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/visitor-keys": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.10.0.tgz",
"integrity": "sha512-GMxj0K1uyrFLPKASLmZzCuSddmjZVbVj3Ouy5QVuIGKZopxvOr24JsS7gruz6C3GExE01mublZ3mIBOaon9zuQ==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.10.1.tgz",
"integrity": "sha512-NjQ0Xinhy9IL979tpoTRuLKxMc0zJC7QVSdeerXs2/QvOy2yRkzX5dRb10X5woNUdJgU8G3nYRDlI33sq1K4YQ==",
"dev": true,
"dependencies": {
"@typescript-eslint/types": "5.10.0",
"@typescript-eslint/types": "5.10.1",
"eslint-visitor-keys": "^3.0.0"
},
"engines": {
@ -4829,12 +4829,12 @@
}
},
"node_modules/@typescript-eslint/type-utils": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-5.10.0.tgz",
"integrity": "sha512-TzlyTmufJO5V886N+hTJBGIfnjQDQ32rJYxPaeiyWKdjsv2Ld5l8cbS7pxim4DeNs62fKzRSt8Q14Evs4JnZyQ==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-5.10.1.tgz",
"integrity": "sha512-AfVJkV8uck/UIoDqhu+ptEdBoQATON9GXnhOpPLzkQRJcSChkvD//qsz9JVffl2goxX+ybs5klvacE9vmrQyCw==",
"dev": true,
"dependencies": {
"@typescript-eslint/utils": "5.10.0",
"@typescript-eslint/utils": "5.10.1",
"debug": "^4.3.2",
"tsutils": "^3.21.0"
},
@ -4913,15 +4913,15 @@
}
},
"node_modules/@typescript-eslint/utils": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.10.0.tgz",
"integrity": "sha512-IGYwlt1CVcFoE2ueW4/ioEwybR60RAdGeiJX/iDAw0t5w0wK3S7QncDwpmsM70nKgGTuVchEWB8lwZwHqPAWRg==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.10.1.tgz",
"integrity": "sha512-RRmlITiUbLuTRtn/gcPRi4202niF+q7ylFLCKu4c+O/PcpRvZ/nAUwQ2G00bZgpWkhrNLNnvhZLbDn8Ml0qsQw==",
"dev": true,
"dependencies": {
"@types/json-schema": "^7.0.9",
"@typescript-eslint/scope-manager": "5.10.0",
"@typescript-eslint/types": "5.10.0",
"@typescript-eslint/typescript-estree": "5.10.0",
"@typescript-eslint/scope-manager": "5.10.1",
"@typescript-eslint/types": "5.10.1",
"@typescript-eslint/typescript-estree": "5.10.1",
"eslint-scope": "^5.1.1",
"eslint-utils": "^3.0.0"
},
@ -4937,13 +4937,13 @@
}
},
"node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/scope-manager": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.10.0.tgz",
"integrity": "sha512-tgNgUgb4MhqK6DoKn3RBhyZ9aJga7EQrw+2/OiDk5hKf3pTVZWyqBi7ukP+Z0iEEDMF5FDa64LqODzlfE4O/Dg==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.10.1.tgz",
"integrity": "sha512-Lyvi559Gvpn94k7+ElXNMEnXu/iundV5uFmCUNnftbFrUbAJ1WBoaGgkbOBm07jVZa682oaBU37ao/NGGX4ZDg==",
"dev": true,
"dependencies": {
"@typescript-eslint/types": "5.10.0",
"@typescript-eslint/visitor-keys": "5.10.0"
"@typescript-eslint/types": "5.10.1",
"@typescript-eslint/visitor-keys": "5.10.1"
},
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
@ -4954,9 +4954,9 @@
}
},
"node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/types": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.10.0.tgz",
"integrity": "sha512-wUljCgkqHsMZbw60IbOqT/puLfyqqD5PquGiBo1u1IS3PLxdi3RDGlyf032IJyh+eQoGhz9kzhtZa+VC4eWTlQ==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.10.1.tgz",
"integrity": "sha512-ZvxQ2QMy49bIIBpTqFiOenucqUyjTQ0WNLhBM6X1fh1NNlYAC6Kxsx8bRTY3jdYsYg44a0Z/uEgQkohbR0H87Q==",
"dev": true,
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
@ -4967,13 +4967,13 @@
}
},
"node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/typescript-estree": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.10.0.tgz",
"integrity": "sha512-x+7e5IqfwLwsxTdliHRtlIYkgdtYXzE0CkFeV6ytAqq431ZyxCFzNMNR5sr3WOlIG/ihVZr9K/y71VHTF/DUQA==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.10.1.tgz",
"integrity": "sha512-PwIGnH7jIueXv4opcwEbVGDATjGPO1dx9RkUl5LlHDSe+FXxPwFL5W/qYd5/NHr7f6lo/vvTrAzd0KlQtRusJQ==",
"dev": true,
"dependencies": {
"@typescript-eslint/types": "5.10.0",
"@typescript-eslint/visitor-keys": "5.10.0",
"@typescript-eslint/types": "5.10.1",
"@typescript-eslint/visitor-keys": "5.10.1",
"debug": "^4.3.2",
"globby": "^11.0.4",
"is-glob": "^4.0.3",
@ -4994,12 +4994,12 @@
}
},
"node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/visitor-keys": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.10.0.tgz",
"integrity": "sha512-GMxj0K1uyrFLPKASLmZzCuSddmjZVbVj3Ouy5QVuIGKZopxvOr24JsS7gruz6C3GExE01mublZ3mIBOaon9zuQ==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.10.1.tgz",
"integrity": "sha512-NjQ0Xinhy9IL979tpoTRuLKxMc0zJC7QVSdeerXs2/QvOy2yRkzX5dRb10X5woNUdJgU8G3nYRDlI33sq1K4YQ==",
"dev": true,
"dependencies": {
"@typescript-eslint/types": "5.10.0",
"@typescript-eslint/types": "5.10.1",
"eslint-visitor-keys": "^3.0.0"
},
"engines": {
@ -15589,9 +15589,9 @@
"peer": true
},
"node_modules/preact": {
"version": "10.6.4",
"resolved": "https://registry.npmjs.org/preact/-/preact-10.6.4.tgz",
"integrity": "sha512-WyosM7pxGcndU8hY0OQlLd54tOU+qmG45QXj2dAYrL11HoyU/EzOSTlpJsirbBr1QW7lICxSsVJJmcmUglovHQ==",
"version": "10.6.5",
"resolved": "https://registry.npmjs.org/preact/-/preact-10.6.5.tgz",
"integrity": "sha512-i+LXM6JiVjQXSt2jG2vZZFapGpCuk1fl8o6ii3G84MA3xgj686FKjs4JFDkmUVhtxyq21+4ay74zqPykz9hU6w==",
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/preact"
@ -21694,9 +21694,9 @@
}
},
"@mui/base": {
"version": "5.0.0-alpha.65",
"resolved": "https://registry.npmjs.org/@mui/base/-/base-5.0.0-alpha.65.tgz",
"integrity": "sha512-6LCTWVoSnEoQuWdxA+Z1qqmlNK4aZj7LvCuLJzq3RVS2PskRuo1O3caVFxAzRu7xrY3zsyL/sUsJI+rdcjhuXw==",
"version": "5.0.0-alpha.66",
"resolved": "https://registry.npmjs.org/@mui/base/-/base-5.0.0-alpha.66.tgz",
"integrity": "sha512-LARfVx0HmGV5YwU2pdIqEApQwz/CtEnYtKkV856hlY0cgi5NQL2htzZ/9ujKz0j3LFUaMYiYuJ2AOwrNtGFGrw==",
"requires": {
"@babel/runtime": "^7.16.7",
"@emotion/is-prop-valid": "^1.1.1",
@ -21708,24 +21708,24 @@
}
},
"@mui/icons-material": {
"version": "5.3.0",
"resolved": "https://registry.npmjs.org/@mui/icons-material/-/icons-material-5.3.0.tgz",
"integrity": "sha512-1+dN2N8BgozmdMeHXQLrvSr1G/7Xc0NmAMLSvu8XA9RxhcTos/p66vrvpPASw2qvt14dkfeqyHwvbLRgAU9slw==",
"version": "5.3.1",
"resolved": "https://registry.npmjs.org/@mui/icons-material/-/icons-material-5.3.1.tgz",
"integrity": "sha512-8zBWCaE8DHjIGZhGgMod92p6Rm38EhXrS+cZtaV0+jOTMeWh7z+mvswXzb/rVKc0ZYqw6mQYBcn2uEs2yclI9w==",
"requires": {
"@babel/runtime": "^7.16.7"
}
},
"@mui/lab": {
"version": "5.0.0-alpha.65",
"resolved": "https://registry.npmjs.org/@mui/lab/-/lab-5.0.0-alpha.65.tgz",
"integrity": "sha512-YiZvUGK/GbwgR4WU/JgdYrjF9AC9C4qn+mM3ShGsX0xPzTkwFG28uyKoNy2PN+/r10aQxdkkUsVQk3BCLC8/Sg==",
"version": "5.0.0-alpha.66",
"resolved": "https://registry.npmjs.org/@mui/lab/-/lab-5.0.0-alpha.66.tgz",
"integrity": "sha512-C/WB5vo+arkdbx3r1gRRKh4BcZJ763+ePcPHYj3pShDA1vG0jecp1RWO8MJ07SnsdfDHiKWtlUDJh+DpieR0Og==",
"requires": {
"@babel/runtime": "^7.16.7",
"@date-io/date-fns": "^2.11.0",
"@date-io/dayjs": "^2.11.0",
"@date-io/luxon": "^2.11.1",
"@date-io/moment": "^2.11.0",
"@mui/base": "5.0.0-alpha.65",
"@mui/base": "5.0.0-alpha.66",
"@mui/system": "^5.3.0",
"@mui/utils": "^5.3.0",
"clsx": "^1.1.1",
@ -21736,12 +21736,12 @@
}
},
"@mui/material": {
"version": "5.3.0",
"resolved": "https://registry.npmjs.org/@mui/material/-/material-5.3.0.tgz",
"integrity": "sha512-zLdlweBHrKpOwHFoMXA6FFgKQOONuX4sQhODcj9dk5uPeScuMDE26svcsrPam5Y1PKXZX78YEGHB5Jr5PHGpTA==",
"version": "5.3.1",
"resolved": "https://registry.npmjs.org/@mui/material/-/material-5.3.1.tgz",
"integrity": "sha512-XWPsJ2jet2zfnKojth5d2IaHIJPpJnHq1ACCSlNf898BjYh1j50gRWsPpIHiptQ0oc0pdWmMcmrXbdANKR1ybw==",
"requires": {
"@babel/runtime": "^7.16.7",
"@mui/base": "5.0.0-alpha.65",
"@mui/base": "5.0.0-alpha.66",
"@mui/system": "^5.3.0",
"@mui/types": "^7.1.0",
"@mui/utils": "^5.3.0",
@ -22567,9 +22567,9 @@
"peer": true
},
"@types/node": {
"version": "17.0.10",
"resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.10.tgz",
"integrity": "sha512-S/3xB4KzyFxYGCppyDt68yzBU9ysL88lSdIah4D6cptdcltc4NCPCAMc0+PCpg/lLIyC7IPvj2Z52OJWeIUkog=="
"version": "17.0.13",
"resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.13.tgz",
"integrity": "sha512-Y86MAxASe25hNzlDbsviXl8jQHb0RDvKt4c40ZJQ1Don0AAL0STLZSs4N+6gLEO55pedy7r2cLwS+ZDxPm/2Bw=="
},
"@types/numeral": {
"version": "2.0.2",
@ -22757,14 +22757,14 @@
"peer": true
},
"@typescript-eslint/eslint-plugin": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.10.0.tgz",
"integrity": "sha512-XXVKnMsq2fuu9K2KsIxPUGqb6xAImz8MEChClbXmE3VbveFtBUU5bzM6IPVWqzyADIgdkS2Ws/6Xo7W2TeZWjQ==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.10.1.tgz",
"integrity": "sha512-xN3CYqFlyE/qOcy978/L0xLR2HlcAGIyIK5sMOasxaaAPfQRj/MmMV6OC3I7NZO84oEUdWCOju34Z9W8E0pFDQ==",
"dev": true,
"requires": {
"@typescript-eslint/scope-manager": "5.10.0",
"@typescript-eslint/type-utils": "5.10.0",
"@typescript-eslint/utils": "5.10.0",
"@typescript-eslint/scope-manager": "5.10.1",
"@typescript-eslint/type-utils": "5.10.1",
"@typescript-eslint/utils": "5.10.1",
"debug": "^4.3.2",
"functional-red-black-tree": "^1.0.1",
"ignore": "^5.1.8",
@ -22774,28 +22774,28 @@
},
"dependencies": {
"@typescript-eslint/scope-manager": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.10.0.tgz",
"integrity": "sha512-tgNgUgb4MhqK6DoKn3RBhyZ9aJga7EQrw+2/OiDk5hKf3pTVZWyqBi7ukP+Z0iEEDMF5FDa64LqODzlfE4O/Dg==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.10.1.tgz",
"integrity": "sha512-Lyvi559Gvpn94k7+ElXNMEnXu/iundV5uFmCUNnftbFrUbAJ1WBoaGgkbOBm07jVZa682oaBU37ao/NGGX4ZDg==",
"dev": true,
"requires": {
"@typescript-eslint/types": "5.10.0",
"@typescript-eslint/visitor-keys": "5.10.0"
"@typescript-eslint/types": "5.10.1",
"@typescript-eslint/visitor-keys": "5.10.1"
}
},
"@typescript-eslint/types": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.10.0.tgz",
"integrity": "sha512-wUljCgkqHsMZbw60IbOqT/puLfyqqD5PquGiBo1u1IS3PLxdi3RDGlyf032IJyh+eQoGhz9kzhtZa+VC4eWTlQ==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.10.1.tgz",
"integrity": "sha512-ZvxQ2QMy49bIIBpTqFiOenucqUyjTQ0WNLhBM6X1fh1NNlYAC6Kxsx8bRTY3jdYsYg44a0Z/uEgQkohbR0H87Q==",
"dev": true
},
"@typescript-eslint/visitor-keys": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.10.0.tgz",
"integrity": "sha512-GMxj0K1uyrFLPKASLmZzCuSddmjZVbVj3Ouy5QVuIGKZopxvOr24JsS7gruz6C3GExE01mublZ3mIBOaon9zuQ==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.10.1.tgz",
"integrity": "sha512-NjQ0Xinhy9IL979tpoTRuLKxMc0zJC7QVSdeerXs2/QvOy2yRkzX5dRb10X5woNUdJgU8G3nYRDlI33sq1K4YQ==",
"dev": true,
"requires": {
"@typescript-eslint/types": "5.10.0",
"@typescript-eslint/types": "5.10.1",
"eslint-visitor-keys": "^3.0.0"
}
},
@ -22826,41 +22826,41 @@
}
},
"@typescript-eslint/parser": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.10.0.tgz",
"integrity": "sha512-pJB2CCeHWtwOAeIxv8CHVGJhI5FNyJAIpx5Pt72YkK3QfEzt6qAlXZuyaBmyfOdM62qU0rbxJzNToPTVeJGrQw==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.10.1.tgz",
"integrity": "sha512-GReo3tjNBwR5RnRO0K2wDIDN31cM3MmDtgyQ85oAxAmC5K3j/g85IjP+cDfcqDsDDBf1HNKQAD0WqOYL8jXqUA==",
"dev": true,
"requires": {
"@typescript-eslint/scope-manager": "5.10.0",
"@typescript-eslint/types": "5.10.0",
"@typescript-eslint/typescript-estree": "5.10.0",
"@typescript-eslint/scope-manager": "5.10.1",
"@typescript-eslint/types": "5.10.1",
"@typescript-eslint/typescript-estree": "5.10.1",
"debug": "^4.3.2"
},
"dependencies": {
"@typescript-eslint/scope-manager": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.10.0.tgz",
"integrity": "sha512-tgNgUgb4MhqK6DoKn3RBhyZ9aJga7EQrw+2/OiDk5hKf3pTVZWyqBi7ukP+Z0iEEDMF5FDa64LqODzlfE4O/Dg==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.10.1.tgz",
"integrity": "sha512-Lyvi559Gvpn94k7+ElXNMEnXu/iundV5uFmCUNnftbFrUbAJ1WBoaGgkbOBm07jVZa682oaBU37ao/NGGX4ZDg==",
"dev": true,
"requires": {
"@typescript-eslint/types": "5.10.0",
"@typescript-eslint/visitor-keys": "5.10.0"
"@typescript-eslint/types": "5.10.1",
"@typescript-eslint/visitor-keys": "5.10.1"
}
},
"@typescript-eslint/types": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.10.0.tgz",
"integrity": "sha512-wUljCgkqHsMZbw60IbOqT/puLfyqqD5PquGiBo1u1IS3PLxdi3RDGlyf032IJyh+eQoGhz9kzhtZa+VC4eWTlQ==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.10.1.tgz",
"integrity": "sha512-ZvxQ2QMy49bIIBpTqFiOenucqUyjTQ0WNLhBM6X1fh1NNlYAC6Kxsx8bRTY3jdYsYg44a0Z/uEgQkohbR0H87Q==",
"dev": true
},
"@typescript-eslint/typescript-estree": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.10.0.tgz",
"integrity": "sha512-x+7e5IqfwLwsxTdliHRtlIYkgdtYXzE0CkFeV6ytAqq431ZyxCFzNMNR5sr3WOlIG/ihVZr9K/y71VHTF/DUQA==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.10.1.tgz",
"integrity": "sha512-PwIGnH7jIueXv4opcwEbVGDATjGPO1dx9RkUl5LlHDSe+FXxPwFL5W/qYd5/NHr7f6lo/vvTrAzd0KlQtRusJQ==",
"dev": true,
"requires": {
"@typescript-eslint/types": "5.10.0",
"@typescript-eslint/visitor-keys": "5.10.0",
"@typescript-eslint/types": "5.10.1",
"@typescript-eslint/visitor-keys": "5.10.1",
"debug": "^4.3.2",
"globby": "^11.0.4",
"is-glob": "^4.0.3",
@ -22869,12 +22869,12 @@
}
},
"@typescript-eslint/visitor-keys": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.10.0.tgz",
"integrity": "sha512-GMxj0K1uyrFLPKASLmZzCuSddmjZVbVj3Ouy5QVuIGKZopxvOr24JsS7gruz6C3GExE01mublZ3mIBOaon9zuQ==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.10.1.tgz",
"integrity": "sha512-NjQ0Xinhy9IL979tpoTRuLKxMc0zJC7QVSdeerXs2/QvOy2yRkzX5dRb10X5woNUdJgU8G3nYRDlI33sq1K4YQ==",
"dev": true,
"requires": {
"@typescript-eslint/types": "5.10.0",
"@typescript-eslint/types": "5.10.1",
"eslint-visitor-keys": "^3.0.0"
}
},
@ -22901,12 +22901,12 @@
}
},
"@typescript-eslint/type-utils": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-5.10.0.tgz",
"integrity": "sha512-TzlyTmufJO5V886N+hTJBGIfnjQDQ32rJYxPaeiyWKdjsv2Ld5l8cbS7pxim4DeNs62fKzRSt8Q14Evs4JnZyQ==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-5.10.1.tgz",
"integrity": "sha512-AfVJkV8uck/UIoDqhu+ptEdBoQATON9GXnhOpPLzkQRJcSChkvD//qsz9JVffl2goxX+ybs5klvacE9vmrQyCw==",
"dev": true,
"requires": {
"@typescript-eslint/utils": "5.10.0",
"@typescript-eslint/utils": "5.10.1",
"debug": "^4.3.2",
"tsutils": "^3.21.0"
}
@ -22947,43 +22947,43 @@
}
},
"@typescript-eslint/utils": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.10.0.tgz",
"integrity": "sha512-IGYwlt1CVcFoE2ueW4/ioEwybR60RAdGeiJX/iDAw0t5w0wK3S7QncDwpmsM70nKgGTuVchEWB8lwZwHqPAWRg==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.10.1.tgz",
"integrity": "sha512-RRmlITiUbLuTRtn/gcPRi4202niF+q7ylFLCKu4c+O/PcpRvZ/nAUwQ2G00bZgpWkhrNLNnvhZLbDn8Ml0qsQw==",
"dev": true,
"requires": {
"@types/json-schema": "^7.0.9",
"@typescript-eslint/scope-manager": "5.10.0",
"@typescript-eslint/types": "5.10.0",
"@typescript-eslint/typescript-estree": "5.10.0",
"@typescript-eslint/scope-manager": "5.10.1",
"@typescript-eslint/types": "5.10.1",
"@typescript-eslint/typescript-estree": "5.10.1",
"eslint-scope": "^5.1.1",
"eslint-utils": "^3.0.0"
},
"dependencies": {
"@typescript-eslint/scope-manager": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.10.0.tgz",
"integrity": "sha512-tgNgUgb4MhqK6DoKn3RBhyZ9aJga7EQrw+2/OiDk5hKf3pTVZWyqBi7ukP+Z0iEEDMF5FDa64LqODzlfE4O/Dg==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.10.1.tgz",
"integrity": "sha512-Lyvi559Gvpn94k7+ElXNMEnXu/iundV5uFmCUNnftbFrUbAJ1WBoaGgkbOBm07jVZa682oaBU37ao/NGGX4ZDg==",
"dev": true,
"requires": {
"@typescript-eslint/types": "5.10.0",
"@typescript-eslint/visitor-keys": "5.10.0"
"@typescript-eslint/types": "5.10.1",
"@typescript-eslint/visitor-keys": "5.10.1"
}
},
"@typescript-eslint/types": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.10.0.tgz",
"integrity": "sha512-wUljCgkqHsMZbw60IbOqT/puLfyqqD5PquGiBo1u1IS3PLxdi3RDGlyf032IJyh+eQoGhz9kzhtZa+VC4eWTlQ==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.10.1.tgz",
"integrity": "sha512-ZvxQ2QMy49bIIBpTqFiOenucqUyjTQ0WNLhBM6X1fh1NNlYAC6Kxsx8bRTY3jdYsYg44a0Z/uEgQkohbR0H87Q==",
"dev": true
},
"@typescript-eslint/typescript-estree": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.10.0.tgz",
"integrity": "sha512-x+7e5IqfwLwsxTdliHRtlIYkgdtYXzE0CkFeV6ytAqq431ZyxCFzNMNR5sr3WOlIG/ihVZr9K/y71VHTF/DUQA==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.10.1.tgz",
"integrity": "sha512-PwIGnH7jIueXv4opcwEbVGDATjGPO1dx9RkUl5LlHDSe+FXxPwFL5W/qYd5/NHr7f6lo/vvTrAzd0KlQtRusJQ==",
"dev": true,
"requires": {
"@typescript-eslint/types": "5.10.0",
"@typescript-eslint/visitor-keys": "5.10.0",
"@typescript-eslint/types": "5.10.1",
"@typescript-eslint/visitor-keys": "5.10.1",
"debug": "^4.3.2",
"globby": "^11.0.4",
"is-glob": "^4.0.3",
@ -22992,12 +22992,12 @@
}
},
"@typescript-eslint/visitor-keys": {
"version": "5.10.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.10.0.tgz",
"integrity": "sha512-GMxj0K1uyrFLPKASLmZzCuSddmjZVbVj3Ouy5QVuIGKZopxvOr24JsS7gruz6C3GExE01mublZ3mIBOaon9zuQ==",
"version": "5.10.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.10.1.tgz",
"integrity": "sha512-NjQ0Xinhy9IL979tpoTRuLKxMc0zJC7QVSdeerXs2/QvOy2yRkzX5dRb10X5woNUdJgU8G3nYRDlI33sq1K4YQ==",
"dev": true,
"requires": {
"@typescript-eslint/types": "5.10.0",
"@typescript-eslint/types": "5.10.1",
"eslint-visitor-keys": "^3.0.0"
}
},
@ -31024,9 +31024,9 @@
"peer": true
},
"preact": {
"version": "10.6.4",
"resolved": "https://registry.npmjs.org/preact/-/preact-10.6.4.tgz",
"integrity": "sha512-WyosM7pxGcndU8hY0OQlLd54tOU+qmG45QXj2dAYrL11HoyU/EzOSTlpJsirbBr1QW7lICxSsVJJmcmUglovHQ=="
"version": "10.6.5",
"resolved": "https://registry.npmjs.org/preact/-/preact-10.6.5.tgz",
"integrity": "sha512-i+LXM6JiVjQXSt2jG2vZZFapGpCuk1fl8o6ii3G84MA3xgj686FKjs4JFDkmUVhtxyq21+4ay74zqPykz9hU6w=="
},
"prelude-ls": {
"version": "1.2.1",

View file

@ -6,9 +6,9 @@
"dependencies": {
"@date-io/dayjs": "^2.11.0",
"@emotion/styled": "^11.6.0",
"@mui/icons-material": "^5.3.0",
"@mui/lab": "^5.0.0-alpha.65",
"@mui/material": "^5.3.0",
"@mui/icons-material": "^5.3.1",
"@mui/lab": "^5.0.0-alpha.66",
"@mui/material": "^5.3.1",
"@mui/styles": "^5.3.0",
"@testing-library/jest-dom": "^5.16.1",
"@testing-library/react": "^12.1.2",
@ -17,7 +17,7 @@
"@types/lodash.debounce": "^4.0.6",
"@types/lodash.get": "^4.4.6",
"@types/lodash.throttle": "^4.1.6",
"@types/node": "^17.0.10",
"@types/node": "^17.0.13",
"@types/numeral": "^2.0.2",
"@types/qs": "^6.9.7",
"@types/react": "^17.0.38",
@ -28,7 +28,7 @@
"lodash.get": "^4.4.2",
"lodash.throttle": "^4.1.1",
"numeral": "^2.0.6",
"preact": "^10.6.4",
"preact": "^10.6.5",
"qs": "^6.10.3",
"typescript": "~4.5.5",
"uplot": "^1.6.18",
@ -62,8 +62,8 @@
},
"devDependencies": {
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.16.7",
"@typescript-eslint/eslint-plugin": "^5.10.0",
"@typescript-eslint/parser": "^5.10.0",
"@typescript-eslint/eslint-plugin": "^5.10.1",
"@typescript-eslint/parser": "^5.10.1",
"customize-cra": "^1.0.0",
"eslint-plugin-react": "^7.28.0",
"react-app-rewired": "^2.1.11"

View file

@ -1,4 +1,4 @@
import React, {FC, useMemo} from "preact/compat";
import React, {FC, useMemo, useState} from "preact/compat";
import {InstantMetricResult} from "../../../api/types";
import {InstantDataSeries} from "../../../types";
import Table from "@mui/material/Table";
@ -7,6 +7,7 @@ import TableCell from "@mui/material/TableCell";
import TableContainer from "@mui/material/TableContainer";
import TableHead from "@mui/material/TableHead";
import TableRow from "@mui/material/TableRow";
import TableSortLabel from "@mui/material/TableSortLabel";
import makeStyles from "@mui/styles/makeStyles";
import {useSortedCategories} from "../../../hooks/useSortedCategories";
@ -26,12 +27,30 @@ const TableView: FC<GraphViewProps> = ({data}) => {
const sortedColumns = useSortedCategories(data);
const [orderBy, setOrderBy] = useState("");
const [orderDir, setOrderDir] = useState<"asc" | "desc">("asc");
const rows: InstantDataSeries[] = useMemo(() => {
return data?.map(d => ({
const rows = data?.map(d => ({
metadata: sortedColumns.map(c => d.metric[c.key] || "-"),
value: d.value ? d.value[1] : "-"
}));
}, [sortedColumns, data]);
const orderByValue = orderBy === "Value";
const rowIndex = sortedColumns.findIndex(c => c.key === orderBy);
if (!orderByValue && rowIndex === -1) return rows;
return rows.sort((a,b) => {
const n1 = orderByValue ? Number(a.value) : a.metadata[rowIndex];
const n2 = orderByValue ? Number(b.value) : b.metadata[rowIndex];
const asc = orderDir === "asc" ? n1 < n2 : n1 > n2;
return asc ? -1 : 1;
});
}, [sortedColumns, data, orderBy, orderDir]);
const sortHandler = (key: string) => {
setOrderDir((prev) => prev === "asc" && orderBy === key ? "desc" : "asc");
setOrderBy(key);
};
return (
<>
@ -41,8 +60,25 @@ const TableView: FC<GraphViewProps> = ({data}) => {
<TableHead>
<TableRow>
{sortedColumns.map((col, index) => (
<TableCell style={{textTransform: "capitalize"}} key={index}>{col.key}</TableCell>))}
<TableCell align="right">Value</TableCell>
<TableCell key={index} style={{textTransform: "capitalize"}}>
<TableSortLabel
active={orderBy === col.key}
direction={orderDir}
onClick={() => sortHandler(col.key)}
>
{col.key}
</TableSortLabel>
</TableCell>
))}
<TableCell align="right">
<TableSortLabel
active={orderBy === "Value"}
direction={orderDir}
onClick={() => sortHandler("Value")}
>
Value
</TableSortLabel>
</TableCell>
</TableRow>
</TableHead>
<TableBody>

View file

@ -5,12 +5,16 @@ import {LegendItem} from "./types";
import {getColorLine, getDashLine} from "./helpers";
import {HideSeriesArgs} from "./types";
export const getSeriesItem = (d: MetricResult, hideSeries: string[]): Series => {
interface SeriesItem extends Series {
freeFormFields: {[key: string]: string};
}
export const getSeriesItem = (d: MetricResult, hideSeries: string[]): SeriesItem => {
const label = getNameForMetric(d);
return {
label,
dash: getDashLine(d.group),
class: JSON.stringify(d.metric),
freeFormFields: d.metric,
width: 1.4,
stroke: getColorLine(d.group, label),
show: !includesHideSeries(label, d.group, hideSeries),
@ -22,12 +26,12 @@ export const getSeriesItem = (d: MetricResult, hideSeries: string[]): Series =>
};
};
export const getLegendItem = (s: Series, group: number): LegendItem => ({
export const getLegendItem = (s: SeriesItem, group: number): LegendItem => ({
group,
label: s.label || "",
color: s.stroke as string,
checked: s.show || false,
freeFormFields: JSON.parse(s.class || "{}"),
freeFormFields: s.freeFormFields,
});
export const getHideSeries = ({hideSeries, legend, metaKey, series}: HideSeriesArgs): string[] => {

View file

@ -6,10 +6,22 @@ sort: 15
## tip
* FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): cover more cases with the [label filters' propagation optimization](https://utcc.utoronto.ca/~cks/space/blog/sysadmin/PrometheusLabelNonOptimization). This should improve the average performance for practical queries. The following cases are additionally covered:
* Multi-level [transform functions](https://docs.victoriametrics.com/MetricsQL.html#transform-functions). For example, `abs(round(foo{a="b"})) + bar{x="y"}` is now optimized to `abs(round(foo{a="b",x="y"})) + bar{a="b",x="y"}`
* Binary operations with `on()`, `without()`, `group_left()` and `group_right()` modifiers. For example, `foo{a="b"} on (a) + bar` is now optimized to `foo{a="b"} on (a) + bar{a="b"}`
* Multi-level binary operations. For example, `foo{a="b"} + bar{x="y"} + baz{z="q"}` is now optimized to `foo{a="b",x="y",z="q"} + bar{a="b",x="y",z="q"} + baz{a="b",x="y",z="q"}`
* Aggregate functions. For example, `sum(foo{a="b"}) by (c) + bar{c="d"}` is now optimized to `sum(foo{a="b",c="d"}) by (c) + bar{c="d"}`
* FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): optimize joining with `*_info` labels. For example: `kube_pod_created{namespace="prod"} * on (uid) group_left(node) kube_pod_info` now automatically adds the needed filters on the `uid` label to `kube_pod_info` before selecting series for the right side of the `*` operation. This may save CPU, RAM and disk IO resources. See [this article](https://www.robustperception.io/exposing-the-software-version-to-prometheus) for details on `*_info` labels. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1827).
* FEATURE: all: expose `process_cpu_cores_available` metric, which shows the number of CPU cores available to the app. The number can be fractional if the corresponding cgroup limit is set to a fractional value. This metric is useful for alerting on CPU saturation. For example, the following query alerts when the app uses more than 90% of CPU during the last 5 minutes: `rate(process_cpu_seconds_total[5m]) / process_cpu_cores_available > 0.9` . See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2107).
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add ability to configure notifiers (e.g. alertmanager) via a file in a way similar to Prometheus. See [these docs](https://docs.victoriametrics.com/vmalert.html#notifier-configuration-file) and [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2127).
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add support for Consul service discovery for notifiers. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1947).
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add support for specifying Basic Auth password for notifiers via a file. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1567).
* BUGFIX: return proper results from `highestMax()` function at [Graphite render API](https://docs.victoriametrics.com/#graphite-render-api-usage). Previously it was incorrectly returning timeseries with min peaks instead of max peaks.
* BUGFIX: properly limit indexdb cache sizes. Previously they could exceed values set via `-memory.allowedPercent` and/or `-memory.allowedBytes` when `indexdb` contained many data parts. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2007).
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix a bug, which could break time range picker when editing `From` or `To` input fields. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2080).
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix a bug, which could break switching between `graph`, `json` and `table` views. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2084).
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix possible UI freeze after querying `node_uname_info` time series. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2115).
* BUGFIX: show the original location of the warning or error message when logging throttled messages. Previously the location inside `lib/logger/throttler.go` was shown. This could increase the complexity of debugging.

View file

@ -356,3 +356,7 @@ There could be a slight difference in stored values for time series. Due to diff
The query engine may behave differently for some functions. Please see [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e).
## If downsampling and deduplication are enabled how will this work?
[Deduplication](https://docs.victoriametrics.com/#deduplication) is a special case of zero-offset [downsampling](https://docs.victoriametrics.com/#downsampling). So, if both downsampling and deduplication are enabled, then deduplication is replaced by zero-offset downsampling.

View file

@ -8,10 +8,10 @@ sort: 17
0. Document all the changes for the new release in [CHANGELOG.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/CHANGELOG.md).
1. Create the following release tags:
* `git tag v1.xx.y` in `master` branch
* `git tag v1.xx.y-cluster` in `cluster` branch
* `git tag v1.xx.y-enterprise` in `enterprise` branch
* `git tag v1.xx.y-enterprise-cluster` in `enterprise-cluster` branch
* `git tag -s v1.xx.y` in `master` branch
* `git tag -s v1.xx.y-cluster` in `cluster` branch
* `git tag -s v1.xx.y-enterprise` in `enterprise` branch
* `git tag -s v1.xx.y-enterprise-cluster` in `enterprise-cluster` branch
2. Run `TAG=v1.xx.y make publish-release`. It will create `*.tar.gz` release archives with the corresponding `_checksums.txt` files inside `bin` directory and publish Docker images for the given `TAG`, `TAG-cluster`, `TAG-enterprise` and `TAG-enterprise-cluster`.
3. Push the release tag to https://github.com/VictoriaMetrics/VictoriaMetrics : `git push origin v1.xx.y`.
4. Go to https://github.com/VictoriaMetrics/VictoriaMetrics/releases , create a new release from the tag pushed on step 3 and upload the `*.tar.gz` archive with the corresponding `_checksums.txt` from step 2.

View file

@ -713,10 +713,10 @@ VmAgentStatus defines the observed state of VmAgent
| Field | Description | Scheme | Required |
| ----- | ----------- | ------ | -------- |
| replicas | ReplicaCount Total number of non-terminated pods targeted by this VMAlert cluster (their labels match the selector). | int32 | true |
| updatedReplicas | UpdatedReplicas Total number of non-terminated pods targeted by this VMAlert cluster that have the desired version spec. | int32 | true |
| availableReplicas | AvailableReplicas Total number of available pods (ready for at least minReadySeconds) targeted by this VMAlert cluster. | int32 | true |
| unavailableReplicas | UnavailableReplicas Total number of unavailable pods targeted by this VMAlert cluster. | int32 | true |
| replicas | ReplicaCount Total number of non-terminated pods targeted by this VMAgent cluster (their labels match the selector). | int32 | true |
| updatedReplicas | UpdatedReplicas Total number of non-terminated pods targeted by this VMAgent cluster that have the desired version spec. | int32 | true |
| availableReplicas | AvailableReplicas Total number of available pods (ready for at least minReadySeconds) targeted by this VMAgent cluster. | int32 | true |
| unavailableReplicas | UnavailableReplicas Total number of unavailable pods targeted by this VMAgent cluster. | int32 | true |
[Back to TOC](#table-of-contents)
@ -751,8 +751,8 @@ EmbeddedHPA embeds HorizontalPodAutoScaler spec v2. https://kubernetes.io/docs/r
| ----- | ----------- | ------ | -------- |
| minReplicas | | *int32 | false |
| maxReplicas | | int32 | false |
| metrics | | []v2beta2.MetricSpec | false |
| behaviour | | *v2beta2.HorizontalPodAutoscalerBehavior | false |
| metrics | | [][v2beta2.MetricSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#metricspec-v2beta2-autoscaling) | false |
| behaviour | | *[v2beta2.HorizontalPodAutoscalerBehavior](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#horizontalpodautoscalerbehavior-v2beta2-autoscaling) | false |
[Back to TOC](#table-of-contents)

View file

@ -118,7 +118,7 @@ The VictoriaMetrics Operator introduces additional resources in Kubernetes to de
## VMSingle
[VMSingle](https://github.com/VictoriaMetrics/VictoriaMetrics/) represents database for storing metrics, for all possible config options check api [doc](/Operator/api.MD#VMSingle):
[VMSingle](https://github.com/VictoriaMetrics/VictoriaMetrics/) represents database for storing metrics, for all possible config options check api [doc](https://docs.victoriametrics.com/operator/api.html#vmsingle):
```yaml
cat <<EOF | kubectl apply -f -
@ -273,7 +273,7 @@ Or you can use default rbac account, that will be created for `VMAgent` by opera
kubectl apply -f release/examples/vmagent_rbac.yaml
```
Modify the `VMAgent` config parameters at `release/examples/vmagent.yaml` and apply it; config options are described in the [doc](/docs/api.MD#VMAgent)
Modify the `VMAgent` config parameters at `release/examples/vmagent.yaml` and apply it; config options are described in the [doc](https://docs.victoriametrics.com/operator/api.html#vmagent)
Example:
@ -354,7 +354,7 @@ stringData:
EOF
```
Then add the `Alertmanager` object; other config options are at [doc](/docs/api.MD#Alertmanager)
Then add the `Alertmanager` object; other config options are at [doc](https://docs.victoriametrics.com/operator/api.html#alertmanager)
You have to set `configSecret` to the name of the secret that we created before - `alertmanager-config`.
```yaml
cat << EOF | kubectl apply -f -
@ -452,7 +452,7 @@ EOF
## VMAlert
[VMAlert](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/app/vmalert) - executes a list of given [alerting](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) or [recording](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) rules against configured address. It
has few required config options - `datasource` and `notifier` are required, for other config parameters check [doc](/Operator/api.MD#VMAlert).
has few required config options - `datasource` and `notifier` are required, for other config parameters check [doc](https://docs.victoriametrics.com/operator/api.html#vmalert).
```yaml
cat << EOF | kubectl apply -f -
@ -478,7 +478,7 @@ EOF
by the corresponding `Service` and its `Endpoint`s.
It has various options for configuring target scraping (with basic auth, TLS access, by specific port name, etc.).
Let's make some demo, you have to deploy [VMAgent](#VMAgent) and [VMSingle](#VMSingle) from previous step with match any selectors:
Let's make a demo: deploy [VMAgent](#vmagent) and [VMSingle](#vmsingle) from the previous step with match-any selectors:
```yaml
cat <<EOF | kubectl apply -f -
@ -714,7 +714,7 @@ kubectl logs vmagent-example-vmagent-5777fdf7bf-tctcv vmagent --tail 100
It generates `VMAlert` config with the ruleset defined in the `VMRule` spec.
Let's create `VMAlert` with a selector for `VMRule` with the label project=devops.
You also need datasource from previous step [VMSingle](#VMSingle) and [VMAgent](#VMAgent) connected to it.
You also need the datasource from the previous step [VMSingle](#vmsingle) and [VMAgent](#vmagent) connected to it.
```yaml
cat << EOF | kubectl apply -f -
@ -1100,7 +1100,7 @@ static_configs: added targets: 2, removed targets: 0; total targets: 2
[VMAuth](https://docs.victoriametrics.com/vmauth.html) allows protecting applications with authentication and routing traffic by rules.
api docs [link](/Operator/api.MD#VMAuthSpec)
api docs [link](https://docs.victoriametrics.com/operator/api.html#vmauthspec)
First create `VMAuth` configuration:
```yaml
@ -1141,7 +1141,7 @@ users:
## VMUser
`VMUser` configures `VMAuth`. api doc [link](/Operator/api.MD#VMUserSpec)
`VMUser` configures `VMAuth`. api doc [link](https://docs.victoriametrics.com/operator/api.html#vmuserspec)
There are two authentication mechanisms: `bearerToken` and `basicAuth` with `username` and `password`. Only one of them can be used by a `VMUser` at a time.
If you need to provide access with different mechanisms for a single endpoint, create multiple `VMUser`s.
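For illustration, a minimal `VMUser` with `basicAuth` could look as follows (a sketch assuming the operator's `v1beta1` API; the user name, password and target URL are placeholders):
```yaml
cat << EOF | kubectl apply -f -
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMUser
metadata:
  name: example-vmuser
spec:
  username: simple-user
  password: simple-password
  targetRefs:
    # route authenticated requests to a static backend (placeholder URL)
    - static:
        url: http://vmsingle-example-vmsingle.default.svc:8429
EOF
```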

View file

@ -47,7 +47,8 @@ To start using `vmalert` you will need the following things:
* list of rules - PromQL/MetricsQL expressions to execute;
* datasource address - reachable MetricsQL endpoint to run queries against;
* notifier address [optional] - reachable [Alert Manager](https://github.com/prometheus/alertmanager) instance for processing,
aggregating alerts, and sending notifications.
aggregating alerts, and sending notifications. Please note, notifier address also supports Consul Service Discovery via
[config file](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmalert/notifier/config.go).
* remote write address [optional] - [remote write](https://prometheus.io/docs/prometheus/latest/storage/#remote-storage-integrations)
compatible storage to persist rules and alerts state info;
* remote read address [optional] - MetricsQL compatible datasource to restore alerts state from.
@ -591,6 +592,9 @@ The shortlist of configuration flags is the following:
-notifier.basicAuth.password array
Optional basic auth password for -notifier.url
Supports an array of values separated by comma or specified via multiple flags.
-notifier.basicAuth.passwordFile array
Optional path to basic auth password file for -notifier.url
Supports an array of values separated by comma or specified via multiple flags.
-notifier.basicAuth.username array
Optional basic auth username for -notifier.url
Supports an array of values separated by comma or specified via multiple flags.
@ -693,8 +697,8 @@ The shortlist of configuration flags is the following:
absolute path to all .yaml files in root.
Rule files may contain %{ENV_VAR} placeholders, which are substituted by the corresponding env vars.
Supports an array of values separated by comma or specified via multiple flags.
-rule.configCheckInterval duration
Interval for checking for changes in '-rule' files. By default the checking is disabled. Send SIGHUP signal in order to force config check for changes
-configCheckInterval duration
Interval for checking for changes in '-rule' or '-notifier.config' files. By default the checking is disabled. Send SIGHUP signal in order to force config check for changes
-rule.maxResolveDuration duration
Limits the maximum duration for automatic alert expiration, which is by default equal to 3 evaluation intervals of the parent group.
-rule.validateExpressions
@ -707,6 +711,14 @@ The shortlist of configuration flags is the following:
Path to file with TLS certificate. Used only if -tls is set. Prefer ECDSA certs instead of RSA certs as RSA certs are slower
-tlsKeyFile string
Path to file with TLS key. Used only if -tls is set
-promscrape.consul.waitTime duration
Wait time used by Consul service discovery. Default value is used if not set
-promscrape.consulSDCheckInterval duration
Interval for checking for changes in Consul. This works only if consul_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config for details (default 30s)
-promscrape.discovery.concurrency int
The maximum number of concurrent requests to Prometheus autodiscovery API (Consul, Kubernetes, etc.) (default 100)
-promscrape.discovery.concurrentWaitTime duration
The maximum duration for waiting to perform API requests if more than -promscrape.discovery.concurrency requests are simultaneously performed (default 1m0s)
-version
Show VictoriaMetrics version
```
@ -715,7 +727,7 @@ The shortlist of configuration flags is the following:
`vmalert` supports "hot" config reload via the following methods:
* send SIGHUP signal to `vmalert` process;
* send GET request to `/-/reload` endpoint;
* configure `-rule.configCheckInterval` flag for periodic reload
* configure `-configCheckInterval` flag for periodic reload
on config change.
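For example, a reload can be triggered manually as follows (a sketch; the port assumes vmalert's default `-httpListenAddr`):
```
# via the HTTP endpoint
curl http://localhost:8880/-/reload

# via signal
kill -HUP $(pidof vmalert)
```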
### URL params
@ -736,6 +748,88 @@ Please note, `params` are used only for executing rules expressions (requests to
If there is a conflict between URL params set in the `datasource.url` flag and params in the group definition,
the latter have higher priority.
### Notifier configuration file
The notifier also supports configuration via a file specified with the `-notifier.config` flag:
```
./bin/vmalert -rule=app/vmalert/config/testdata/rules.good.rules \
-datasource.url=http://localhost:8428 \
-notifier.config=app/vmalert/notifier/testdata/consul.good.yaml
```
The configuration file allows configuring static notifiers or discovering notifiers via
[Consul](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config).
For example:
```
static_configs:
- targets:
- localhost:9093
- localhost:9095
consul_sd_configs:
- server: localhost:8500
services:
- alertmanager
```
The list of configured or discovered Notifiers can be explored via the [UI](#web).
The configuration file [specification](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmalert/notifier/config.go)
is the following:
```
# Per-target Notifier timeout when pushing alerts.
[ timeout: <duration> | default = 10s ]
# Prefix for the HTTP path alerts are pushed to.
[ path_prefix: <path> | default = / ]
# Configures the protocol scheme used for requests.
[ scheme: <scheme> | default = http ]
# Sets the `Authorization` header on every request with the
# configured username and password.
# password and password_file are mutually exclusive.
basic_auth:
[ username: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
# Optional `Authorization` header configuration.
authorization:
# Sets the authentication type.
[ type: <string> | default: Bearer ]
# Sets the credentials. It is mutually exclusive with
# `credentials_file`.
[ credentials: <secret> ]
# Sets the credentials to the credentials read from the configured file.
# It is mutually exclusive with `credentials`.
[ credentials_file: <filename> ]
# Configures the scrape request's TLS settings.
# see https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tls_config
tls_config:
[ <tls_config> ]
# List of labeled statically configured Notifiers.
static_configs:
targets:
[ - '<host>' ]
# List of Consul service discovery configurations.
# See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config
consul_sd_configs:
[ - <consul_sd_config> ... ]
# List of relabel configurations.
# Supports the same relabeling features as the rest of VictoriaMetrics components.
# See https://docs.victoriametrics.com/vmagent.html#relabeling
relabel_configs:
[ - <relabel_config> ... ]
```
The configuration file can be [hot-reloaded](#hot-config-reload).
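For illustration, `relabel_configs` may be used to filter the discovered notifiers; the sketch below keeps only Consul targets carrying a hypothetical `prod` tag:
```
consul_sd_configs:
- server: localhost:8500
  services:
    - alertmanager
relabel_configs:
# __meta_consul_tags holds the comma-separated tags of the discovered service
- source_labels: [__meta_consul_tags]
  regex: .*,prod,.*
  action: keep
```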
## Contributing

View file

@ -128,6 +128,8 @@ This may be useful for passing secrets to the config.
## Security
It is expected that all the backend services protected by `vmauth` are located in an isolated private network, so they can be accessed by external users only via `vmauth`.
Do not transfer Basic Auth headers in plaintext over untrusted networks. Enable HTTPS. This can be done by passing the following `-tls*` command-line flags to `vmauth`:
```
  -tls
     Whether to enable TLS (aka HTTPS) for incoming requests. -tlsCertFile and -tlsKeyFile must be set if -tls is set
  -tlsCertFile string
     Path to file with TLS certificate. Used only if -tls is set. Prefer ECDSA certs instead of RSA certs as RSA certs are slower
  -tlsKeyFile string
     Path to file with TLS key. Used only if -tls is set
```

20
go.mod
View file

@ -3,15 +3,15 @@ module github.com/VictoriaMetrics/VictoriaMetrics
go 1.17
require (
cloud.google.com/go/storage v1.18.2
cloud.google.com/go/storage v1.19.0
github.com/VictoriaMetrics/fastcache v1.8.0
// Do not use the original github.com/valyala/fasthttp because of issues
// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
github.com/VictoriaMetrics/fasthttp v1.1.0
github.com/VictoriaMetrics/metrics v1.18.1
github.com/VictoriaMetrics/metricsql v0.37.0
github.com/aws/aws-sdk-go v1.42.39
github.com/VictoriaMetrics/metricsql v0.40.0
github.com/aws/aws-sdk-go v1.42.44
github.com/cespare/xxhash/v2 v2.1.2
github.com/cheggaaa/pb/v3 v3.0.8
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
@ -19,7 +19,7 @@ require (
github.com/go-kit/kit v0.12.0
github.com/golang/snappy v0.0.4
github.com/influxdata/influxdb v1.9.5
github.com/klauspost/compress v1.14.1
github.com/klauspost/compress v1.14.2
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/oklog/ulid v1.3.1
@ -31,10 +31,10 @@ require (
github.com/valyala/fasttemplate v1.2.1
github.com/valyala/gozstd v1.15.1
github.com/valyala/quicktemplate v1.7.0
golang.org/x/net v0.0.0-20220121210141-e204ce36a2ba
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
google.golang.org/api v0.65.0
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27
google.golang.org/api v0.66.0
gopkg.in/yaml.v2 v2.4.0
)
@ -54,7 +54,7 @@ require (
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.12.0 // indirect
github.com/prometheus/client_golang v1.12.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
@ -68,8 +68,8 @@ require (
golang.org/x/text v0.3.7 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220118154757-00ab72f36ad5 // indirect
google.golang.org/grpc v1.43.0 // indirect
google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350 // indirect
google.golang.org/grpc v1.44.0 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
)

41
go.sum
View file

@ -55,8 +55,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.18.2 h1:5NQw6tOn3eMm0oE8vTkfjau18kjL79FlMjy/CHTpmoY=
cloud.google.com/go/storage v1.18.2/go.mod h1:AiIj7BWXyhO5gGVmYJ+S8tbkCx3yb0IMjua8Aw4naVM=
cloud.google.com/go/storage v1.19.0 h1:XOQSnPJD8hRtZJ3VdCyK0mBZsGGImrzPAMbSWcHSe6Q=
cloud.google.com/go/storage v1.19.0/go.mod h1:6rgiTRjOqI/Zd9YKimub5TIB4d+p3LH33V3ZE1DMuUM=
collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go v41.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
@ -115,8 +115,8 @@ github.com/VictoriaMetrics/fasthttp v1.1.0 h1:3crd4YWHsMwu60GUXRH6OstowiFvqrwS4a
github.com/VictoriaMetrics/fasthttp v1.1.0/go.mod h1:/7DMcogqd+aaD3G3Hg5kFgoFwlR2uydjiWvoLp5ZTqQ=
github.com/VictoriaMetrics/metrics v1.18.1 h1:OZ0+kTTto8oPfHnVAnTOoyl0XlRhRkoQrD2n2cOuRw0=
github.com/VictoriaMetrics/metrics v1.18.1/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
github.com/VictoriaMetrics/metricsql v0.37.0 h1:zFKC+XJpEhp0TtTa6pD0pnyg9sDLH4U5nCeDUT8eUAw=
github.com/VictoriaMetrics/metricsql v0.37.0/go.mod h1:6pP1ZeLVJHqJrHlF6Ij3gmpQIznSsgktEcZgsAWYel0=
github.com/VictoriaMetrics/metricsql v0.40.0 h1:QDzuhzsP2cZJyrijIptDJ6gnxd3qWGzQi4Fhj8mOLHo=
github.com/VictoriaMetrics/metricsql v0.40.0/go.mod h1:6pP1ZeLVJHqJrHlF6Ij3gmpQIznSsgktEcZgsAWYel0=
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
@ -161,8 +161,8 @@ github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZve
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
github.com/aws/aws-sdk-go v1.42.39 h1:6Lso73VoCI8Zmv3zAMv4BNg2gHAKNOlbLv1s/ew90SI=
github.com/aws/aws-sdk-go v1.42.39/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc=
github.com/aws/aws-sdk-go v1.42.44 h1:vPlF4cUsdN5ETfvb7ewZFbFZyB6Rsfndt3kS2XqLXKo=
github.com/aws/aws-sdk-go v1.42.44/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o=
@ -654,8 +654,8 @@ github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.14.1 h1:hLQYb23E8/fO+1u53d02A97a8UnsddcvYzq4ERRU4ds=
github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw=
github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@ -842,8 +842,8 @@ github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83A
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.0 h1:C+UIj/QWtmqY13Arb8kwMt5j34/0Z2iKamrJ+ryC0Gg=
github.com/prometheus/client_golang v1.12.0/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@ -1168,8 +1168,8 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220121210141-e204ce36a2ba h1:6u6sik+bn/y7vILcYkK3iwTBWN7WtBvB0+SZswQnbf8=
golang.org/x/net v0.0.0-20220121210141-e204ce36a2ba/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1185,7 +1185,6 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -1297,8 +1296,9 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@ -1446,12 +1446,12 @@ google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6
google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E=
google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
google.golang.org/api v0.64.0/go.mod h1:931CdxA8Rm4t6zqTFGSsgwbAEZ2+GMYurbndwSimebM=
google.golang.org/api v0.65.0 h1:MTW9c+LIBAbwoS1Gb+YV7NjFBt2f7GtAS5hIzh2NjgQ=
google.golang.org/api v0.65.0/go.mod h1:ArYhxgGadlWmqO1IqVujw6Cs8IdD33bTmzKo2Sh+cbg=
google.golang.org/api v0.66.0 h1:CbGy4LEiXCVCiNEDFgGpWOVwsDT7E2Qej1ZvN1P7KPg=
google.golang.org/api v0.66.0/go.mod h1:I1dmXYpX7HGwz/ejRxwQp2qj5bFAz93HiCU1C1oYd9M=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -1523,7 +1523,6 @@ google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEc
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211016002631-37fc39342514/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
@ -1531,8 +1530,10 @@ google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ6
google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220118154757-00ab72f36ad5 h1:zzNejm+EgrbLfDZ6lu9Uud2IVvHySPl8vQzf04laR5Q=
google.golang.org/genproto v0.0.0-20220114231437-d2e6a121cae0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220118154757-00ab72f36ad5/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350 h1:YxHp5zqIcAShDEvRr5/0rVESVS+njYF68PSdazrNLJo=
google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@ -1566,8 +1567,8 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.43.0 h1:Eeu7bZtDZ2DpRCsLhUlcrLnvYaMK1Gz86a+hMVvELmM=
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=

View file

@ -53,7 +53,7 @@ func (bb *ByteBuffer) MustReadAt(p []byte, offset int64) {
func (bb *ByteBuffer) ReadFrom(r io.Reader) (int64, error) {
b := bb.B
bLen := len(b)
b = ResizeWithCopy(b, 4*1024)
b = ResizeWithCopyMayOverallocate(b, 4*1024)
b = b[:cap(b)]
offset := bLen
for {

View file

@ -1,35 +1,66 @@
package bytesutil
import (
"math/bits"
"reflect"
"unsafe"
)
// ResizeWithCopy resizes b to n bytes and returns the resized buffer (which may be newly allocated).
// ResizeWithCopyMayOverallocate resizes b to at least n bytes and returns the resized buffer (which may be newly allocated).
//
// If a newly allocated buffer is returned, the contents of b are copied to it.
func ResizeWithCopy(b []byte, n int) []byte {
func ResizeWithCopyMayOverallocate(b []byte, n int) []byte {
if n <= cap(b) {
return b[:n]
}
nNew := roundToNearestPow2(n)
bNew := make([]byte, nNew)
copy(bNew, b)
return bNew[:n]
}
// ResizeWithCopyNoOverallocate resizes b to exactly n bytes and returns the resized buffer (which may be newly allocated).
//
// If a newly allocated buffer is returned, the contents of b are copied to it.
func ResizeWithCopyNoOverallocate(b []byte, n int) []byte {
if n <= cap(b) {
return b[:n]
}
// Allocate the exact number of bytes instead of using `b = append(b[:cap(b)], make([]byte, nn)...)`,
// since `append()` may allocate more than the requested bytes for additional capacity.
// Using make() instead of append() should save RAM when the resized slice is cached somewhere.
bNew := make([]byte, n)
copy(bNew, b)
return bNew
}
// ResizeNoCopy resizes b to n bytes and returns the resized buffer (which may be newly allocated).
// ResizeNoCopyMayOverallocate resizes b to at least n bytes and returns the resized buffer (which may be newly allocated).
//
// If a newly allocated buffer is returned, the contents of b aren't copied to it.
func ResizeNoCopy(b []byte, n int) []byte {
func ResizeNoCopyMayOverallocate(b []byte, n int) []byte {
if n <= cap(b) {
return b[:n]
}
nNew := roundToNearestPow2(n)
bNew := make([]byte, nNew)
return bNew[:n]
}
// ResizeNoCopyNoOverallocate resizes b to exactly n bytes and returns the resized buffer (which may be newly allocated).
//
// If a newly allocated buffer is returned, the contents of b aren't copied to it.
func ResizeNoCopyNoOverallocate(b []byte, n int) []byte {
if n <= cap(b) {
return b[:n]
}
return make([]byte, n)
}
// roundToNearestPow2 rounds n up to the smallest power of 2 that is >= n
//
// It is expected that n > 0
func roundToNearestPow2(n int) int {
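// bits.Len(uint(n-1)) is the number of bits needed to represent n-1,
// i.e. ceil(log2(n)) for n > 1, so 1<<pow2 is the smallest power of 2 >= n.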
pow2 := uint8(bits.Len(uint(n - 1)))
return 1 << pow2
}
// ToUnsafeString converts b to string without memory allocations.
//
// The returned string is valid only until b is reachable and unmodified.

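As a side note, here is a minimal standalone sketch of how the two resize flavors differ in capacity behavior (hypothetical usage of the new helpers):
```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
)

func main() {
	// MayOverallocate rounds the capacity up to a power of 2 (5 -> cap 8),
	// which amortizes future growth of reusable buffers.
	b := bytesutil.ResizeNoCopyMayOverallocate(nil, 5)
	fmt.Println(len(b), cap(b)) // 5 8

	// NoOverallocate allocates exactly n bytes (5 -> cap 5),
	// which saves RAM when the resized slice is cached long-term.
	c := bytesutil.ResizeNoCopyNoOverallocate(nil, 5)
	fmt.Println(len(c), cap(c)) // 5 5
}
```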
View file

@ -5,26 +5,63 @@ import (
"testing"
)
func TestResizeNoCopy(t *testing.T) {
func TestRoundToNearestPow2(t *testing.T) {
f := func(n, resultExpected int) {
t.Helper()
result := roundToNearestPow2(n)
if result != resultExpected {
t.Fatalf("unexpected roundtoNearestPow2(%d); got %d; want %d", n, result, resultExpected)
}
}
f(1, 1)
f(2, 2)
f(3, 4)
f(4, 4)
f(5, 8)
f(6, 8)
f(7, 8)
f(8, 8)
f(9, 16)
f(10, 16)
f(16, 16)
f(17, 32)
f(32, 32)
f(33, 64)
f(64, 64)
}
func TestResizeNoCopyNoOverallocate(t *testing.T) {
for i := 0; i < 1000; i++ {
b := ResizeNoCopy(nil, i)
b := ResizeNoCopyNoOverallocate(nil, i)
if len(b) != i {
t.Fatalf("invalid b size; got %d; expecting %d", len(b), i)
t.Fatalf("invalid b size; got %d; want %d", len(b), i)
}
b1 := ResizeNoCopy(b, i)
if cap(b) != i {
t.Fatalf("invalid cap(b); got %d; want %d", cap(b), i)
}
b1 := ResizeNoCopyNoOverallocate(b, i)
if len(b1) != len(b) || (len(b) > 0 && &b1[0] != &b[0]) {
t.Fatalf("invalid b1; got %x; expecting %x", &b1[0], &b[0])
t.Fatalf("invalid b1; got %x; want %x", &b1[0], &b[0])
}
b2 := ResizeNoCopy(b[:0], i)
if cap(b1) != i {
t.Fatalf("invalid cap(b1); got %d; want %d", cap(b1), i)
}
b2 := ResizeNoCopyNoOverallocate(b[:0], i)
if len(b2) != len(b) || (len(b) > 0 && &b2[0] != &b[0]) {
t.Fatalf("invalid b2; got %x; expecting %x", &b2[0], &b[0])
t.Fatalf("invalid b2; got %x; want %x", &b2[0], &b[0])
}
if cap(b2) != i {
t.Fatalf("invalid cap(b2); got %d; want %d", cap(b2), i)
}
if i > 0 {
b[0] = 123
b3 := ResizeNoCopy(b, i+1)
b3 := ResizeNoCopyNoOverallocate(b, i+1)
if len(b3) != i+1 {
t.Fatalf("invalid b3 len; got %d; want %d", len(b3), i+1)
}
if cap(b3) != i+1 {
t.Fatalf("invalid cap(b3); got %d; want %d", cap(b3), i+1)
}
if &b3[0] == &b[0] {
t.Fatalf("b3 must be newly allocated")
}
@ -35,26 +72,75 @@ func TestResizeNoCopy(t *testing.T) {
}
}
func TestResizeWithCopy(t *testing.T) {
func TestResizeNoCopyMayOverallocate(t *testing.T) {
for i := 0; i < 1000; i++ {
b := ResizeWithCopy(nil, i)
b := ResizeNoCopyMayOverallocate(nil, i)
if len(b) != i {
t.Fatalf("invalid b size; got %d; expecting %d", len(b), i)
t.Fatalf("invalid b size; got %d; want %d", len(b), i)
}
b1 := ResizeWithCopy(b, i)
capExpected := roundToNearestPow2(i)
if cap(b) != capExpected {
t.Fatalf("invalid cap(b); got %d; want %d", cap(b), capExpected)
}
b1 := ResizeNoCopyMayOverallocate(b, i)
if len(b1) != len(b) || (len(b) > 0 && &b1[0] != &b[0]) {
t.Fatalf("invalid b1; got %x; expecting %x", &b1[0], &b[0])
t.Fatalf("invalid b1; got %x; want %x", &b1[0], &b[0])
}
b2 := ResizeWithCopy(b[:0], i)
if cap(b1) != capExpected {
t.Fatalf("invalid cap(b1); got %d; want %d", cap(b1), capExpected)
}
b2 := ResizeNoCopyMayOverallocate(b[:0], i)
if len(b2) != len(b) || (len(b) > 0 && &b2[0] != &b[0]) {
t.Fatalf("invalid b2; got %x; expecting %x", &b2[0], &b[0])
t.Fatalf("invalid b2; got %x; want %x", &b2[0], &b[0])
}
if cap(b2) != capExpected {
t.Fatalf("invalid cap(b2); got %d; want %d", cap(b2), capExpected)
}
if i > 0 {
b[0] = 123
b3 := ResizeWithCopy(b, i+1)
b3 := ResizeNoCopyMayOverallocate(b, i+1)
if len(b3) != i+1 {
t.Fatalf("invalid b3 len; got %d; want %d", len(b3), i+1)
}
capExpected = roundToNearestPow2(i + 1)
if cap(b3) != capExpected {
t.Fatalf("invalid cap(b3); got %d; want %d", cap(b3), capExpected)
}
}
}
}
func TestResizeWithCopyNoOverallocate(t *testing.T) {
for i := 0; i < 1000; i++ {
b := ResizeWithCopyNoOverallocate(nil, i)
if len(b) != i {
t.Fatalf("invalid b size; got %d; want %d", len(b), i)
}
if cap(b) != i {
t.Fatalf("invalid cap(b); got %d; want %d", cap(b), i)
}
b1 := ResizeWithCopyNoOverallocate(b, i)
if len(b1) != len(b) || (len(b) > 0 && &b1[0] != &b[0]) {
t.Fatalf("invalid b1; got %x; want %x", &b1[0], &b[0])
}
if cap(b1) != i {
t.Fatalf("invalid cap(b1); got %d; want %d", cap(b1), i)
}
b2 := ResizeWithCopyNoOverallocate(b[:0], i)
if len(b2) != len(b) || (len(b) > 0 && &b2[0] != &b[0]) {
t.Fatalf("invalid b2; got %x; want %x", &b2[0], &b[0])
}
if cap(b2) != i {
t.Fatalf("invalid cap(b2); got %d; want %d", cap(b2), i)
}
if i > 0 {
b[0] = 123
b3 := ResizeWithCopyNoOverallocate(b, i+1)
if len(b3) != i+1 {
t.Fatalf("invalid b3 len; got %d; want %d", len(b3), i+1)
}
if cap(b3) != i+1 {
t.Fatalf("invalid cap(b3); got %d; want %d", cap(b3), i+1)
}
if &b3[0] == &b[0] {
t.Fatalf("b3 must be newly allocated for i=%d", i)
}
@ -65,6 +151,47 @@ func TestResizeWithCopy(t *testing.T) {
}
}
func TestResizeWithCopyMayOverallocate(t *testing.T) {
for i := 0; i < 1000; i++ {
b := ResizeWithCopyMayOverallocate(nil, i)
if len(b) != i {
t.Fatalf("invalid b size; got %d; want %d", len(b), i)
}
capExpected := roundToNearestPow2(i)
if cap(b) != capExpected {
t.Fatalf("invalid cap(b); got %d; want %d", cap(b), capExpected)
}
b1 := ResizeWithCopyMayOverallocate(b, i)
if len(b1) != len(b) || (len(b) > 0 && &b1[0] != &b[0]) {
t.Fatalf("invalid b1; got %x; want %x", &b1[0], &b[0])
}
if cap(b1) != capExpected {
t.Fatalf("invalid cap(b1); got %d; want %d", cap(b1), capExpected)
}
b2 := ResizeWithCopyMayOverallocate(b[:0], i)
if len(b2) != len(b) || (len(b) > 0 && &b2[0] != &b[0]) {
t.Fatalf("invalid b2; got %x; want %x", &b2[0], &b[0])
}
if cap(b2) != capExpected {
t.Fatalf("invalid cap(b2); got %d; want %d", cap(b2), capExpected)
}
if i > 0 {
b[0] = 123
b3 := ResizeWithCopyMayOverallocate(b, i+1)
if len(b3) != i+1 {
t.Fatalf("invalid b3 len; got %d; want %d", len(b3), i+1)
}
capExpected = roundToNearestPow2(i + 1)
if cap(b3) != capExpected {
t.Fatalf("invalid cap(b3); got %d; want %d", cap(b3), capExpected)
}
if b3[0] != b[0] || b3[0] != 123 {
t.Fatalf("b3[0] must equal b[0]; got %d; want %d", b3[0], b[0])
}
}
}
}
func TestToUnsafeString(t *testing.T) {
s := "str"
if !bytes.Equal([]byte("str"), ToUnsafeBytes(s)) {

View file

@ -7,33 +7,42 @@ import (
"runtime"
"strconv"
"strings"
"github.com/VictoriaMetrics/metrics"
)
// AvailableCPUs returns the number of available CPU cores for the app.
//
// The number is rounded to the next integer value if a fractional number of CPU cores is available.
func AvailableCPUs() int {
return runtime.GOMAXPROCS(-1)
}
func init() {
updateGOMAXPROCSToCPUQuota()
cpuQuota := getCPUQuota()
if cpuQuota > 0 {
updateGOMAXPROCSToCPUQuota(cpuQuota)
}
cpuCoresAvailable := cpuQuota
if cpuCoresAvailable <= 0 {
cpuCoresAvailable = float64(runtime.NumCPU())
}
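// Export the effective CPU budget (cgroup quota when set, otherwise all host CPUs),
// so CPU usage can be compared against the real limit.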
metrics.NewGauge(`process_cpu_cores_available`, func() float64 {
return cpuCoresAvailable
})
}
// updateGOMAXPROCSToCPUQuota updates GOMAXPROCS to cgroup CPU quota if GOMAXPROCS isn't set in environment var.
func updateGOMAXPROCSToCPUQuota() {
// updateGOMAXPROCSToCPUQuota updates GOMAXPROCS to cpuQuota if the GOMAXPROCS environment variable isn't set.
func updateGOMAXPROCSToCPUQuota(cpuQuota float64) {
if v := os.Getenv("GOMAXPROCS"); v != "" {
// Do not override explicitly set GOMAXPROCS.
return
}
q := getCPUQuota()
if q <= 0 {
// Do not change GOMAXPROCS
return
}
gomaxprocs := int(q + 0.5)
gomaxprocs := int(cpuQuota + 0.5)
numCPU := runtime.NumCPU()
if gomaxprocs > numCPU {
// There is no sense in setting more GOMAXPROCS than the number of available CPU cores.
return
gomaxprocs = numCPU
}
if gomaxprocs <= 0 {
gomaxprocs = 1

View file

@ -93,7 +93,7 @@ func nearestDelta(next, prev int64, precisionBits, prevTrailingZeros uint8) (int
// There is no need in handling special case origin = -1<<63.
}
originBits := 64 - uint8(bits.LeadingZeros64(uint64(origin)))
originBits := uint8(bits.Len64(uint64(origin)))
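// bits.Len64(x) equals 64-bits.LeadingZeros64(x), i.e. the number of significant bits in x.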
if originBits <= precisionBits {
// Cannot zero trailing bits for the given precisionBits.
return d, decIfNonZero(prevTrailingZeros)
@ -136,7 +136,7 @@ func getTrailingZeros(v int64, precisionBits uint8) uint8 {
v = -v
// There is no need in special case handling for v = -1<<63
}
vBits := 64 - uint8(bits.LeadingZeros64(uint64(v)))
vBits := uint8(bits.Len64(uint64(v)))
if vBits <= precisionBits {
return 0
}

View file

@ -135,7 +135,7 @@ func (s *Server) serveUDP(insertHandler func(r io.Reader) error) {
go func() {
defer wg.Done()
var bb bytesutil.ByteBuffer
bb.B = bytesutil.ResizeNoCopy(bb.B, 64*1024)
bb.B = bytesutil.ResizeNoCopyNoOverallocate(bb.B, 64*1024)
for {
bb.Reset()
bb.B = bb.B[:cap(bb.B)]

View file

@ -135,7 +135,7 @@ func (s *Server) serveUDP(insertHandler func(r io.Reader) error) {
go func() {
defer wg.Done()
var bb bytesutil.ByteBuffer
bb.B = bytesutil.ResizeNoCopy(bb.B, 64*1024)
bb.B = bytesutil.ResizeNoCopyNoOverallocate(bb.B, 64*1024)
for {
bb.Reset()
bb.B = bb.B[:cap(bb.B)]

View file

@ -153,7 +153,7 @@ func (s *Server) serveUDP(insertHandler func(r io.Reader) error) {
go func() {
defer wg.Done()
var bb bytesutil.ByteBuffer
bb.B = bytesutil.ResizeNoCopy(bb.B, 64*1024)
bb.B = bytesutil.ResizeNoCopyNoOverallocate(bb.B, 64*1024)
for {
bb.Reset()
bb.B = bb.B[:cap(bb.B)]

View file

@ -211,13 +211,13 @@ func (bsr *blockStreamReader) Next() bool {
bsr.bh = &bsr.bhs[bsr.bhIdx]
bsr.bhIdx++
bsr.sb.itemsData = bytesutil.ResizeNoCopy(bsr.sb.itemsData, int(bsr.bh.itemsBlockSize))
bsr.sb.itemsData = bytesutil.ResizeNoCopyMayOverallocate(bsr.sb.itemsData, int(bsr.bh.itemsBlockSize))
if err := fs.ReadFullData(bsr.itemsReader, bsr.sb.itemsData); err != nil {
bsr.err = fmt.Errorf("cannot read compressed items block with size %d: %w", bsr.bh.itemsBlockSize, err)
return false
}
bsr.sb.lensData = bytesutil.ResizeNoCopy(bsr.sb.lensData, int(bsr.bh.lensBlockSize))
bsr.sb.lensData = bytesutil.ResizeNoCopyMayOverallocate(bsr.sb.lensData, int(bsr.bh.lensBlockSize))
if err := fs.ReadFullData(bsr.lensReader, bsr.sb.lensData); err != nil {
bsr.err = fmt.Errorf("cannot read compressed lens block with size %d: %w", bsr.bh.lensBlockSize, err)
return false
@ -260,7 +260,7 @@ func (bsr *blockStreamReader) readNextBHS() error {
bsr.mrIdx++
// Read compressed index block.
bsr.packedBuf = bytesutil.ResizeNoCopy(bsr.packedBuf, int(mr.indexBlockSize))
bsr.packedBuf = bytesutil.ResizeNoCopyMayOverallocate(bsr.packedBuf, int(mr.indexBlockSize))
if err := fs.ReadFullData(bsr.indexReader, bsr.packedBuf); err != nil {
return fmt.Errorf("cannot read compressed index block with size %d: %w", mr.indexBlockSize, err)
}

View file

@ -117,9 +117,10 @@ func (ib *inmemoryBlock) Add(x []byte) bool {
if len(x)+len(data) > maxInmemoryBlockSize {
return false
}
if cap(data) < maxInmemoryBlockSize {
dataLen := len(data)
data = bytesutil.ResizeWithCopy(data, maxInmemoryBlockSize)[:dataLen]
if cap(data) == 0 {
// Pre-allocate data and items in order to reduce memory allocations
data = make([]byte, 0, maxInmemoryBlockSize)
ib.items = make([]Item, 0, 512)
}
dataLen := len(data)
data = append(data, x...)
@ -141,7 +142,7 @@ func (ib *inmemoryBlock) sort() {
data := ib.data
items := ib.items
bb := bbPool.Get()
b := bytesutil.ResizeNoCopy(bb.B, len(data))
b := bytesutil.ResizeNoCopyMayOverallocate(bb.B, len(data))
b = b[:0]
for i, it := range items {
bLen := len(b)
@ -394,7 +395,7 @@ func (ib *inmemoryBlock) UnmarshalData(sb *storageBlock, firstItem, commonPrefix
// Resize ib.data to dataLen instead of maxInmemoryBlockSize,
// since the data isn't going to be resized after unmarshaling.
// This may save memory for caching the unmarshaled block.
data := bytesutil.ResizeNoCopy(ib.data, dataLen)
data := bytesutil.ResizeNoCopyNoOverallocate(ib.data, dataLen)
if n := int(itemsCount) - cap(ib.items); n > 0 {
ib.items = append(ib.items[:cap(ib.items)], make([]Item, n)...)
}
@ -492,7 +493,8 @@ func (ib *inmemoryBlock) unmarshalDataPlain(sb *storageBlock, firstItem []byte,
// Unmarshal items data.
data := ib.data
items := ib.items
data = bytesutil.ResizeNoCopy(data, len(firstItem)+len(sb.itemsData)+len(commonPrefix)*int(itemsCount))
dataLen := len(firstItem) + len(sb.itemsData) + len(commonPrefix)*(int(itemsCount)-1)
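// firstItem already includes commonPrefix, so the prefix contributes only for the remaining itemsCount-1 items.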
data = bytesutil.ResizeNoCopyNoOverallocate(data, dataLen)
data = append(data[:0], firstItem...)
items = append(items[:0], Item{
Start: 0,
@ -504,20 +506,23 @@ func (ib *inmemoryBlock) unmarshalDataPlain(sb *storageBlock, firstItem []byte,
if uint64(len(b)) < itemLen {
return fmt.Errorf("not enough data for decoding item from itemsData; want %d bytes; remained %d bytes", itemLen, len(b))
}
dataLen := len(data)
dataStart := len(data)
data = append(data, commonPrefix...)
data = append(data, b[:itemLen]...)
items = append(items, Item{
Start: uint32(dataLen),
Start: uint32(dataStart),
End: uint32(len(data)),
})
b = b[itemLen:]
}
ib.data = data
ib.items = items
if len(b) > 0 {
return fmt.Errorf("unexpected tail left after itemsData with len %d: %q", len(b), b)
}
if len(data) != dataLen {
return fmt.Errorf("unexpected data len; got %d; want %d", len(data), dataLen)
}
ib.data = data
ib.items = items
return nil
}

View file

@ -274,7 +274,7 @@ func (ps *partSearch) nextBHS() error {
}
func (ps *partSearch) readIndexBlock(mr *metaindexRow) (*indexBlock, error) {
ps.compressedIndexBuf = bytesutil.ResizeNoCopy(ps.compressedIndexBuf, int(mr.indexBlockSize))
ps.compressedIndexBuf = bytesutil.ResizeNoCopyMayOverallocate(ps.compressedIndexBuf, int(mr.indexBlockSize))
ps.p.indexFile.MustReadAt(ps.compressedIndexBuf, int64(mr.indexBlockOffset))
var err error
@ -311,10 +311,10 @@ func (ps *partSearch) getInmemoryBlock(bh *blockHeader) (*inmemoryBlock, error)
func (ps *partSearch) readInmemoryBlock(bh *blockHeader) (*inmemoryBlock, error) {
ps.sb.Reset()
ps.sb.itemsData = bytesutil.ResizeNoCopy(ps.sb.itemsData, int(bh.itemsBlockSize))
ps.sb.itemsData = bytesutil.ResizeNoCopyMayOverallocate(ps.sb.itemsData, int(bh.itemsBlockSize))
ps.p.itemsFile.MustReadAt(ps.sb.itemsData, int64(bh.itemsBlockOffset))
ps.sb.lensData = bytesutil.ResizeNoCopy(ps.sb.lensData, int(bh.lensBlockSize))
ps.sb.lensData = bytesutil.ResizeNoCopyMayOverallocate(ps.sb.lensData, int(bh.lensBlockSize))
ps.p.lensFile.MustReadAt(ps.sb.lensData, int64(bh.lensBlockOffset))
ib := getInmemoryBlock()

View file

@ -89,24 +89,20 @@ func (ts *TableSearch) Seek(k []byte) {
ts.err = nil
// Initialize the psHeap.
var errors []error
ts.psHeap = ts.psHeap[:0]
for i := range ts.psPool {
ps := &ts.psPool[i]
ps.Seek(k)
if !ps.NextItem() {
if err := ps.Error(); err != nil {
errors = append(errors, err)
// Return only the first error, since there is no sense in returning all of them.
ts.err = fmt.Errorf("cannot seek %q: %w", k, err)
return
}
continue
}
ts.psHeap = append(ts.psHeap, ps)
}
if len(errors) > 0 {
// Return only the first error, since it has no sense in returning all errors.
ts.err = fmt.Errorf("cannot seek %q: %w", k, errors[0])
return
}
if len(ts.psHeap) == 0 {
ts.err = io.EOF
return

View file

@ -499,7 +499,7 @@ func (q *queue) readBlock(dst []byte) ([]byte, error) {
again:
// Read block len.
header := headerBufPool.Get()
header.B = bytesutil.ResizeNoCopy(header.B, 8)
header.B = bytesutil.ResizeNoCopyMayOverallocate(header.B, 8)
err := q.readFull(header.B)
blockLen := encoding.UnmarshalUint64(header.B)
headerBufPool.Put(header)
@ -520,7 +520,7 @@ again:
// Read block contents.
dstLen := len(dst)
dst = bytesutil.ResizeWithCopy(dst, dstLen+int(blockLen))
dst = bytesutil.ResizeWithCopyMayOverallocate(dst, dstLen+int(blockLen))
if err := q.readFull(dst[dstLen:]); err != nil {
logger.Errorf("skipping corrupted %q, since contents with size %d bytes cannot be read from it: %s", q.readerPath, blockLen, err)
if err := q.skipBrokenChunkFile(); err != nil {

View file

@ -40,7 +40,7 @@ func ReadLinesBlock(r io.Reader, dstBuf, tailBuf []byte) ([]byte, []byte, error)
func ReadLinesBlockExt(r io.Reader, dstBuf, tailBuf []byte, maxLineLen int) ([]byte, []byte, error) {
startTime := time.Now()
if cap(dstBuf) < defaultBlockSize {
dstBuf = bytesutil.ResizeNoCopy(dstBuf, defaultBlockSize)
dstBuf = bytesutil.ResizeNoCopyNoOverallocate(dstBuf, defaultBlockSize)
}
dstBuf = append(dstBuf[:0], tailBuf...)
tailBuf = tailBuf[:0]
@ -79,7 +79,7 @@ again:
if cap(dstBuf) < 2*len(dstBuf) {
// Increase dstBuf capacity so more data can be read into it.
dstBufLen := len(dstBuf)
dstBuf = bytesutil.ResizeWithCopy(dstBuf, 2*cap(dstBuf))
dstBuf = bytesutil.ResizeWithCopyNoOverallocate(dstBuf, 2*cap(dstBuf))
dstBuf = dstBuf[:dstBufLen]
}
goto again

View file

@ -84,7 +84,7 @@ func ParseStream(req *http.Request, callback func(block *Block) error) error {
wg.Wait()
return fmt.Errorf("too big metricName size; got %d; shouldn't exceed %d", bufSize, 1024*1024)
}
uw.metricNameBuf = bytesutil.ResizeNoCopy(uw.metricNameBuf, int(bufSize))
uw.metricNameBuf = bytesutil.ResizeNoCopyMayOverallocate(uw.metricNameBuf, int(bufSize))
if _, err := io.ReadFull(br, uw.metricNameBuf); err != nil {
readErrors.Inc()
wg.Wait()
@ -105,7 +105,7 @@ func ParseStream(req *http.Request, callback func(block *Block) error) error {
wg.Wait()
return fmt.Errorf("too big native block size; got %d; shouldn't exceed %d", bufSize, 1024*1024)
}
uw.blockBuf = bytesutil.ResizeNoCopy(uw.blockBuf, int(bufSize))
uw.blockBuf = bytesutil.ResizeNoCopyMayOverallocate(uw.blockBuf, int(bufSize))
if _, err := io.ReadFull(br, uw.blockBuf); err != nil {
readErrors.Inc()
wg.Wait()

View file

@ -18,6 +18,11 @@ type blockStreamReader struct {
// Currently active block.
Block Block
// Contains TSID for the previous block.
// This field is needed for checking that TSIDs
// increase over time when reading blocks.
tsidPrev TSID
// Filesystem path to the stream reader.
//
// Is empty for inmemory stream readers.
@ -213,12 +218,12 @@ func (bsr *blockStreamReader) NextBlock() bool {
if bsr.err != nil {
return false
}
tsidPrev := bsr.Block.bh.TSID
bsr.tsidPrev = bsr.Block.bh.TSID
bsr.Block.Reset()
err := bsr.readBlock()
if err == nil {
if bsr.Block.bh.TSID.Less(&tsidPrev) {
bsr.err = fmt.Errorf("possible data corruption: the next TSID=%v is smaller than the previous TSID=%v", &bsr.Block.bh.TSID, &tsidPrev)
if bsr.Block.bh.TSID.Less(&bsr.tsidPrev) {
bsr.err = fmt.Errorf("possible data corruption: the next TSID=%v is smaller than the previous TSID=%v", &bsr.Block.bh.TSID, &bsr.tsidPrev)
return false
}
if bsr.Block.bh.RowsCount == 0 {
@ -303,7 +308,7 @@ func (bsr *blockStreamReader) readBlock() error {
if usePrevTimestamps {
bsr.Block.timestampsData = append(bsr.Block.timestampsData[:0], bsr.prevTimestampsData...)
} else {
bsr.Block.timestampsData = bytesutil.ResizeNoCopy(bsr.Block.timestampsData, int(bsr.Block.bh.TimestampsBlockSize))
bsr.Block.timestampsData = bytesutil.ResizeNoCopyMayOverallocate(bsr.Block.timestampsData, int(bsr.Block.bh.TimestampsBlockSize))
if err := fs.ReadFullData(bsr.timestampsReader, bsr.Block.timestampsData); err != nil {
return fmt.Errorf("cannot read timestamps block at offset %d: %w", bsr.timestampsBlockOffset, err)
}
@ -312,7 +317,7 @@ func (bsr *blockStreamReader) readBlock() error {
}
// Read values data.
bsr.Block.valuesData = bytesutil.ResizeNoCopy(bsr.Block.valuesData, int(bsr.Block.bh.ValuesBlockSize))
bsr.Block.valuesData = bytesutil.ResizeNoCopyMayOverallocate(bsr.Block.valuesData, int(bsr.Block.bh.ValuesBlockSize))
if err := fs.ReadFullData(bsr.valuesReader, bsr.Block.valuesData); err != nil {
return fmt.Errorf("cannot read values block at offset %d: %w", bsr.valuesBlockOffset, err)
}
@ -347,7 +352,7 @@ func (bsr *blockStreamReader) readIndexBlock() error {
}
// Read index block.
bsr.compressedIndexData = bytesutil.ResizeNoCopy(bsr.compressedIndexData, int(bsr.mr.IndexBlockSize))
bsr.compressedIndexData = bytesutil.ResizeNoCopyMayOverallocate(bsr.compressedIndexData, int(bsr.mr.IndexBlockSize))
if err := fs.ReadFullData(bsr.indexReader, bsr.compressedIndexData); err != nil {
return fmt.Errorf("cannot read index block from index data at offset %d: %w", bsr.indexBlockOffset, err)
}

View file

@ -376,7 +376,7 @@ func (mn *MetricName) Marshal(dst []byte) []byte {
tag := &mn.Tags[i]
requiredSize += len(tag.Key) + len(tag.Value) + 2
}
dst = bytesutil.ResizeWithCopy(dst, requiredSize)[:dstLen]
dst = bytesutil.ResizeWithCopyMayOverallocate(dst, requiredSize)[:dstLen]
// Marshal MetricGroup
dst = marshalTagValue(dst, mn.MetricGroup)
@ -483,7 +483,7 @@ func MarshalMetricNameRaw(dst []byte, labels []prompb.Label) []byte {
dstSize += len(label.Value)
dstSize += 4
}
dst = bytesutil.ResizeWithCopy(dst, dstSize)[:dstLen]
dst = bytesutil.ResizeWithCopyMayOverallocate(dst, dstSize)[:dstLen]
// Marshal labels to dst.
for i := range labels {

View file

@ -211,7 +211,7 @@ func skipSmallMetaindexRows(metaindex []metaindexRow, tsid *TSID) []metaindexRow
}
func (ps *partSearch) readIndexBlock(mr *metaindexRow) (*indexBlock, error) {
ps.compressedIndexBuf = bytesutil.ResizeNoCopy(ps.compressedIndexBuf, int(mr.IndexBlockSize))
ps.compressedIndexBuf = bytesutil.ResizeNoCopyMayOverallocate(ps.compressedIndexBuf, int(mr.IndexBlockSize))
ps.p.indexFile.MustReadAt(ps.compressedIndexBuf, int64(mr.IndexBlockOffset))
var err error

View file

@ -92,23 +92,19 @@ func (pts *partitionSearch) Init(pt *partition, tsids []TSID, tr TimeRange) {
}
// Initialize the psHeap.
var errors []error
pts.psHeap = pts.psHeap[:0]
for i := range pts.psPool {
ps := &pts.psPool[i]
if !ps.NextBlock() {
if err := ps.Error(); err != nil {
errors = append(errors, err)
// Return only the first error, since there is no sense in returning all of them.
pts.err = fmt.Errorf("cannot initialize partition search: %w", err)
return
}
continue
}
pts.psHeap = append(pts.psHeap, ps)
}
if len(errors) > 0 {
// Return only the first error, since it has no sense in returning all errors.
pts.err = fmt.Errorf("cannot initialize partition search: %w", errors[0])
return
}
if len(pts.psHeap) == 0 {
pts.err = io.EOF
return

View file

@ -75,10 +75,10 @@ func (br *BlockRef) MustReadBlock(dst *Block, fetchData bool) {
return
}
dst.timestampsData = bytesutil.ResizeNoCopy(dst.timestampsData, int(br.bh.TimestampsBlockSize))
dst.timestampsData = bytesutil.ResizeNoCopyMayOverallocate(dst.timestampsData, int(br.bh.TimestampsBlockSize))
br.p.timestampsFile.MustReadAt(dst.timestampsData, int64(br.bh.TimestampsBlockOffset))
dst.valuesData = bytesutil.ResizeNoCopy(dst.valuesData, int(br.bh.ValuesBlockSize))
dst.valuesData = bytesutil.ResizeNoCopyMayOverallocate(dst.valuesData, int(br.bh.ValuesBlockSize))
br.p.valuesFile.MustReadAt(dst.valuesData, int64(br.bh.ValuesBlockOffset))
}

View file

@ -338,7 +338,6 @@ func (tb *table) AddRows(rows []rawRow) error {
// Do this under tb.ptwsLock.
minTimestamp, maxTimestamp := tb.getMinMaxTimestamps()
tb.ptwsLock.Lock()
var errors []error
for i := range missingRows {
r := &missingRows[i]
@ -362,18 +361,15 @@ func (tb *table) AddRows(rows []rawRow) error {
pt, err := createPartition(r.Timestamp, tb.smallPartitionsPath, tb.bigPartitionsPath, tb.getDeletedMetricIDs, tb.retentionMsecs)
if err != nil {
errors = append(errors, err)
continue
// Return only the first error, since there is no sense in returning all of them.
tb.ptwsLock.Unlock()
return fmt.Errorf("errors while adding rows to table %q: %w", tb.path, err)
}
pt.AddRows(missingRows[i : i+1])
tb.addPartitionNolock(pt)
}
tb.ptwsLock.Unlock()
if len(errors) > 0 {
// Return only the first error, since it has no sense in returning all errors.
return fmt.Errorf("errors while adding rows to table %q: %w", tb.path, errors[0])
}
return nil
}

View file

@ -93,23 +93,19 @@ func (ts *tableSearch) Init(tb *table, tsids []TSID, tr TimeRange) {
}
// Initialize the ptsHeap.
var errors []error
ts.ptsHeap = ts.ptsHeap[:0]
for i := range ts.ptsPool {
pts := &ts.ptsPool[i]
if !pts.NextBlock() {
if err := pts.Error(); err != nil {
errors = append(errors, err)
// Return only the first error, since there is no sense in returning all of them.
ts.err = fmt.Errorf("cannot initialize table search: %w", err)
return
}
continue
}
ts.ptsHeap = append(ts.ptsHeap, pts)
}
if len(errors) > 0 {
// Return only the first error, since it has no sense in returning all errors.
ts.err = fmt.Errorf("cannot initialize table search: %w", errors[0])
return
}
if len(ts.ptsHeap) == 0 {
ts.err = io.EOF
return

View file

@ -1,5 +1,37 @@
# Changes
## [1.19.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.2...storage/v1.19.0) (2022-01-25)
### Features
* **storage:** add fully configurable and idempotency-aware retry strategy ([#5384](https://www.github.com/googleapis/google-cloud-go/issues/5384), [#5185](https://www.github.com/googleapis/google-cloud-go/issues/5185), [#5170](https://www.github.com/googleapis/google-cloud-go/issues/5170), [#5223](https://www.github.com/googleapis/google-cloud-go/issues/5223), [#5221](https://www.github.com/googleapis/google-cloud-go/issues/5221), [#5193](https://www.github.com/googleapis/google-cloud-go/issues/5193), [#5159](https://www.github.com/googleapis/google-cloud-go/issues/5159), [#5165](https://www.github.com/googleapis/google-cloud-go/issues/5165), [#5166](https://www.github.com/googleapis/google-cloud-go/issues/5166), [#5210](https://www.github.com/googleapis/google-cloud-go/issues/5210), [#5172](https://www.github.com/googleapis/google-cloud-go/issues/5172), [#5314](https://www.github.com/googleapis/google-cloud-go/issues/5314))
* This release contains changes to fully align this library's retry strategy
with best practices as described in the
Cloud Storage [docs](https://cloud.google.com/storage/docs/retry-strategy).
* The library will now retry only idempotent operations by default. This means
that for certain operations, including object upload, compose, rewrite,
update, and delete, requests will not be retried by default unless
[idempotency conditions](https://cloud.google.com/storage/docs/retry-strategy#idempotency)
for the request have been met.
* The library now has methods to configure aspects of retry policy for
API calls, including which errors are retried, the timing of the
exponential backoff, and how idempotency is taken into account.
* If you wish to re-enable retries for a non-idempotent request, use the
[RetryAlways](https://pkg.go.dev/cloud.google.com/go/storage@main#RetryAlways)
policy.
* For full details on how to configure retries, see the
[package docs](https://pkg.go.dev/cloud.google.com/go/storage@main#hdr-Retrying_failed_requests)
and the
[Cloud Storage docs](https://cloud.google.com/storage/docs/retry-strategy)
* **storage:** GenerateSignedPostPolicyV4 can use existing creds to authenticate ([#5105](https://www.github.com/googleapis/google-cloud-go/issues/5105)) ([46489f4](https://www.github.com/googleapis/google-cloud-go/commit/46489f4c8a634068a3e7cf2fd5e5ca11b555c0a8))
* **storage:** post policy can be signed with a fn that takes raw bytes ([#5079](https://www.github.com/googleapis/google-cloud-go/issues/5079)) ([25d1278](https://www.github.com/googleapis/google-cloud-go/commit/25d1278cab539fbfdd8563ed6b297e30d3fe555c))
* **storage:** add rpo (turbo replication) support ([#5003](https://www.github.com/googleapis/google-cloud-go/issues/5003)) ([3bd5995](https://www.github.com/googleapis/google-cloud-go/commit/3bd59958e0c06d2655b67fcb5410668db3c52af0))
### Bug Fixes
* **storage:** fix nil check in gRPC Reader ([#5376](https://www.github.com/googleapis/google-cloud-go/issues/5376)) ([5e7d722](https://www.github.com/googleapis/google-cloud-go/commit/5e7d722d18a62b28ba98169b3bdbb49401377264))
### [1.18.2](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.1...storage/v1.18.2) (2021-10-18)

View file

@ -3,7 +3,7 @@
- [About Cloud Storage](https://cloud.google.com/storage/)
- [API documentation](https://cloud.google.com/storage/docs)
- [Go client documentation](https://pkg.go.dev/cloud.google.com/go/storage)
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage)
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/main/storage)
### Example Usage

View file

@ -73,6 +73,7 @@ type ACLHandle struct {
object string
isDefault bool
userProject string // for requester-pays buckets
retry *retryConfig
}
// Delete permanently deletes the ACL entry for the given entity.
@ -120,12 +121,12 @@ func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) {
func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
var acls *raw.ObjectAccessControls
var err error
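// run replaces runWithRetry: the trailing bool tells the retry logic whether the
// wrapped call is idempotent; read-only list calls are, so they are safe to retry.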
err = runWithRetry(ctx, func() error {
err = run(ctx, func() error {
req := a.c.raw.DefaultObjectAccessControls.List(a.bucket)
a.configureCall(ctx, req)
acls, err = req.Do()
return err
})
}, a.retry, true)
if err != nil {
return nil, err
}
@ -135,18 +136,21 @@ func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity))
a.configureCall(ctx, req)
return run(ctx, func() error {
return req.Do()
}, a.retry, false)
}
func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
var acls *raw.BucketAccessControls
var err error
err = runWithRetry(ctx, func() error {
err = run(ctx, func() error {
req := a.c.raw.BucketAccessControls.List(a.bucket)
a.configureCall(ctx, req)
acls, err = req.Do()
return err
})
}, a.retry, true)
if err != nil {
return nil, err
}
@ -161,25 +165,29 @@ func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRol
}
req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl)
a.configureCall(ctx, req)
return run(ctx, func() error {
_, err := req.Do()
return err
}, a.retry, false)
}
func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity))
a.configureCall(ctx, req)
return run(ctx, func() error {
return req.Do()
}, a.retry, false)
}
func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
var acls *raw.ObjectAccessControls
var err error
err = runWithRetry(ctx, func() error {
err = run(ctx, func() error {
req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object)
a.configureCall(ctx, req)
acls, err = req.Do()
return err
})
}, a.retry, true)
if err != nil {
return nil, err
}
@ -204,14 +212,18 @@ func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRol
req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl)
}
a.configureCall(ctx, req)
return run(ctx, func() error {
_, err := req.Do()
return err
}, a.retry, false)
}
func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity))
a.configureCall(ctx, req)
return run(ctx, func() error {
return req.Do()
}, a.retry, false)
}
func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) {

View file

@ -44,6 +44,7 @@ type BucketHandle struct {
defaultObjectACL ACLHandle
conds *BucketConditions
userProject string // project for Requester Pays buckets
retry *retryConfig
}
// Bucket returns a BucketHandle, which provides operations on the named bucket.
@ -54,18 +55,22 @@ type BucketHandle struct {
// found at:
// https://cloud.google.com/storage/docs/bucket-naming
func (c *Client) Bucket(name string) *BucketHandle {
retry := c.retry.clone()
return &BucketHandle{
c: c,
name: name,
acl: ACLHandle{
c: c,
bucket: name,
retry: retry,
},
defaultObjectACL: ACLHandle{
c: c,
bucket: name,
isDefault: true,
retry: retry,
},
retry: retry,
}
}
@ -95,7 +100,7 @@ func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *Buck
if attrs != nil && attrs.PredefinedDefaultObjectACL != "" {
req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL)
}
return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err })
return run(ctx, func() error { _, err := req.Context(ctx).Do(); return err }, b.retry, true)
}
// Delete deletes the Bucket.
@ -107,7 +112,8 @@ func (b *BucketHandle) Delete(ctx context.Context) (err error) {
if err != nil {
return err
}
return runWithRetry(ctx, func() error { return req.Context(ctx).Do() })
return run(ctx, func() error { return req.Context(ctx).Do() }, b.retry, true)
}
func (b *BucketHandle) newDeleteCall() (*raw.BucketsDeleteCall, error) {
@ -144,6 +150,7 @@ func (b *BucketHandle) DefaultObjectACL() *ACLHandle {
// for valid object names can be found at:
// https://cloud.google.com/storage/docs/naming-objects
func (b *BucketHandle) Object(name string) *ObjectHandle {
retry := b.retry.clone()
return &ObjectHandle{
c: b.c,
bucket: b.name,
@ -153,9 +160,11 @@ func (b *BucketHandle) Object(name string) *ObjectHandle {
bucket: b.name,
object: name,
userProject: b.userProject,
retry: retry,
},
gen: -1,
userProject: b.userProject,
retry: retry,
}
}
@ -169,10 +178,10 @@ func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error
return nil, err
}
var resp *raw.Bucket
err = runWithRetry(ctx, func() error {
err = run(ctx, func() error {
resp, err = req.Context(ctx).Do()
return err
})
}, b.retry, true)
var e *googleapi.Error
if ok := xerrors.As(err, &e); ok && e.Code == http.StatusNotFound {
return nil, ErrBucketNotExist
@ -210,12 +219,20 @@ func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (
if uattrs.PredefinedDefaultObjectACL != "" {
req.PredefinedDefaultObjectAcl(uattrs.PredefinedDefaultObjectACL)
}
// TODO(jba): retry iff metagen is set?
isIdempotent := b.conds != nil && b.conds.MetagenerationMatch != 0
var rawBucket *raw.Bucket
call := func() error {
rb, err := req.Context(ctx).Do()
if err != nil {
rawBucket = rb
return err
}
if err := run(ctx, call, b.retry, isIdempotent); err != nil {
return nil, err
}
return newBucket(rb)
return newBucket(rawBucket)
}
func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) {
@ -282,8 +299,54 @@ func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string,
return SignedURL(b.name, object, newopts)
}
// TODO: Add a similar wrapper for GenerateSignedPostPolicyV4 allowing users to
// omit PrivateKey/SignBytes
// GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts.
// The generated URL and fields will then allow an unauthenticated client to perform multipart uploads.
//
// This method only requires the Expires field in the specified PostPolicyV4Options
// to be non-nil. If not provided, it attempts to fill the GoogleAccessID and PrivateKey
// from the GOOGLE_APPLICATION_CREDENTIALS environment variable.
// If you are authenticating with a custom HTTP client, Service Account based
// auto-detection will be hindered.
//
// If no private key is found, it attempts to use the GoogleAccessID to sign the URL.
// This requires the IAM Service Account Credentials API to be enabled
// (https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview)
// and iam.serviceAccounts.signBlob permissions on the GoogleAccessID service account.
// If you do not want these fields set for you, you may pass them in through opts or use
// GenerateSignedPostPolicyV4(bucket, name string, opts *PostPolicyV4Options) instead.
func (b *BucketHandle) GenerateSignedPostPolicyV4(object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) {
if opts.GoogleAccessID != "" && (opts.SignRawBytes != nil || opts.SignBytes != nil || len(opts.PrivateKey) > 0) {
return GenerateSignedPostPolicyV4(b.name, object, opts)
}
// Make a copy of opts so we don't modify the pointer parameter.
newopts := opts.clone()
if newopts.GoogleAccessID == "" {
id, err := b.detectDefaultGoogleAccessID()
if err != nil {
return nil, err
}
newopts.GoogleAccessID = id
}
if newopts.SignBytes == nil && newopts.SignRawBytes == nil && len(newopts.PrivateKey) == 0 {
if b.c.creds != nil && len(b.c.creds.JSON) > 0 {
var sa struct {
PrivateKey string `json:"private_key"`
}
err := json.Unmarshal(b.c.creds.JSON, &sa)
if err == nil && sa.PrivateKey != "" {
newopts.PrivateKey = []byte(sa.PrivateKey)
}
}
// Don't error out if we can't unmarshal the private key from the client,
// fall back to the default sign function for the service account.
if len(newopts.PrivateKey) == 0 {
newopts.SignRawBytes = b.defaultSignBytesFunc(newopts.GoogleAccessID)
}
}
return GenerateSignedPostPolicyV4(b.name, object, newopts)
}
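For illustration only (not part of the diff): a minimal sketch of calling this wrapper, assuming an authenticated `client` in scope and hypothetical bucket/object names, with credentials auto-detected from the environment:
	// Sketch: generate a POST policy using environment credentials.
	pv4, err := client.Bucket("my-bucket").GenerateSignedPostPolicyV4("file.txt", &storage.PostPolicyV4Options{
		Expires: time.Now().Add(10 * time.Minute),
	})
	if err != nil {
		// Handle err.
	}
	fmt.Println(pv4.URL, pv4.Fields)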
func (b *BucketHandle) detectDefaultGoogleAccessID() (string, error) {
returnErr := errors.New("no credentials found on client and not on GCE (Google Compute Engine)")
@ -461,6 +524,12 @@ type BucketAttrs struct {
// The project number of the project the bucket belongs to.
// This field is read-only.
ProjectNumber uint64
// RPO configures the Recovery Point Objective (RPO) policy of the bucket.
// Set to RPOAsyncTurbo to turn on Turbo Replication for a bucket.
// See https://cloud.google.com/storage/docs/managing-turbo-replication for
// more information.
RPO RPO
}
// BucketPolicyOnly is an alias for UniformBucketLevelAccess.
@ -728,6 +797,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
Etag: b.Etag,
LocationType: b.LocationType,
ProjectNumber: b.ProjectNumber,
RPO: toRPO(b),
}, nil
}
@ -780,6 +850,7 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket {
Logging: b.Logging.toRawBucketLogging(),
Website: b.Website.toRawBucketWebsite(),
IamConfiguration: bktIAM,
Rpo: b.RPO.String(),
}
}
@ -889,6 +960,12 @@ type BucketAttrsToUpdate struct {
// See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch.
PredefinedDefaultObjectACL string
// RPO configures the Recovery Point Objective (RPO) policy of the bucket.
// Set to RPOAsyncTurbo to turn on Turbo Replication for a bucket.
// See https://cloud.google.com/storage/docs/managing-turbo-replication for
// more information.
RPO RPO
setLabels map[string]string
deleteLabels map[string]bool
}
@ -1001,7 +1078,10 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
rb.DefaultObjectAcl = nil
rb.ForceSendFields = append(rb.ForceSendFields, "DefaultObjectAcl")
}
rb.StorageClass = ua.StorageClass
rb.Rpo = ua.RPO.String()
if ua.setLabels != nil || ua.deleteLabels != nil {
rb.Labels = map[string]string{}
for k, v := range ua.setLabels {
@ -1081,10 +1161,10 @@ func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error {
metageneration = b.conds.MetagenerationMatch
}
req := b.c.raw.Buckets.LockRetentionPolicy(b.name, metageneration)
return runWithRetry(ctx, func() error {
return run(ctx, func() error {
_, err := req.Context(ctx).Do()
return err
})
}, b.retry, true)
}
// applyBucketConds modifies the provided call using the conditions in conds.
@ -1347,6 +1427,20 @@ func toPublicAccessPrevention(b *raw.BucketIamConfiguration) PublicAccessPrevent
}
}
func toRPO(b *raw.Bucket) RPO {
if b == nil {
return RPOUnknown
}
switch b.Rpo {
case rpoDefault:
return RPODefault
case rpoAsyncTurbo:
return RPOAsyncTurbo
default:
return RPOUnknown
}
}
// Objects returns an iterator over the objects in the bucket that match the
// Query q. If q is nil, no filtering is done. Objects will be iterated over
// lexicographically by name.
@ -1367,6 +1461,33 @@ func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {
return it
}
// Retryer returns a bucket handle that is configured with custom retry
// behavior as specified by the options that are passed to it. All operations
// on the new handle will use the customized retry configuration.
// Retry options set on an object handle will take precedence over options set on
// the bucket handle.
// These retry options will merge with the client's retry configuration (if set)
// for the returned handle. Options passed into this method will take precedence
// over retry options on the client. Note that you must explicitly pass in each
// option you want to override.
func (b *BucketHandle) Retryer(opts ...RetryOption) *BucketHandle {
b2 := *b
var retry *retryConfig
if b.retry != nil {
// merge the options with the existing retry
retry = b.retry
} else {
retry = &retryConfig{}
}
for _, opt := range opts {
opt.apply(retry)
}
b2.retry = retry
b2.acl.retry = retry
b2.defaultObjectACL.retry = retry
return &b2
}
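A hedged sketch of the precedence rules described above (handle names are illustrative): options set on a derived object handle override those inherited from the bucket handle.
	b := client.Bucket("my-bucket").Retryer(storage.WithPolicy(storage.RetryAlways))
	o1 := b.Object("a.txt") // inherits RetryAlways from the bucket handle
	o2 := b.Object("b.txt").Retryer(storage.WithPolicy(storage.RetryNever)) // overrides for this handle only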
// An ObjectIterator is an iterator over ObjectAttrs.
//
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
@ -1434,10 +1555,10 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error)
}
var resp *raw.Objects
var err error
err = runWithRetry(it.ctx, func() error {
err = run(it.ctx, func() error {
resp, err = req.Context(it.ctx).Do()
return err
})
}, it.bucket.retry, true)
if err != nil {
var e *googleapi.Error
if ok := xerrors.As(err, &e); ok && e.Code == http.StatusNotFound {
@ -1518,10 +1639,10 @@ func (it *BucketIterator) fetch(pageSize int, pageToken string) (token string, e
req.MaxResults(int64(pageSize))
}
var resp *raw.Buckets
err = runWithRetry(it.ctx, func() error {
err = run(it.ctx, func() error {
resp, err = req.Context(it.ctx).Do()
return err
})
}, it.client.retry, true)
if err != nil {
return "", err
}
@ -1534,3 +1655,39 @@ func (it *BucketIterator) fetch(pageSize int, pageToken string) (token string, e
}
return resp.NextPageToken, nil
}
// RPO (Recovery Point Objective) configures the turbo replication feature. See
// https://cloud.google.com/storage/docs/managing-turbo-replication for more information.
type RPO int
const (
// RPOUnknown is a zero value. It may be returned from bucket.Attrs() if RPO
// is not present in the bucket metadata, that is, the bucket is not dual-region.
// This value is also used if the RPO field is not set in a call to GCS.
RPOUnknown RPO = iota
// RPODefault represents default replication. It is used to reset RPO on an
// existing bucket that has this field set to RPOAsyncTurbo. Otherwise it
// is equivalent to RPOUnknown, and is always ignored. This value is valid
// for dual- or multi-region buckets.
RPODefault
// RPOAsyncTurbo represents turbo replication and is used to enable Turbo
// Replication on a bucket. This value is only valid for dual-region buckets.
RPOAsyncTurbo
rpoUnknown string = ""
rpoDefault = "DEFAULT"
rpoAsyncTurbo = "ASYNC_TURBO"
)
func (rpo RPO) String() string {
switch rpo {
case RPODefault:
return rpoDefault
case RPOAsyncTurbo:
return rpoAsyncTurbo
default:
return rpoUnknown
}
}
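As a sketch under stated assumptions (hypothetical bucket/project names; `client` and `ctx` in scope), Turbo Replication can be requested at bucket creation via the new RPO field:
	attrs := &storage.BucketAttrs{
		Location: "NAM4", // RPOAsyncTurbo is only valid for dual-region buckets
		RPO:      storage.RPOAsyncTurbo,
	}
	if err := client.Bucket("my-bucket").Create(ctx, "my-project", attrs); err != nil {
		// Handle err.
	}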

View file

@ -138,8 +138,11 @@ func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.Rewr
var res *raw.RewriteResponse
var err error
setClientHeader(call.Header())
err = runWithRetry(ctx, func() error { res, err = call.Do(); return err })
if err != nil {
retryCall := func() error { res, err = call.Do(); return err }
isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist)
if err := run(ctx, retryCall, c.dst.retry, isIdempotent); err != nil {
return nil, err
}
c.RewriteToken = res.RewriteToken
@ -230,8 +233,11 @@ func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
}
var obj *raw.Object
setClientHeader(call.Header())
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
if err != nil {
retryCall := func() error { obj, err = call.Do(); return err }
isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist)
if err := run(ctx, retryCall, c.dst.retry, isIdempotent); err != nil {
return nil, err
}
return newObject(obj), nil

View file

@ -19,15 +19,9 @@ Google Cloud Storage stores data in named objects, which are grouped into bucket
More information about Google Cloud Storage is available at
https://cloud.google.com/storage/docs.
See https://godoc.org/cloud.google.com/go for authentication, timeouts,
See https://pkg.go.dev/cloud.google.com/go for authentication, timeouts,
connection pooling and similar aspects of this package.
All of the methods of this package use exponential backoff to retry calls that fail
with certain errors, as described in
https://cloud.google.com/storage/docs/exponential-backoff. Retrying continues
indefinitely unless the controlling context is canceled or the client is closed. See
context.WithTimeout and context.WithCancel.
Creating a Client
@ -246,12 +240,52 @@ as the documentation of GenerateSignedPostPolicyV4.
Errors
Errors returned by this client are often of the type [`googleapi.Error`](https://godoc.org/google.golang.org/api/googleapi#Error).
These errors can be introspected for more information by using `errors.As` with the richer `googleapi.Error` type. For example:
Errors returned by this client are often of the type googleapi.Error.
These errors can be introspected for more information by using errors.As
with the richer googleapi.Error type. For example:
var e *googleapi.Error
if ok := errors.As(err, &e); ok {
if e.Code == 409 { ... }
}
See https://pkg.go.dev/google.golang.org/api/googleapi#Error for more information.
Retrying failed requests
Methods in this package may retry calls that fail with transient errors.
Retrying continues indefinitely unless the controlling context is canceled, the
client is closed, or a non-transient error is received. To stop retries from
continuing, use context timeouts or cancellation.
The retry strategy in this library follows best practices for Cloud Storage. By
default, operations are retried only if they are idempotent, and exponential
backoff with jitter is employed. In addition, errors are only retried if they
are defined as transient by the service. See
https://cloud.google.com/storage/docs/retry-strategy for more information.
Users can configure non-default retry behavior for a single library call (using
BucketHandle.Retryer and ObjectHandle.Retryer) or for all calls made by a
client (using Client.SetRetry). For example:
o := client.Bucket(bucket).Object(object).Retryer(
// Use WithBackoff to change the timing of the exponential backoff.
storage.WithBackoff(gax.Backoff{
Initial: 2 * time.Second,
}),
// Use WithPolicy to configure the idempotency policy. RetryAlways will
// retry the operation even if it is non-idempotent.
storage.WithPolicy(storage.RetryAlways),
)
// Use a context timeout to set an overall deadline on the call, including all
// potential retries.
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
// Delete an object using the specified strategy and timeout.
if err := o.Delete(ctx); err != nil {
// Handle err.
}
*/
package storage // import "cloud.google.com/go/storage"

vendor/cloud.google.com/go/storage/emulator_test.sh generated vendored Normal file
View file

@ -0,0 +1,71 @@
#!/bin/bash
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Fail on any error
set -eo pipefail
# Display commands being run
set -x
# Only run on Go 1.17+
min_minor_ver=17
v=`go version | { read _ _ v _; echo ${v#go}; }`
comps=(${v//./ })
minor_ver=${comps[1]}
if [ "$minor_ver" -lt "$min_minor_ver" ]; then
echo minor version $minor_ver, skipping
exit 0
fi
export STORAGE_EMULATOR_HOST="http://localhost:9000"
DEFAULT_IMAGE_NAME='gcr.io/cloud-devrel-public-resources/storage-testbench'
DEFAULT_IMAGE_TAG='latest'
DOCKER_IMAGE=${DEFAULT_IMAGE_NAME}:${DEFAULT_IMAGE_TAG}
CONTAINER_NAME=storage_testbench
# Get the docker image for the testbench
docker pull $DOCKER_IMAGE
# Start the testbench
# Note: --net=host makes the container bind directly to the Docker hosts network,
# with no network isolation. If we were to use port-mapping instead, reset connection errors
# would be captured differently and cause unexpected test behaviour.
# The host networking driver works only on Linux hosts.
# See more about using host networking: https://docs.docker.com/network/host/
docker run --name $CONTAINER_NAME --rm --net=host $DOCKER_IMAGE &
echo "Running the Cloud Storage testbench: $STORAGE_EMULATOR_HOST"
# Check that the server is running - retry several times to allow for start-up time
response=$(curl -w "%{http_code}\n" $STORAGE_EMULATOR_HOST --retry-connrefused --retry 5 -o /dev/null)
if [[ $response != 200 ]]
then
echo "Testbench server did not start correctly"
exit 1
fi
# Stop the testbench & cleanup environment variables
function cleanup() {
echo "Cleanup testbench"
docker stop $CONTAINER_NAME
unset STORAGE_EMULATOR_HOST;
}
trap cleanup EXIT
# Run tests
go test -v -timeout 10m ./ -run="TestRetryConformance" -short 2>&1 | tee -a sponge_log.log

View file

@ -89,7 +89,7 @@ type HMACKey struct {
type HMACKeyHandle struct {
projectID string
accessID string
retry *retryConfig
raw *raw.ProjectsHmacKeysService
}
@ -100,6 +100,7 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle {
return &HMACKeyHandle{
projectID: projectID,
accessID: accessID,
retry: c.retry,
raw: raw.NewProjectsHmacKeysService(c.raw),
}
}
@ -126,10 +127,10 @@ func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMAC
var metadata *raw.HmacKeyMetadata
var err error
err = runWithRetry(ctx, func() error {
err = run(ctx, func() error {
metadata, err = call.Context(ctx).Do()
return err
})
}, hkh.retry, true)
if err != nil {
return nil, err
}
@ -156,9 +157,9 @@ func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) err
}
setClientHeader(delCall.Header())
return runWithRetry(ctx, func() error {
return run(ctx, func() error {
return delCall.Context(ctx).Do()
})
}, hkh.retry, true)
}
func pbHmacKeyToHMACKey(pb *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) {
@ -214,8 +215,13 @@ func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEma
setClientHeader(call.Header())
hkPb, err := call.Context(ctx).Do()
if err != nil {
var hkPb *raw.HmacKey
if err := run(ctx, func() error {
h, err := call.Context(ctx).Do()
hkPb = h
return err
}, c.retry, false); err != nil {
return nil, err
}
@ -257,10 +263,11 @@ func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opt
var metadata *raw.HmacKeyMetadata
var err error
err = runWithRetry(ctx, func() error {
isIdempotent := len(au.Etag) > 0
err = run(ctx, func() error {
metadata, err = call.Context(ctx).Do()
return err
})
}, h.retry, isIdempotent)
if err != nil {
return nil, err
@ -285,6 +292,7 @@ type HMACKeysIterator struct {
nextFunc func() error
index int
desc hmacKeyDesc
retry *retryConfig
}
// ListHMACKeys returns an iterator for listing HMACKeys.
@ -297,6 +305,7 @@ func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMA
ctx: ctx,
raw: raw.NewProjectsHmacKeysService(c.raw),
projectID: projectID,
retry: c.retry,
}
for _, opt := range opts {
@ -361,10 +370,10 @@ func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string,
ctx := it.ctx
var resp *raw.HmacKeysMetadata
err = runWithRetry(it.ctx, func() error {
err = run(it.ctx, func() error {
resp, err = call.Context(ctx).Do()
return err
})
}, it.retry, true)
if err != nil {
return "", err
}

View file

@ -29,6 +29,7 @@ func (b *BucketHandle) IAM() *iam.Handle {
return iam.InternalNewHandleClient(&iamClient{
raw: b.c.raw,
userProject: b.userProject,
retry: b.retry,
}, b.name)
}
@ -36,6 +37,7 @@ func (b *BucketHandle) IAM() *iam.Handle {
type iamClient struct {
raw *raw.Service
userProject string
retry *retryConfig
}
func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) {
@ -52,10 +54,10 @@ func (c *iamClient) GetWithVersion(ctx context.Context, resource string, request
call.UserProject(c.userProject)
}
var rp *raw.Policy
err = runWithRetry(ctx, func() error {
err = run(ctx, func() error {
rp, err = call.Context(ctx).Do()
return err
})
}, c.retry, true)
if err != nil {
return nil, err
}
@ -72,10 +74,11 @@ func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (
if c.userProject != "" {
call.UserProject(c.userProject)
}
return runWithRetry(ctx, func() error {
isIdempotent := len(p.Etag) > 0
return run(ctx, func() error {
_, err := call.Context(ctx).Do()
return err
})
}, c.retry, isIdempotent)
}
func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) {
@ -88,10 +91,10 @@ func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (
call.UserProject(c.userProject)
}
var res *raw.TestIamPermissionsResponse
err = runWithRetry(ctx, func() error {
err = run(ctx, func() error {
res, err = call.Context(ctx).Do()
return err
})
}, c.retry, true)
if err != nil {
return nil, err
}

View file

@ -1,4 +1,4 @@
// Copyright 2021 Google LLC
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -39,6 +39,22 @@
//
// The following is an example of making an API call with the newly created client.
//
// ctx := context.Background()
// c, err := storage.NewClient(ctx)
// if err != nil {
// // TODO: Handle error.
// }
// defer c.Close()
//
// req := &storagepb.DeleteBucketRequest{
// // TODO: Fill request struct fields.
// // See https://pkg.go.dev/google.golang.org/genproto/googleapis/storage/v2#DeleteBucketRequest.
// }
// err = c.DeleteBucket(ctx, req)
// if err != nil {
// // TODO: Handle error.
// }
//
// Use of Context
//
// The ctx passed to NewClient is used for authentication requests and
@ -68,7 +84,7 @@ import (
type clientHookParams struct{}
type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
const versionClient = "20211015"
const versionClient = "20220114"
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
out, _ := metadata.FromOutgoingContext(ctx)

View file

@ -10,6 +10,101 @@
"grpc": {
"libraryClient": "Client",
"rpcs": {
"ComposeObject": {
"methods": [
"ComposeObject"
]
},
"CreateBucket": {
"methods": [
"CreateBucket"
]
},
"CreateHmacKey": {
"methods": [
"CreateHmacKey"
]
},
"CreateNotification": {
"methods": [
"CreateNotification"
]
},
"DeleteBucket": {
"methods": [
"DeleteBucket"
]
},
"DeleteHmacKey": {
"methods": [
"DeleteHmacKey"
]
},
"DeleteNotification": {
"methods": [
"DeleteNotification"
]
},
"DeleteObject": {
"methods": [
"DeleteObject"
]
},
"GetBucket": {
"methods": [
"GetBucket"
]
},
"GetHmacKey": {
"methods": [
"GetHmacKey"
]
},
"GetIamPolicy": {
"methods": [
"GetIamPolicy"
]
},
"GetNotification": {
"methods": [
"GetNotification"
]
},
"GetObject": {
"methods": [
"GetObject"
]
},
"GetServiceAccount": {
"methods": [
"GetServiceAccount"
]
},
"ListBuckets": {
"methods": [
"ListBuckets"
]
},
"ListHmacKeys": {
"methods": [
"ListHmacKeys"
]
},
"ListNotifications": {
"methods": [
"ListNotifications"
]
},
"ListObjects": {
"methods": [
"ListObjects"
]
},
"LockBucketRetentionPolicy": {
"methods": [
"LockBucketRetentionPolicy"
]
},
"QueryWriteStatus": {
"methods": [
"QueryWriteStatus"
@ -20,11 +115,41 @@
"ReadObject"
]
},
"RewriteObject": {
"methods": [
"RewriteObject"
]
},
"SetIamPolicy": {
"methods": [
"SetIamPolicy"
]
},
"StartResumableWrite": {
"methods": [
"StartResumableWrite"
]
},
"TestIamPermissions": {
"methods": [
"TestIamPermissions"
]
},
"UpdateBucket": {
"methods": [
"UpdateBucket"
]
},
"UpdateHmacKey": {
"methods": [
"UpdateHmacKey"
]
},
"UpdateObject": {
"methods": [
"UpdateObject"
]
},
"WriteObject": {
"methods": [
"WriteObject"

File diff suppressed because it is too large

View file

@ -17,42 +17,66 @@ package storage
import (
"context"
"io"
"net"
"net/url"
"strings"
"cloud.google.com/go/internal"
gax "github.com/googleapis/gax-go/v2"
"golang.org/x/xerrors"
"google.golang.org/api/googleapi"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// runWithRetry calls the function until it returns nil or a non-retryable error, or
// the context is done.
func runWithRetry(ctx context.Context, call func() error) error {
return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
var defaultRetry *retryConfig = &retryConfig{}
// run determines whether a retry is necessary based on the config and
// idempotency information. It then calls the function with or without retries
// as appropriate, using the configured settings.
func run(ctx context.Context, call func() error, retry *retryConfig, isIdempotent bool) error {
if retry == nil {
retry = defaultRetry
}
if (retry.policy == RetryIdempotent && !isIdempotent) || retry.policy == RetryNever {
return call()
}
bo := gax.Backoff{}
if retry.backoff != nil {
bo.Multiplier = retry.backoff.Multiplier
bo.Initial = retry.backoff.Initial
bo.Max = retry.backoff.Max
}
var errorFunc func(err error) bool = shouldRetry
if retry.shouldRetry != nil {
errorFunc = retry.shouldRetry
}
return internal.Retry(ctx, bo, func() (stop bool, err error) {
err = call()
if err == nil {
return true, nil
}
if shouldRetry(err) {
return false, err
}
return true, err
return !errorFunc(err), err
})
}
func shouldRetry(err error) bool {
if err == io.ErrUnexpectedEOF {
if err == nil {
return false
}
if xerrors.Is(err, io.ErrUnexpectedEOF) {
return true
}
switch e := err.(type) {
case *net.OpError:
if strings.Contains(e.Error(), "use of closed network connection") {
// TODO: check against net.ErrClosed (go 1.16+) instead of string
return true
}
case *googleapi.Error:
// Retry on 429 and 5xx, according to
// Retry on 408, 429, and 5xx, according to
// https://cloud.google.com/storage/docs/exponential-backoff.
return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
return e.Code == 408 || e.Code == 429 || (e.Code >= 500 && e.Code < 600)
case *url.Error:
// Retry socket-level errors ECONNREFUSED and ENETUNREACH (from syscall).
// Retry socket-level errors ECONNREFUSED and ECONNRESET (from syscall).
// Unfortunately the error type is unexported, so we resort to string
// matching.
retriable := []string{"connection refused", "connection reset"}

View file

@ -137,7 +137,12 @@ func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (re
if b.userProject != "" {
call.UserProject(b.userProject)
}
rn, err := call.Context(ctx).Do()
var rn *raw.Notification
err = run(ctx, func() error {
rn, err = call.Context(ctx).Do()
return err
}, b.retry, false)
if err != nil {
return nil, err
}
@ -156,10 +161,10 @@ func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notific
call.UserProject(b.userProject)
}
var res *raw.Notifications
err = runWithRetry(ctx, func() error {
err = run(ctx, func() error {
res, err = call.Context(ctx).Do()
return err
})
}, b.retry, true)
if err != nil {
return nil, err
}
@ -184,7 +189,7 @@ func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err e
if b.userProject != "" {
call.UserProject(b.userProject)
}
return runWithRetry(ctx, func() error {
return run(ctx, func() error {
return call.Context(ctx).Do()
})
}, b.retry, true)
}

View file

@ -52,22 +52,38 @@ type PostPolicyV4Options struct {
// Exactly one of PrivateKey or SignBytes must be non-nil.
PrivateKey []byte
// SignBytes is a function for implementing custom signing. For example, if
// SignBytes is a function for implementing custom signing.
//
// Deprecated: Use SignRawBytes. If both SignBytes and SignRawBytes are defined,
// SignBytes will be ignored.
// This SignBytes function expects the bytes it receives to be hashed, while
// SignRawBytes accepts the raw bytes without hashing, allowing more flexibility.
// Add the following to the top of your signing function to hash the bytes
// to use SignRawBytes instead:
// shaSum := sha256.Sum256(bytes)
// bytes = shaSum[:]
//
SignBytes func(hashBytes []byte) (signature []byte, err error)
// SignRawBytes is a function for implementing custom signing. For example, if
// your application is running on Google App Engine, you can use
// appengine's internal signing function:
// ctx := appengine.NewContext(request)
// acc, _ := appengine.ServiceAccount(ctx)
// url, err := SignedURL("bucket", "object", &SignedURLOptions{
// &PostPolicyV4Options{
// GoogleAccessID: acc,
// SignBytes: func(b []byte) ([]byte, error) {
// SignRawBytes: func(b []byte) ([]byte, error) {
// _, signedBytes, err := appengine.SignBytes(ctx, b)
// return signedBytes, err
// },
// // etc.
// })
//
// Exactly one of PrivateKey or SignBytes must be non-nil.
SignBytes func(hashBytes []byte) (signature []byte, err error)
// SignRawBytes is equivalent to the SignBytes field on SignedURLOptions;
// that is, you may use the same signing function for the two.
//
// Exactly one of PrivateKey or SignRawBytes must be non-nil.
SignRawBytes func(bytes []byte) (signature []byte, err error)
// Expires is the expiration time on the signed URL.
// It must be a time in the future.
@ -96,6 +112,23 @@ type PostPolicyV4Options struct {
// a 4XX status code, back with the message describing the problem.
// Optional.
Conditions []PostPolicyV4Condition
shouldHashSignBytes bool
}
func (opts *PostPolicyV4Options) clone() *PostPolicyV4Options {
return &PostPolicyV4Options{
GoogleAccessID: opts.GoogleAccessID,
PrivateKey: opts.PrivateKey,
SignBytes: opts.SignBytes,
SignRawBytes: opts.SignRawBytes,
Expires: opts.Expires,
Style: opts.Style,
Insecure: opts.Insecure,
Fields: opts.Fields,
Conditions: opts.Conditions,
shouldHashSignBytes: opts.shouldHashSignBytes,
}
}
// PolicyV4Fields describes the attributes for a PostPolicyV4 request.
@ -220,20 +253,22 @@ func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options
var signingFn func(hashedBytes []byte) ([]byte, error)
switch {
case opts.SignBytes != nil:
case opts.SignRawBytes != nil:
signingFn = opts.SignRawBytes
case opts.shouldHashSignBytes:
signingFn = opts.SignBytes
case len(opts.PrivateKey) != 0:
parsedRSAPrivKey, err := parseKey(opts.PrivateKey)
if err != nil {
return nil, err
}
signingFn = func(hashedBytes []byte) ([]byte, error) {
return rsa.SignPKCS1v15(rand.Reader, parsedRSAPrivKey, crypto.SHA256, hashedBytes)
signingFn = func(b []byte) ([]byte, error) {
sum := sha256.Sum256(b)
return rsa.SignPKCS1v15(rand.Reader, parsedRSAPrivKey, crypto.SHA256, sum[:])
}
default:
return nil, errors.New("storage: exactly one of PrivateKey or SignedBytes must be set")
return nil, errors.New("storage: exactly one of PrivateKey or SignRawBytes must be set")
}
var descFields PolicyV4Fields
@ -307,10 +342,18 @@ func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options
}
b64Policy := base64.StdEncoding.EncodeToString(condsAsJSON)
var signature []byte
var signErr error
if opts.shouldHashSignBytes {
// SignBytes expects hashed bytes as input instead of raw bytes, so we hash them
shaSum := sha256.Sum256([]byte(b64Policy))
signature, err := signingFn(shaSum[:])
if err != nil {
return nil, err
signature, signErr = signingFn(shaSum[:])
} else {
signature, signErr = signingFn([]byte(b64Policy))
}
if signErr != nil {
return nil, signErr
}
policyFields["policy"] = b64Policy
@ -348,15 +391,16 @@ func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options
// validatePostPolicyV4Options checks that:
// * GoogleAccessID is set
// * either but not both PrivateKey and SignBytes are set or nil, but not both
// * Expires, the deadline is not in the past
// * either PrivateKey or SignRawBytes/SignBytes is set, but not both
// * the deadline set in Expires is not in the past
// * if Style is not set, it'll use PathStyle
// * sets shouldHashSignBytes to true if opts.SignBytes should be used
func validatePostPolicyV4Options(opts *PostPolicyV4Options, now time.Time) error {
if opts == nil || opts.GoogleAccessID == "" {
return errors.New("storage: missing required GoogleAccessID")
}
if privBlank, signBlank := len(opts.PrivateKey) == 0, opts.SignBytes == nil; privBlank == signBlank {
return errors.New("storage: exactly one of PrivateKey or SignedBytes must be set")
if privBlank, signBlank := len(opts.PrivateKey) == 0, opts.SignBytes == nil && opts.SignRawBytes == nil; privBlank == signBlank {
return errors.New("storage: exactly one of PrivateKey or SignRawBytes must be set")
}
if opts.Expires.Before(now) {
return errors.New("storage: expecting Expires to be in the future")
@ -364,6 +408,9 @@ func validatePostPolicyV4Options(opts *PostPolicyV4Options, now time.Time) error
if opts.Style == nil {
opts.Style = PathStyle()
}
if opts.SignRawBytes == nil && opts.SignBytes != nil {
opts.shouldHashSignBytes = true
}
return nil
}

View file

@ -163,7 +163,7 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
}
var res *http.Response
err = runWithRetry(ctx, func() error {
err = run(ctx, func() error {
res, err = o.c.hc.Do(req)
if err != nil {
return err
@ -210,7 +210,7 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
gen = gen64
}
return nil
})
}, o.retry, true)
if err != nil {
return nil, err
}
@ -483,7 +483,7 @@ func (o *ObjectHandle) newRangeReaderWithGRPC(ctx context.Context, offset, lengt
var msg *storagepb.ReadObjectResponse
var err error
err = runWithRetry(cc, func() error {
err = run(cc, func() error {
stream, err = o.c.gc.ReadObject(cc, req)
if err != nil {
return err
@ -492,7 +492,7 @@ func (o *ObjectHandle) newRangeReaderWithGRPC(ctx context.Context, offset, lengt
msg, err = stream.Recv()
return err
})
}, o.retry, true)
if err != nil {
// Close the stream context we just created to ensure we don't leak
// resources.
@ -541,8 +541,8 @@ func (o *ObjectHandle) newRangeReaderWithGRPC(ctx context.Context, offset, lengt
}
// Only support checksums when reading an entire object, not a range.
if msg.GetObjectChecksums().Crc32C != nil && offset == 0 && length == 0 {
r.wantCRC = msg.GetObjectChecksums().GetCrc32C()
if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && offset == 0 && length == 0 {
r.wantCRC = checksums.GetCrc32C()
r.checkCRC = true
}

View file

@ -41,6 +41,7 @@ import (
"cloud.google.com/go/internal/trace"
"cloud.google.com/go/internal/version"
gapic "cloud.google.com/go/storage/internal/apiv2"
"github.com/googleapis/gax-go/v2"
"golang.org/x/oauth2/google"
"golang.org/x/xerrors"
"google.golang.org/api/googleapi"
@ -50,6 +51,7 @@ import (
"google.golang.org/api/transport"
htransport "google.golang.org/api/transport/http"
storagepb "google.golang.org/genproto/googleapis/storage/v2"
"google.golang.org/grpc"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/known/timestamppb"
@ -81,6 +83,12 @@ const (
// ScopeReadWrite grants permissions to manage your
// data in Google Cloud Storage.
ScopeReadWrite = raw.DevstorageReadWriteScope
// defaultConnPoolSize is the default number of connections
// to initialize in the GAPIC gRPC connection pool. A larger
// connection pool may be necessary for jobs that require
// high throughput and/or leverage many concurrent streams.
defaultConnPoolSize = 4
)
var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo)
@ -102,6 +110,7 @@ type Client struct {
readHost string
// May be nil.
creds *google.Credentials
retry *retryConfig
// gc is an optional gRPC-based, GAPIC client.
//
@ -203,11 +212,34 @@ func newHybridClient(ctx context.Context, opts *hybridClientOptions) (*Client, e
if opts == nil {
opts = &hybridClientOptions{}
}
opts.GRPCOpts = append(defaultGRPCOptions(), opts.GRPCOpts...)
c, err := NewClient(ctx, opts.HTTPOpts...)
if err != nil {
return nil, err
}
// Set emulator options for gRPC if an emulator was specified. Note that in a
// hybrid client, STORAGE_EMULATOR_HOST will set the host to use for HTTP and
// STORAGE_EMULATOR_HOST_GRPC will set the host to use for gRPC (when using a
// local emulator, HTTP and gRPC must use different ports, so this is
// necessary).
// TODO: when full gRPC client is available, remove STORAGE_EMULATOR_HOST_GRPC
// and use STORAGE_EMULATOR_HOST for both the HTTP and gRPC based clients.
if host := os.Getenv("STORAGE_EMULATOR_HOST_GRPC"); host != "" {
// Strip the scheme from the emulator host. WithEndpoint does not take a
// scheme for gRPC.
if strings.Contains(host, "://") {
host = strings.SplitN(host, "://", 2)[1]
}
opts.GRPCOpts = append(opts.GRPCOpts,
option.WithEndpoint(host),
option.WithGRPCDialOption(grpc.WithInsecure()),
option.WithoutAuthentication(),
)
}
g, err := gapic.NewClient(ctx, opts.GRPCOpts...)
if err != nil {
return nil, err
@ -217,6 +249,14 @@ func newHybridClient(ctx context.Context, opts *hybridClientOptions) (*Client, e
return c, nil
}
// defaultGRPCOptions returns a set of the default client options
// for gRPC client initialization.
func defaultGRPCOptions() []option.ClientOption {
return []option.ClientOption{
option.WithGRPCConnectionPool(defaultConnPoolSize),
}
}
// Close closes the Client.
//
// Close need not be called at program exit.
@ -836,6 +876,7 @@ type ObjectHandle struct {
encryptionKey []byte // AES-256 key
userProject string // for requester-pays buckets
readCompressed bool // Accept-Encoding: gzip
retry *retryConfig
}
// ACL provides access to the object's access control list.
@ -899,7 +940,7 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error
}
var obj *raw.Object
setClientHeader(call.Header())
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
err = run(ctx, func() error { obj, err = call.Do(); return err }, o.retry, true)
var e *googleapi.Error
if ok := xerrors.As(err, &e); ok && e.Code == http.StatusNotFound {
return nil, ErrObjectNotExist
@ -1000,7 +1041,11 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
}
var obj *raw.Object
setClientHeader(call.Header())
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
var isIdempotent bool
if o.conds != nil && o.conds.MetagenerationMatch != 0 {
isIdempotent = true
}
err = run(ctx, func() error { obj, err = call.Do(); return err }, o.retry, isIdempotent)
var e *googleapi.Error
if ok := xerrors.As(err, &e); ok && e.Code == http.StatusNotFound {
return nil, ErrObjectNotExist
@ -1064,7 +1109,13 @@ func (o *ObjectHandle) Delete(ctx context.Context) error {
}
// Encryption doesn't apply to Delete.
setClientHeader(call.Header())
err := runWithRetry(ctx, func() error { return call.Do() })
var isIdempotent bool
// Delete is idempotent if GenerationMatch or Generation have been passed in.
// The default generation is negative to get the latest version of the object.
if (o.conds != nil && o.conds.GenerationMatch != 0) || o.gen >= 0 {
isIdempotent = true
}
err := run(ctx, func() error { return call.Do() }, o.retry, isIdempotent)
var e *googleapi.Error
if ok := xerrors.As(err, &e); ok && e.Code == http.StatusNotFound {
return ErrObjectNotExist
@ -1759,6 +1810,169 @@ func setConditionField(call reflect.Value, name string, value interface{}) bool
return true
}
// Retryer returns an object handle that is configured with custom retry
// behavior as specified by the options that are passed to it. All operations
// on the new handle will use the customized retry configuration.
// These retry options will merge with the bucket's retryer (if set) for the
// returned handle. Options passed into this method will take precedence over
// retry options on the bucket and client. Note that you must explicitly pass in
// each option you want to override.
func (o *ObjectHandle) Retryer(opts ...RetryOption) *ObjectHandle {
o2 := *o
var retry *retryConfig
if o.retry != nil {
// merge the options with the existing retry
retry = o.retry
} else {
retry = &retryConfig{}
}
for _, opt := range opts {
opt.apply(retry)
}
o2.retry = retry
o2.acl.retry = retry
return &o2
}
// SetRetry configures the client with custom retry behavior as specified by the
// options that are passed to it. All operations using this client will use the
// customized retry configuration.
// This should be called once before using the client for network operations, as
// there could be indeterminate behaviour with operations in progress.
// Retry options set on a bucket or object handle will take precedence over
// these options.
func (c *Client) SetRetry(opts ...RetryOption) {
var retry *retryConfig
if c.retry != nil {
// merge the options with the existing retry
retry = c.retry
} else {
retry = &retryConfig{}
}
for _, opt := range opts {
opt.apply(retry)
}
c.retry = retry
}
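A minimal sketch of client-wide configuration (assumes it runs once, before any network calls are issued):
	client.SetRetry(
		storage.WithPolicy(storage.RetryAlways),
		storage.WithBackoff(gax.Backoff{Max: 30 * time.Second}),
	)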
// RetryOption allows users to configure non-default retry behavior for API
// calls made to GCS.
type RetryOption interface {
apply(config *retryConfig)
}
// WithBackoff allows configuration of the backoff timing used for retries.
// Available configuration options (Initial, Max and Multiplier) are described
// at https://pkg.go.dev/github.com/googleapis/gax-go/v2#Backoff. If any fields
// are not supplied by the user, gax default values will be used.
func WithBackoff(backoff gax.Backoff) RetryOption {
return &withBackoff{
backoff: backoff,
}
}
type withBackoff struct {
backoff gax.Backoff
}
func (wb *withBackoff) apply(config *retryConfig) {
config.backoff = &wb.backoff
}
// RetryPolicy describes the available policies for which operations should be
// retried. The default is `RetryIdempotent`.
type RetryPolicy int
const (
// RetryIdempotent causes only idempotent operations to be retried when the
// service returns a transient error. Using this policy, fully idempotent
// operations (such as `ObjectHandle.Attrs()`) will always be retried.
// Conditionally idempotent operations (for example `ObjectHandle.Update()`)
// will be retried only if the necessary conditions have been supplied (in
// the case of `ObjectHandle.Update()` this would mean supplying a
// `Conditions.MetagenerationMatch` condition is required).
RetryIdempotent RetryPolicy = iota
// RetryAlways causes all operations to be retried when the service returns a
// transient error, regardless of idempotency considerations.
RetryAlways
// RetryNever causes the client to not perform retries on failed operations.
RetryNever
)
// WithPolicy allows the configuration of which operations should be performed
// with retries for transient errors.
func WithPolicy(policy RetryPolicy) RetryOption {
return &withPolicy{
policy: policy,
}
}
type withPolicy struct {
policy RetryPolicy
}
func (ws *withPolicy) apply(config *retryConfig) {
config.policy = ws.policy
}
// WithErrorFunc allows users to pass a custom function to the retryer. Errors
// will be retried if and only if `shouldRetry(err)` returns true.
// By default, the following errors are retried (see invoke.go for the default
// shouldRetry function):
//
// - HTTP responses with codes 408, 429, 502, 503, and 504.
//
// - Transient network errors such as connection reset and io.ErrUnexpectedEOF.
//
// - Errors which are considered transient using the Temporary() interface.
//
// - Wrapped versions of these errors.
//
// This option can be used to retry on a different set of errors than the
// default.
func WithErrorFunc(shouldRetry func(err error) bool) RetryOption {
return &withErrorFunc{
shouldRetry: shouldRetry,
}
}
type withErrorFunc struct {
shouldRetry func(err error) bool
}
func (wef *withErrorFunc) apply(config *retryConfig) {
config.shouldRetry = wef.shouldRetry
}
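For example, a sketch of a custom predicate (the policy itself is illustrative) that retries only on HTTP 429 responses:
	o := client.Bucket("my-bucket").Object("file.txt").Retryer(
		storage.WithErrorFunc(func(err error) bool {
			var e *googleapi.Error
			return errors.As(err, &e) && e.Code == 429
		}),
	)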
type retryConfig struct {
backoff *gax.Backoff
policy RetryPolicy
shouldRetry func(err error) bool
}
func (r *retryConfig) clone() *retryConfig {
if r == nil {
return nil
}
var bo *gax.Backoff
if r.backoff != nil {
bo = &gax.Backoff{
Initial: r.backoff.Initial,
Max: r.backoff.Max,
Multiplier: r.backoff.Multiplier,
}
}
return &retryConfig{
backoff: bo,
policy: r.policy,
shouldRetry: r.shouldRetry,
}
}
// composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods
// that modifyCall searches for by name.
type composeSourceObj struct {
@ -1802,10 +2016,10 @@ func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string,
r := c.raw.Projects.ServiceAccount.Get(projectID)
var res *raw.ServiceAccount
var err error
err = runWithRetry(ctx, func() error {
err = run(ctx, func() error {
res, err = r.Context(ctx).Do()
return err
})
}, c.retry, true)
if err != nil {
return "", err
}

View file

@ -172,6 +172,22 @@ func (w *Writer) open() error {
// call to set up the upload as well as calls to upload individual chunks
// for a resumable upload (as long as the chunk size is non-zero). Hence
// there is no need to add retries here.
// Retry only when the operation is idempotent or the retry policy is RetryAlways.
isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist == true)
var useRetry bool
if (w.o.retry == nil || w.o.retry.policy == RetryIdempotent) && isIdempotent {
useRetry = true
} else if w.o.retry != nil && w.o.retry.policy == RetryAlways {
useRetry = true
}
if useRetry {
if w.o.retry != nil {
call.WithRetry(w.o.retry.backoff, w.o.retry.shouldRetry)
} else {
call.WithRetry(nil, nil)
}
}
resp, err = call.Do()
}
if err != nil {

View file

@ -1,6 +1,7 @@
package metricsql
import (
"fmt"
"sort"
"strings"
)
@ -13,149 +14,400 @@ import (
// according to https://utcc.utoronto.ca/~cks/space/blog/sysadmin/PrometheusLabelNonOptimization
// I.e. such query is converted to `foo{filters1, filters2} op bar{filters1, filters2}`
func Optimize(e Expr) Expr {
switch t := e.(type) {
case *BinaryOpExpr:
// Convert `foo{filters1} op bar{filters2}` to `foo{filters1, filters2} op bar{filters1, filters2}`.
// This should reduce the number of operations
// See https://utcc.utoronto.ca/~cks/space/blog/sysadmin/PrometheusLabelNonOptimization
// for details.
if !canOptimizeBinaryOp(t) {
return optimizeBinaryOpArgs(t)
}
meLeft := getMetricExprForOptimization(t.Left)
if meLeft == nil || !meLeft.hasNonEmptyMetricGroup() {
return optimizeBinaryOpArgs(t)
}
meRight := getMetricExprForOptimization(t.Right)
if meRight == nil || !meRight.hasNonEmptyMetricGroup() {
return optimizeBinaryOpArgs(t)
}
lfs := intersectLabelFilters(meLeft.LabelFilters[1:], meRight.LabelFilters[1:])
meLeft.LabelFilters = append(meLeft.LabelFilters[:1], lfs...)
meRight.LabelFilters = append(meRight.LabelFilters[:1], lfs...)
return t
case *FuncExpr:
for i := range t.Args {
t.Args[i] = Optimize(t.Args[i])
}
return t
case *AggrFuncExpr:
for i := range t.Args {
t.Args[i] = Optimize(t.Args[i])
}
return t
default:
if !canOptimize(e) {
return e
}
eCopy := Clone(e)
optimizeInplace(eCopy)
return eCopy
}
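A short usage sketch of the optimization (expression and output are illustrative, following the doc comment above):
	expr, err := metricsql.Parse(`foo{a="b"} + bar{x="y"}`)
	if err != nil {
		// Handle err.
	}
	optimized := metricsql.Optimize(expr)
	fmt.Println(string(optimized.AppendString(nil)))
	// foo{a="b", x="y"} + bar{a="b", x="y"}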
func canOptimizeBinaryOp(be *BinaryOpExpr) bool {
if be.JoinModifier.Op != "" || be.GroupModifier.Op != "" {
func canOptimize(e Expr) bool {
switch t := e.(type) {
case *RollupExpr:
return canOptimize(t.Expr) || canOptimize(t.At)
case *FuncExpr:
for _, arg := range t.Args {
if canOptimize(arg) {
return true
}
}
case *AggrFuncExpr:
for _, arg := range t.Args {
if canOptimize(arg) {
return true
}
}
case *BinaryOpExpr:
return true
}
return false
}
switch be.Op {
case "+", "-", "*", "/", "%", "^",
"==", "!=", ">", "<", ">=", "<=",
"and", "if", "ifnot", "default":
return true
// Clone clones the given expression e and returns the cloned copy.
func Clone(e Expr) Expr {
s := e.AppendString(nil)
eCopy, err := Parse(string(s))
if err != nil {
panic(fmt.Errorf("BUG: cannot parse the expression %q: %w", s, err))
}
return eCopy
}
func optimizeInplace(e Expr) {
switch t := e.(type) {
case *RollupExpr:
optimizeInplace(t.Expr)
optimizeInplace(t.At)
case *FuncExpr:
for _, arg := range t.Args {
optimizeInplace(arg)
}
case *AggrFuncExpr:
for _, arg := range t.Args {
optimizeInplace(arg)
}
case *BinaryOpExpr:
optimizeInplace(t.Left)
optimizeInplace(t.Right)
lfs := getCommonLabelFilters(t)
pushdownBinaryOpFiltersInplace(t, lfs)
}
}
func getCommonLabelFilters(e Expr) []LabelFilter {
switch t := e.(type) {
case *MetricExpr:
return getLabelFiltersWithoutMetricName(t.LabelFilters)
case *RollupExpr:
return getCommonLabelFilters(t.Expr)
case *FuncExpr:
arg := getFuncArgForOptimization(t.Name, t.Args)
if arg == nil {
return nil
}
return getCommonLabelFilters(arg)
case *AggrFuncExpr:
arg := getFuncArgForOptimization(t.Name, t.Args)
if arg == nil {
return nil
}
lfs := getCommonLabelFilters(arg)
return trimFiltersByAggrModifier(lfs, t)
case *BinaryOpExpr:
lfsLeft := getCommonLabelFilters(t.Left)
lfsRight := getCommonLabelFilters(t.Right)
var lfs []LabelFilter
switch strings.ToLower(t.Op) {
case "or":
// {fCommon, f1} or {fCommon, f2} -> {fCommon}
// {fCommon, f1} or on() {fCommon, f2} -> {}
// {fCommon, f1} or on(fCommon) {fCommon, f2} -> {fCommon}
// {fCommon, f1} or on(f1) {fCommon, f2} -> {}
// {fCommon, f1} or on(f2) {fCommon, f2} -> {}
// {fCommon, f1} or on(f3) {fCommon, f2} -> {}
lfs = intersectLabelFilters(lfsLeft, lfsRight)
return TrimFiltersByGroupModifier(lfs, t)
case "unless":
// {f1} unless {f2} -> {f1}
// {f1} unless on() {f2} -> {}
// {f1} unless on(f1) {f2} -> {f1}
// {f1} unless on(f2) {f2} -> {}
// {f1} unless on(f1, f2) {f2} -> {f1}
// {f1} unless on(f3) {f2} -> {}
return TrimFiltersByGroupModifier(lfsLeft, t)
default:
return false
switch strings.ToLower(t.JoinModifier.Op) {
case "group_left":
// {f1} * group_left() {f2} -> {f1, f2}
// {f1} * on() group_left() {f2} -> {f1}
// {f1} * on(f1) group_left() {f2} -> {f1}
// {f1} * on(f2) group_left() {f2} -> {f1, f2}
// {f1} * on(f1, f2) group_left() {f2} -> {f1, f2}
// {f1} * on(f3) group_left() {f2} -> {f1}
lfsRight = TrimFiltersByGroupModifier(lfsRight, t)
return unionLabelFilters(lfsLeft, lfsRight)
case "group_right":
// {f1} * group_right() {f2} -> {f1, f2}
// {f1} * on() group_right() {f2} -> {f2}
// {f1} * on(f1) group_right() {f2} -> {f1, f2}
// {f1} * on(f2) group_right() {f2} -> {f2}
// {f1} * on(f1, f2) group_right() {f2} -> {f1, f2}
// {f1} * on(f3) group_right() {f2} -> {f2}
lfsLeft = TrimFiltersByGroupModifier(lfsLeft, t)
return unionLabelFilters(lfsLeft, lfsRight)
default:
// {f1} * {f2} -> {f1, f2}
// {f1} * on() {f2} -> {}
// {f1} * on(f1) {f2} -> {f1}
// {f1} * on(f2) {f2} -> {f2}
// {f1} * on(f1, f2) {f2} -> {f2}
// {f1} * on(f3) {f2} -> {}
lfs = unionLabelFilters(lfsLeft, lfsRight)
return TrimFiltersByGroupModifier(lfs, t)
}
}
default:
return nil
}
}
func optimizeBinaryOpArgs(be *BinaryOpExpr) *BinaryOpExpr {
be.Left = Optimize(be.Left)
be.Right = Optimize(be.Right)
return be
func trimFiltersByAggrModifier(lfs []LabelFilter, afe *AggrFuncExpr) []LabelFilter {
switch strings.ToLower(afe.Modifier.Op) {
case "by":
return filterLabelFiltersOn(lfs, afe.Modifier.Args)
case "without":
return filterLabelFiltersIgnoring(lfs, afe.Modifier.Args)
default:
return nil
}
}
func getMetricExprForOptimization(e Expr) *MetricExpr {
re, ok := e.(*RollupExpr)
if ok {
// Try optimizing the inner expression in RollupExpr.
return getMetricExprForOptimization(re.Expr)
// TrimFiltersByGroupModifier trims lfs by the specified be.GroupModifier.Op (e.g. on() or ignoring()).
//
// The following cases are possible:
// - It returns lfs as is if be doesn't contain any group modifier
// - It returns only filters specified in on()
// - It drops filters specified inside ignoring()
func TrimFiltersByGroupModifier(lfs []LabelFilter, be *BinaryOpExpr) []LabelFilter {
switch strings.ToLower(be.GroupModifier.Op) {
case "on":
return filterLabelFiltersOn(lfs, be.GroupModifier.Args)
case "ignoring":
return filterLabelFiltersIgnoring(lfs, be.GroupModifier.Args)
default:
return lfs
}
me, ok := e.(*MetricExpr)
if ok {
// Ordinary metric expression, i.e. `foo{bar="baz"}`
return me
}
be, ok := e.(*BinaryOpExpr)
if ok {
if !canOptimizeBinaryOp(be) {
return nil
}
if me, ok := be.Left.(*MetricExpr); ok && isNumberOrScalar(be.Right) {
// foo{bar="baz"} * num_or_scalar
return me
}
if me, ok := be.Right.(*MetricExpr); ok && isNumberOrScalar(be.Left) {
// num_or_scalar * foo{bar="baz"}
return me
}
return nil
}
fe, ok := e.(*FuncExpr)
if !ok {
return nil
}
if IsRollupFunc(fe.Name) {
argIdx := GetRollupArgIdx(fe)
if argIdx >= len(fe.Args) {
return nil
}
arg := fe.Args[argIdx]
return getMetricExprForOptimization(arg)
}
if IsTransformFunc(fe.Name) {
switch strings.ToLower(fe.Name) {
case "absent", "histogram_quantile", "label_join", "label_replace", "scalar", "vector",
"label_set", "label_map", "label_uppercase", "label_lowercase", "label_del", "label_keep", "label_copy",
"label_move", "label_transform", "label_value", "label_match", "label_mismatch", "label_graphite_group",
"prometheus_buckets", "buckets_limit", "histogram_share", "histogram_avg", "histogram_stdvar", "histogram_stddev", "union", "":
// metric expressions for these functions cannot be optimized.
return nil
}
for _, arg := range fe.Args {
if me, ok := arg.(*MetricExpr); ok {
// transform_func(foo{bar="baz"})
return me
}
}
return nil
}
return nil
}
func isNumberOrScalar(e Expr) bool {
if _, ok := e.(*NumberExpr); ok {
return true
func getLabelFiltersWithoutMetricName(lfs []LabelFilter) []LabelFilter {
lfsNew := make([]LabelFilter, 0, len(lfs))
for _, lf := range lfs {
if lf.Label != "__name__" {
lfsNew = append(lfsNew, lf)
}
if fe, ok := e.(*FuncExpr); ok && strings.ToLower(fe.Name) == "scalar" {
return true
}
return false
return lfsNew
}
func intersectLabelFilters(a, b []LabelFilter) []LabelFilter {
m := make(map[string]LabelFilter, len(a)+len(b))
var buf []byte
for _, lf := range a {
buf = lf.AppendString(buf[:0])
m[string(buf)] = lf
// PushdownBinaryOpFilters pushes down the given commonFilters to e if possible.
//
// e must be a part of binary operation - either left or right.
//
// For example, if e contains `foo + sum(bar)` and commonFilters={x="y"},
// then the returned expression will contain `foo{x="y"} + sum(bar)`.
// The `{x="y"}` cannot be pusehd down to `sum(bar)`, since this may change binary operation results.
func PushdownBinaryOpFilters(e Expr, commonFilters []LabelFilter) Expr {
if len(commonFilters) == 0 {
// Fast path - nothing to push down.
return e
}
for _, lf := range b {
buf = lf.AppendString(buf[:0])
m[string(buf)] = lf
eCopy := Clone(e)
pushdownBinaryOpFiltersInplace(eCopy, commonFilters)
return eCopy
}
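A sketch mirroring the example in the doc comment (label filter constructed by hand):
	expr, _ := metricsql.Parse(`foo + sum(bar)`)
	lfs := []metricsql.LabelFilter{{Label: "x", Value: "y"}}
	result := metricsql.PushdownBinaryOpFilters(expr, lfs)
	fmt.Println(string(result.AppendString(nil)))
	// foo{x="y"} + sum(bar)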
ss := make([]string, 0, len(m))
for s := range m {
ss = append(ss, s)
func pushdownBinaryOpFiltersInplace(e Expr, lfs []LabelFilter) {
if len(lfs) == 0 {
return
}
switch t := e.(type) {
case *MetricExpr:
t.LabelFilters = unionLabelFilters(t.LabelFilters, lfs)
sortLabelFilters(t.LabelFilters)
case *RollupExpr:
pushdownBinaryOpFiltersInplace(t.Expr, lfs)
case *FuncExpr:
arg := getFuncArgForOptimization(t.Name, t.Args)
if arg != nil {
pushdownBinaryOpFiltersInplace(arg, lfs)
}
case *AggrFuncExpr:
lfs = trimFiltersByAggrModifier(lfs, t)
arg := getFuncArgForOptimization(t.Name, t.Args)
if arg != nil {
pushdownBinaryOpFiltersInplace(arg, lfs)
}
case *BinaryOpExpr:
lfs = TrimFiltersByGroupModifier(lfs, t)
pushdownBinaryOpFiltersInplace(t.Left, lfs)
pushdownBinaryOpFiltersInplace(t.Right, lfs)
}
}
func intersectLabelFilters(lfsA, lfsB []LabelFilter) []LabelFilter {
	if len(lfsA) == 0 || len(lfsB) == 0 {
		return nil
	}
	m := getLabelFiltersMap(lfsA)
	var b []byte
	var lfs []LabelFilter
	for _, lf := range lfsB {
		b = lf.AppendString(b[:0])
		if _, ok := m[string(b)]; ok {
			lfs = append(lfs, lf)
		}
	}
	return lfs
}
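// For illustration (not part of this diff): intersecting the filters of
// `{a="b", x="y"}` with those of `{x="y", q="w"}` yields just `x="y"`,
// since filters are matched by their serialized form from getLabelFiltersMap.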
func unionLabelFilters(lfsA, lfsB []LabelFilter) []LabelFilter {
if len(lfsA) == 0 {
return lfsB
}
if len(lfsB) == 0 {
return lfsA
}
m := getLabelFiltersMap(lfsA)
var b []byte
lfs := append([]LabelFilter{}, lfsA...)
for _, lf := range lfsB {
b = lf.AppendString(b[:0])
if _, ok := m[string(b)]; !ok {
lfs = append(lfs, lf)
}
}
return lfs
}
func getLabelFiltersMap(lfs []LabelFilter) map[string]struct{} {
m := make(map[string]struct{}, len(lfs))
var b []byte
for _, lf := range lfs {
b = lf.AppendString(b[:0])
m[string(b)] = struct{}{}
}
return m
}
func sortLabelFilters(lfs []LabelFilter) {
// Make sure the first label filter is __name__ (if any)
if len(lfs) > 0 && lfs[0].isMetricNameFilter() {
lfs = lfs[1:]
}
sort.Slice(lfs, func(i, j int) bool {
a, b := lfs[i], lfs[j]
if a.Label != b.Label {
return a.Label < b.Label
}
return a.Value < b.Value
})
}
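// For example (illustrative only): the filters of `foo{z="1", a="2"}` sort to
// `foo{a="2", z="1"}` - the leading `__name__` filter stays first, the rest
// are ordered by label name, then by value.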
func filterLabelFiltersOn(lfs []LabelFilter, args []string) []LabelFilter {
if len(args) == 0 {
return nil
}
m := make(map[string]struct{}, len(args))
for _, arg := range args {
m[arg] = struct{}{}
}
var lfsNew []LabelFilter
for _, lf := range lfs {
if _, ok := m[lf.Label]; ok {
lfsNew = append(lfsNew, lf)
}
}
return lfsNew
}
func filterLabelFiltersIgnoring(lfs []LabelFilter, args []string) []LabelFilter {
if len(args) == 0 {
return lfs
}
m := make(map[string]struct{}, len(args))
for _, arg := range args {
m[arg] = struct{}{}
}
var lfsNew []LabelFilter
for _, lf := range lfs {
if _, ok := m[lf.Label]; !ok {
lfsNew = append(lfsNew, lf)
}
}
return lfsNew
}
func getFuncArgForOptimization(funcName string, args []Expr) Expr {
idx := getFuncArgIdxForOptimization(funcName, args)
if idx < 0 || idx >= len(args) {
return nil
}
return args[idx]
}
func getFuncArgIdxForOptimization(funcName string, args []Expr) int {
funcName = strings.ToLower(funcName)
if IsRollupFunc(funcName) {
return getRollupArgIdxForOptimization(funcName, args)
}
if IsTransformFunc(funcName) {
return getTransformArgIdxForOptimization(funcName, args)
}
if isAggrFunc(funcName) {
return getAggrArgIdxForOptimization(funcName, args)
}
return -1
}
func getAggrArgIdxForOptimization(funcName string, args []Expr) int {
switch strings.ToLower(funcName) {
case "bottomk", "bottomk_avg", "bottomk_max", "bottomk_median", "bottomk_last", "bottomk_min",
"limitk", "outliers_mad", "outliersk", "quantile",
"topk", "topk_avg", "topk_max", "topk_median", "topk_last", "topk_min":
return 1
case "count_values":
return -1
case "quantiles":
return len(args) - 1
default:
return 0
}
}
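// E.g. (illustrative): in `topk(5, process_cpu_seconds_total)` the optimizable
// arg is args[1] (the series selector), while `count_values` exposes no arg
// that is safe to optimize.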
func getRollupArgIdxForOptimization(funcName string, args []Expr) int {
// This must be kept in sync with GetRollupArgIdx()
switch strings.ToLower(funcName) {
case "absent_over_time":
return -1
case "quantile_over_time", "aggr_over_time",
"hoeffding_bound_lower", "hoeffding_bound_upper":
return 1
case "quantiles_over_time":
return len(args) - 1
default:
return 0
}
}
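// E.g. (illustrative): in `quantile_over_time(0.9, m[5m])` the series arg is
// args[1], while `absent_over_time` is excluded (-1), since pushing extra
// filters into its arg could change the result.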
func getTransformArgIdxForOptimization(funcName string, args []Expr) int {
funcName = strings.ToLower(funcName)
if isLabelManipulationFunc(funcName) {
return -1
}
switch funcName {
case "", "absent", "scalar", "union", "vector":
return -1
case "end", "now", "pi", "ru", "start", "step", "time":
return -1
case "limit_offset":
return 2
case "buckets_limit", "histogram_quantile", "histogram_share", "range_quantile":
return 1
case "histogram_quantiles":
return len(args) - 1
default:
return 0
}
}
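// E.g. (illustrative): in `limit_offset(10, 0, q)` the optimizable arg is
// args[2] (q itself), while label-manipulation functions such as label_set
// cannot have filters pushed into them at all.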
func isLabelManipulationFunc(funcName string) bool {
switch strings.ToLower(funcName) {
case "alias", "label_copy", "label_del", "label_graphite_group", "label_join", "label_keep", "label_lowercase",
"label_map", "label_match", "label_mismatch", "label_move", "label_replace", "label_set", "label_transform",
"label_uppercase", "label_value":
return true
default:
return false
}
}

View file

@ -1836,7 +1836,7 @@ func (lf *LabelFilter) AppendString(dst []byte) []byte {
// MetricExpr represents MetricsQL metric with optional filters, i.e. `foo{...}`.
type MetricExpr struct {
// LabelFilters contains a list of label filters from curly braces.
// Metric name if present must be the first.
// Filter or metric name must be the first if present.
LabelFilters []LabelFilter
// labelFilters must be expanded to LabelFilters by expandWithExpr.
@ -1884,6 +1884,9 @@ func (me *MetricExpr) hasNonEmptyMetricGroup() bool {
if len(me.LabelFilters) == 0 {
return false
}
lf := &me.LabelFilters[0]
return me.LabelFilters[0].isMetricNameFilter()
}
func (lf *LabelFilter) isMetricNameFilter() bool {
return lf.Label == "__name__" && !lf.IsNegative && !lf.IsRegexp
}
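// E.g. (illustrative): `foo{bar="baz"}` parses with `__name__="foo"` as its
// first label filter, so hasNonEmptyMetricGroup() returns true, while
// `{__name__=~"foo.*"}` uses a regexp filter and returns false.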

View file

@ -90,8 +90,7 @@ func IsRollupFunc(funcName string) bool {
//
// -1 is returned if fe isn't a rollup function.
func GetRollupArgIdx(fe *FuncExpr) int {
funcName := fe.Name
funcName = strings.ToLower(funcName)
funcName := strings.ToLower(fe.Name)
if !rollupFuncs[funcName] {
return -1
}

View file

@ -7121,6 +7121,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com",
},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
Region: "ap-southeast-3",
Variant: fipsVariant,
}: endpoint{
Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com",
},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@ -7256,6 +7265,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-ap-southeast-3",
}: endpoint{
Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-southeast-3",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-ca-central-1",
}: endpoint{
@ -9659,6 +9677,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@ -10808,6 +10829,9 @@ var awsPartition = partition{
},
"kafka": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
}: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
@ -10817,6 +10841,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
Region: "ap-northeast-3",
}: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@ -17560,6 +17587,21 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "secretsmanager-fips.ca-central-1.amazonaws.com",
},
endpointKey{
Region: "ca-central-1-fips",
}: endpoint{
Hostname: "secretsmanager-fips.ca-central-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ca-central-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "eu-central-1",
}: endpoint{},

View file

@ -4,6 +4,7 @@ import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"reflect"
@ -525,6 +526,14 @@ func (r *Request) GetBody() io.ReadSeeker {
// Send will not close the request.Request's body.
func (r *Request) Send() error {
defer func() {
		// Ensure a non-nil HTTPResponse parameter is set so that handlers
		// checking for HTTPResponse values don't fail.
if r.HTTPResponse == nil {
r.HTTPResponse = &http.Response{
Header: http.Header{},
Body: ioutil.NopCloser(&bytes.Buffer{}),
}
}
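		// Illustrative note (not in the upstream diff): this guard lets
		// Complete handlers dereference r.HTTPResponse - e.g. read
		// r.HTTPResponse.StatusCode - even when the request failed before
		// any HTTP round trip took place.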
// Regardless of success or failure of the request trigger the Complete
// request handlers.
r.Handlers.Complete.Run(r)

View file

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.42.39"
const SDKVersion = "1.42.44"

View file

@ -10,9 +10,6 @@ import (
"fmt"
"io"
"math"
"math/bits"
comp "github.com/klauspost/compress"
)
const (
@ -76,8 +73,8 @@ var levels = []compressionLevel{
{0, 0, 0, 0, 0, 6},
// Levels 7-9 use increasingly more lazy matching
// and increasingly stringent conditions for "good enough".
{6, 10, 12, 16, skipNever, 7},
{10, 24, 32, 64, skipNever, 8},
{8, 12, 16, 24, skipNever, 7},
{16, 30, 40, 64, skipNever, 8},
{32, 258, 258, 1024, skipNever, 9},
}
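// Illustrative reading (assuming the stdlib-style field order good, lazy,
// nice, chain, fastSkipHashing, level): level 8 now uses good=16, lazy=30
// and nice=40 with the same chain depth of 64, i.e. it searches harder
// before settling for a match.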
@ -110,6 +107,7 @@ type advancedState struct {
type compressor struct {
compressionLevel
h *huffmanEncoder
w *huffmanBitWriter
// compression algorithm
@ -271,7 +269,7 @@ func (d *compressor) fillWindow(b []byte) {
// Try to find a match starting at index whose length is greater than prevSize.
// We only look at chainCount possibilities before giving up.
// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
func (d *compressor) findMatch(pos int, prevHead int, lookahead, bpb int) (length, offset int, ok bool) {
func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) {
minMatchLook := maxMatchLength
if lookahead < minMatchLook {
minMatchLook = lookahead
@ -297,14 +295,46 @@ func (d *compressor) findMatch(pos int, prevHead int, lookahead, bpb int) (lengt
}
offset = 0
	// Base is 4 bytes at with an additional cost.
	// Matches must be better than this.
	cGain := minMatchLength*bpb - 12
	cGain := 0
	if d.chain < 100 {
		for i := prevHead; tries > 0; tries-- {
			if wEnd == win[i+length] {
				n := matchLen(win[i:i+minMatchLook], wPos)
				if n > length {
					length = n
					offset = pos - i
					ok = true
					if n >= nice {
						// The match is good enough that we don't try to find a better one.
						break
					}
					wEnd = win[pos+n]
				}
			}
			if i <= minIndex {
				// hashPrev[i & windowMask] has already been overwritten, so stop now.
				break
			}
			i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
			if i < minIndex {
				break
			}
		}
		return
	}
	// Some like it higher (CSV), some like it lower (JSON)
	const baseCost = 6
	// Base is 4 bytes at with an additional cost.
	// Matches must be better than this.
	for i := prevHead; tries > 0; tries-- {
		if wEnd == win[i+length] {
			n := matchLen(win[i:i+minMatchLook], wPos)
			if n > length {
				newGain := n*bpb - bits.Len32(uint32(pos-i))
				// Calculate gain. Estimate
				newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]])
				//fmt.Println(n, "gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]))
				if newGain > cGain {
					length = n
					offset = pos - i
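				// Illustrative summary (inferred from the code above, not part
				// of the diff): the new estimate charges a match its offset
				// extra bits, its length extra bits and a fixed baseCost, and
				// compares that against the Huffman-coded size of emitting the
				// same bytes as literals (d.h.bitLengthRaw). The match is only
				// taken when it is estimated to save bits over the literals it
				// replaces.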
@ -389,10 +419,16 @@ func (d *compressor) deflateLazy() {
if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
return
}
	s.estBitsPerByte = 8
	if !d.sync {
		s.estBitsPerByte = comp.ShannonEntropyBits(d.window[s.index:d.windowEnd])
		s.estBitsPerByte = int(1 + float64(s.estBitsPerByte)/float64(d.windowEnd-s.index))
	}
	if d.windowEnd != s.index && d.chain > 100 {
// Get literal huffman coder.
if d.h == nil {
d.h = newHuffmanEncoder(maxFlateBlockTokens)
}
var tmp [256]uint16
for _, v := range d.window[s.index:d.windowEnd] {
tmp[v]++
}
d.h.generate(tmp[:], 15)
}
s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
@ -446,7 +482,7 @@ func (d *compressor) deflateLazy() {
}
if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead, s.estBitsPerByte); ok {
if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok {
s.length = newLength
s.offset = newOffset
}

View file

@ -52,18 +52,18 @@ var lengthBase = [32]uint8{
}
// offset code word extra bits.
var offsetExtraBits = [64]int8{
var offsetExtraBits = [32]int8{
0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
/* extended window */
14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20,
14, 14,
}
var offsetCombined = [32]uint32{}
func init() {
var offsetBase = [64]uint32{
var offsetBase = [32]uint32{
/* normal deflate */
0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
@ -73,9 +73,7 @@ func init() {
0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
/* extended window */
0x008000, 0x00c000, 0x010000, 0x018000, 0x020000,
0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000,
0x100000, 0x180000, 0x200000, 0x300000,
0x008000, 0x00c000,
}
for i := range offsetCombined[:] {

Some files were not shown because too many files have changed in this diff