Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

This commit is contained in:
Aliaksandr Valialkin 2022-04-12 16:23:16 +03:00
commit 095feeee41
110 changed files with 5293 additions and 936 deletions

View file

@ -8,10 +8,10 @@ assignees: ''
**Describe the bug**
A clear and concise description of what the bug is.
It would be a great [upgrading](https://docs.victoriametrics.com/#how-to-upgrade)
It would be great to [upgrade](https://docs.victoriametrics.com/#how-to-upgrade)
to [the latest available release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)
and verifying whether the bug is reproducible there.
It is also recommended reading [troubleshooting docs](https://docs.victoriametrics.com/#troubleshooting).
and verify whether the bug is reproducible there.
It's also recommended to read the [troubleshooting docs](https://docs.victoriametrics.com/#troubleshooting).
**To Reproduce**
Steps to reproduce the behavior.
@ -36,12 +36,11 @@ See how to setup monitoring here:
* [monitoring for VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#monitoring)
**Version**
The line returned when passing `--version` command line flag to binary. For example:
The line returned when passing `--version` command line flag to the binary. For example:
```
$ ./victoria-metrics-prod --version
victoria-metrics-20190730-121249-heads-single-node-0-g671d9e55
```
**Used command-line flags**
Please provide applied command-line flags used for running VictoriaMetrics and its components.
Please provide the command-line flags used for running VictoriaMetrics and its components.

View file

@ -16,6 +16,7 @@ updates:
directory: "/app/vmui/packages/vmui/web"
schedule:
interval: "weekly"
open-pull-requests-limit: 0
- package-ecosystem: "docker"
directory: "/"
schedule:
@ -24,3 +25,4 @@ updates:
directory: "/app/vmui/packages/vmui"
schedule:
interval: "weekly"
open-pull-requests-limit: 0

View file

@ -60,7 +60,7 @@ jobs:
GOOS=darwin go build -mod=vendor ./app/vmctl
CGO_ENABLED=0 GOOS=windows go build -mod=vendor ./app/vmagent
- name: Publish coverage
uses: codecov/codecov-action@v2.1.0
uses: codecov/codecov-action@v3
with:
file: ./coverage.txt

View file

@ -538,7 +538,8 @@ Additionally VictoriaMetrics provides the following handlers:
## Graphite API usage
VictoriaMetrics supports the following Graphite APIs, which are needed for [Graphite datasource in Grafana](https://grafana.com/docs/grafana/latest/datasources/graphite/):
VictoriaMetrics supports data ingestion in Graphite protocol - see [these docs](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) for details.
VictoriaMetrics supports the following Graphite querying APIs, which are needed for [Graphite datasource in Grafana](https://grafana.com/docs/grafana/latest/datasources/graphite/):
* Render API - see [these docs](#graphite-render-api-usage).
* Metrics API - see [these docs](#graphite-metrics-api-usage).
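For illustration, a Graphite Render API request against a single-node VictoriaMetrics might look like the following (the metric name is hypothetical; `8428` is the default HTTP port):
```bash
curl 'http://localhost:8428/render?target=foo.bar.baz&from=-1h&format=json'
```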
@ -840,7 +841,7 @@ The [deduplication](#deduplication) isn't applied for the data exported in nativ
## How to import time series data
Time series data can be imported into VictoriaMetrics via any supported ingestion protocol:
Time series data can be imported into VictoriaMetrics via any supported data ingestion protocol:
* [Prometheus remote_write API](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write). See [these docs](#prometheus-setup) for details.
* DataDog `submit metrics` API. See [these docs](#how-to-send-data-from-datadog-agent) for details.

View file

@ -358,6 +358,8 @@ spread scrape targets among a cluster of two `vmagent` instances:
/path/to/vmagent -promscrape.cluster.membersCount=2 -promscrape.cluster.memberNum=1 -promscrape.config=/path/to/config.yml ...
```
The `-promscrape.cluster.memberNum` can be set to a StatefulSet pod name when `vmagent` runs in Kubernetes. The pod name must end with a number in the range `0 ... promscrape.cluster.membersCount-1`. For example, `-promscrape.cluster.memberNum=vmagent-0`.
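For illustration, a minimal sketch of the sharding idea behind these flags, assuming a simple hash-mod assignment; the actual `vmagent` hashing and replica placement may differ. The `replicationFactor` parameter corresponds to the `-promscrape.cluster.replicationFactor` flag described below:
```go
package main

import (
	"fmt"
	"hash/fnv"
)

// shouldScrape reports whether the vmagent member with the given memberNum
// should scrape the target, assuming targets are sharded by hash modulo
// membersCount and replicated to the next replicationFactor-1 members.
func shouldScrape(target string, memberNum, membersCount, replicationFactor int) bool {
	h := fnv.New64a()
	h.Write([]byte(target))
	base := int(h.Sum64() % uint64(membersCount))
	for i := 0; i < replicationFactor; i++ {
		if (base+i)%membersCount == memberNum {
			return true
		}
	}
	return false
}

func main() {
	// With membersCount=2 and replicationFactor=2 every member scrapes every target.
	fmt.Println(shouldScrape("node-exporter:9100", 0, 2, 2))
	fmt.Println(shouldScrape("node-exporter:9100", 1, 2, 2))
}
```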
By default each scrape target is scraped only by a single `vmagent` instance in the cluster. If there is a need for replicating scrape targets among multiple `vmagent` instances,
then `-promscrape.cluster.replicationFactor` command-line flag must be set to the desired number of replicas. For example, the following commands
start a cluster of three `vmagent` instances, where each target is scraped by two `vmagent` instances:

View file

@ -911,12 +911,17 @@ static_configs:
consul_sd_configs:
[ - <consul_sd_config> ... ]
# List of relabel configurations.
# List of relabel configurations for entities discovered via service discovery.
# Supports the same relabeling features as the rest of VictoriaMetrics components.
# See https://docs.victoriametrics.com/vmagent.html#relabeling
relabel_configs:
[ - <relabel_config> ... ]
# List of relabel configurations for alert labels sent via Notifier.
# Supports the same relabeling features as the rest of VictoriaMetrics components.
# See https://docs.victoriametrics.com/vmagent.html#relabeling
alert_relabel_configs:
[ - <relabel_config> ... ]
```
The configuration file can be [hot-reloaded](#hot-config-reload).
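For illustration, a hypothetical `-notifier.config` file combining both relabeling stages (the values are made up; the structure follows the spec above):
```yaml
consul_sd_configs:
  - server: localhost:8500
    services:
      - alertmanager
# Applied to targets discovered via service discovery:
relabel_configs:
  - source_labels: [__meta_consul_tags]
    regex: .*,__scheme__=([^,]+),.*
    replacement: '${1}'
    target_label: __scheme__
# Applied to alert labels before sending alerts via Notifier:
alert_relabel_configs:
  - target_label: "foo"
    replacement: "aaa"
```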

View file

@ -9,6 +9,8 @@ import (
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
)
// Alert the triggered alert
@ -147,3 +149,18 @@ func templateAnnotation(dst io.Writer, text string, data tplData, funcs template
}
return nil
}
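// toPromLabels converts the alert labels into a sorted list of prompbmarshal labels
// and applies the given relabeling rules, if any. A nil relabelCfg leaves the labels unchanged.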
func (a Alert) toPromLabels(relabelCfg *promrelabel.ParsedConfigs) []prompbmarshal.Label {
var labels []prompbmarshal.Label
for k, v := range a.Labels {
labels = append(labels, prompbmarshal.Label{
Name: k,
Value: v,
})
}
promrelabel.SortLabels(labels)
if relabelCfg != nil {
return relabelCfg.Apply(labels, 0, false)
}
return labels
}

View file

@ -2,9 +2,12 @@ package notifier
import (
"fmt"
"reflect"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
)
func TestAlert_ExecTemplate(t *testing.T) {
@ -146,3 +149,48 @@ func TestAlert_ExecTemplate(t *testing.T) {
})
}
}
func TestAlert_toPromLabels(t *testing.T) {
fn := func(labels map[string]string, exp []prompbmarshal.Label, relabel *promrelabel.ParsedConfigs) {
t.Helper()
a := Alert{Labels: labels}
got := a.toPromLabels(relabel)
if !reflect.DeepEqual(got, exp) {
t.Fatalf("expected to have: \n%v;\ngot:\n%v",
exp, got)
}
}
fn(nil, nil, nil)
fn(
map[string]string{"foo": "bar", "a": "baz"}, // unsorted
[]prompbmarshal.Label{{Name: "a", Value: "baz"}, {Name: "foo", Value: "bar"}},
nil,
)
pcs, err := promrelabel.ParseRelabelConfigsData([]byte(`
- target_label: "foo"
replacement: "aaa"
- action: labeldrop
regex: "env.*"
`), false)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
fn(
map[string]string{"a": "baz"},
[]prompbmarshal.Label{{Name: "a", Value: "baz"}, {Name: "foo", Value: "aaa"}},
pcs,
)
fn(
map[string]string{"foo": "bar", "a": "baz"},
[]prompbmarshal.Label{{Name: "a", Value: "baz"}, {Name: "foo", Value: "aaa"}},
pcs,
)
fn(
map[string]string{"qux": "bar", "env": "prod", "environment": "production"},
[]prompbmarshal.Label{{Name: "foo", Value: "aaa"}, {Name: "qux", Value: "bar"}},
pcs,
)
}

View file

@ -11,6 +11,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
)
// AlertManager represents integration provider with Prometheus alert manager
@ -22,6 +23,8 @@ type AlertManager struct {
timeout time.Duration
authCfg *promauth.Config
// stores already parsed RelabelConfigs object
relabelConfigs *promrelabel.ParsedConfigs
metrics *metrics
}
@ -59,7 +62,7 @@ func (am *AlertManager) Send(ctx context.Context, alerts []Alert) error {
func (am *AlertManager) send(ctx context.Context, alerts []Alert) error {
b := &bytes.Buffer{}
writeamRequest(b, alerts, am.argFunc)
writeamRequest(b, alerts, am.argFunc, am.relabelConfigs)
req, err := http.NewRequest("POST", am.addr, b)
if err != nil {
@ -103,7 +106,8 @@ type AlertURLGenerator func(Alert) string
const alertManagerPath = "/api/v2/alerts"
// NewAlertManager is a constructor for AlertManager
func NewAlertManager(alertManagerURL string, fn AlertURLGenerator, authCfg promauth.HTTPClientConfig, timeout time.Duration) (*AlertManager, error) {
func NewAlertManager(alertManagerURL string, fn AlertURLGenerator, authCfg promauth.HTTPClientConfig,
relabelCfg *promrelabel.ParsedConfigs, timeout time.Duration) (*AlertManager, error) {
tls := &promauth.TLSConfig{}
if authCfg.TLSConfig != nil {
tls = authCfg.TLSConfig
@ -131,11 +135,12 @@ func NewAlertManager(alertManagerURL string, fn AlertURLGenerator, authCfg proma
}
return &AlertManager{
addr: alertManagerURL,
argFunc: fn,
authCfg: aCfg,
client: &http.Client{Transport: tr},
timeout: timeout,
metrics: newMetrics(alertManagerURL),
addr: alertManagerURL,
argFunc: fn,
authCfg: aCfg,
relabelConfigs: relabelCfg,
client: &http.Client{Transport: tr},
timeout: timeout,
metrics: newMetrics(alertManagerURL),
}, nil
}
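A minimal usage sketch of the updated constructor, written as it could appear inside the `notifier` package; the relabeling rules and the URL are illustrative:
```go
// Hypothetical helper inside the notifier package; imports assumed:
// "time", "github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth",
// "github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel".
func newExampleAlertManager() (*AlertManager, error) {
	pcs, err := promrelabel.ParseRelabelConfigsData([]byte(`
- target_label: "foo"
  replacement: "aaa"
`), false)
	if err != nil {
		return nil, err
	}
	return NewAlertManager("http://localhost:9093"+alertManagerPath,
		func(alert Alert) string { return "" }, // generatorURL stub
		promauth.HTTPClientConfig{}, pcs, time.Minute)
}
```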

View file

@ -1,9 +1,11 @@
{% import (
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
) %}
{% stripspace %}
{% func amRequest(alerts []Alert, generatorURL func(Alert) string) %}
{% func amRequest(alerts []Alert, generatorURL func(Alert) string, relabelCfg *promrelabel.ParsedConfigs) %}
[
{% for i, alert := range alerts %}
{
@ -14,8 +16,9 @@
{% endif %}
"labels": {
"alertname":{%q= alert.Name %}
{% for k,v := range alert.Labels %}
,{%q= k %}:{%q= v %}
{% code lbls := alert.toPromLabels(relabelCfg) %}
{% for _, l := range lbls %}
,{%q= l.Name %}:{%q= l.Value %}
{% endfor %}
},
"annotations": {

View file

@ -7,124 +7,129 @@ package notifier
//line app/vmalert/notifier/alertmanager_request.qtpl:1
import (
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
)
//line app/vmalert/notifier/alertmanager_request.qtpl:6
//line app/vmalert/notifier/alertmanager_request.qtpl:8
import (
qtio422016 "io"
qt422016 "github.com/valyala/quicktemplate"
)
//line app/vmalert/notifier/alertmanager_request.qtpl:6
//line app/vmalert/notifier/alertmanager_request.qtpl:8
var (
_ = qtio422016.Copy
_ = qt422016.AcquireByteBuffer
)
//line app/vmalert/notifier/alertmanager_request.qtpl:6
func streamamRequest(qw422016 *qt422016.Writer, alerts []Alert, generatorURL func(Alert) string) {
//line app/vmalert/notifier/alertmanager_request.qtpl:6
//line app/vmalert/notifier/alertmanager_request.qtpl:8
func streamamRequest(qw422016 *qt422016.Writer, alerts []Alert, generatorURL func(Alert) string, relabelCfg *promrelabel.ParsedConfigs) {
//line app/vmalert/notifier/alertmanager_request.qtpl:8
qw422016.N().S(`[`)
//line app/vmalert/notifier/alertmanager_request.qtpl:8
//line app/vmalert/notifier/alertmanager_request.qtpl:10
for i, alert := range alerts {
//line app/vmalert/notifier/alertmanager_request.qtpl:8
//line app/vmalert/notifier/alertmanager_request.qtpl:10
qw422016.N().S(`{"startsAt":`)
//line app/vmalert/notifier/alertmanager_request.qtpl:10
//line app/vmalert/notifier/alertmanager_request.qtpl:12
qw422016.N().Q(alert.Start.Format(time.RFC3339Nano))
//line app/vmalert/notifier/alertmanager_request.qtpl:10
//line app/vmalert/notifier/alertmanager_request.qtpl:12
qw422016.N().S(`,"generatorURL":`)
//line app/vmalert/notifier/alertmanager_request.qtpl:11
//line app/vmalert/notifier/alertmanager_request.qtpl:13
qw422016.N().Q(generatorURL(alert))
//line app/vmalert/notifier/alertmanager_request.qtpl:11
//line app/vmalert/notifier/alertmanager_request.qtpl:13
qw422016.N().S(`,`)
//line app/vmalert/notifier/alertmanager_request.qtpl:12
//line app/vmalert/notifier/alertmanager_request.qtpl:14
if !alert.End.IsZero() {
//line app/vmalert/notifier/alertmanager_request.qtpl:12
//line app/vmalert/notifier/alertmanager_request.qtpl:14
qw422016.N().S(`"endsAt":`)
//line app/vmalert/notifier/alertmanager_request.qtpl:13
//line app/vmalert/notifier/alertmanager_request.qtpl:15
qw422016.N().Q(alert.End.Format(time.RFC3339Nano))
//line app/vmalert/notifier/alertmanager_request.qtpl:13
//line app/vmalert/notifier/alertmanager_request.qtpl:15
qw422016.N().S(`,`)
//line app/vmalert/notifier/alertmanager_request.qtpl:14
}
//line app/vmalert/notifier/alertmanager_request.qtpl:14
qw422016.N().S(`"labels": {"alertname":`)
//line app/vmalert/notifier/alertmanager_request.qtpl:16
qw422016.N().Q(alert.Name)
//line app/vmalert/notifier/alertmanager_request.qtpl:17
for k, v := range alert.Labels {
//line app/vmalert/notifier/alertmanager_request.qtpl:17
qw422016.N().S(`,`)
//line app/vmalert/notifier/alertmanager_request.qtpl:18
qw422016.N().Q(k)
//line app/vmalert/notifier/alertmanager_request.qtpl:18
qw422016.N().S(`:`)
//line app/vmalert/notifier/alertmanager_request.qtpl:18
qw422016.N().Q(v)
//line app/vmalert/notifier/alertmanager_request.qtpl:19
}
//line app/vmalert/notifier/alertmanager_request.qtpl:16
qw422016.N().S(`"labels": {"alertname":`)
//line app/vmalert/notifier/alertmanager_request.qtpl:18
qw422016.N().Q(alert.Name)
//line app/vmalert/notifier/alertmanager_request.qtpl:19
qw422016.N().S(`},"annotations": {`)
lbls := alert.toPromLabels(relabelCfg)
//line app/vmalert/notifier/alertmanager_request.qtpl:20
for _, l := range lbls {
//line app/vmalert/notifier/alertmanager_request.qtpl:20
qw422016.N().S(`,`)
//line app/vmalert/notifier/alertmanager_request.qtpl:21
qw422016.N().Q(l.Name)
//line app/vmalert/notifier/alertmanager_request.qtpl:21
qw422016.N().S(`:`)
//line app/vmalert/notifier/alertmanager_request.qtpl:21
qw422016.N().Q(l.Value)
//line app/vmalert/notifier/alertmanager_request.qtpl:22
}
//line app/vmalert/notifier/alertmanager_request.qtpl:22
qw422016.N().S(`},"annotations": {`)
//line app/vmalert/notifier/alertmanager_request.qtpl:25
c := len(alert.Annotations)
//line app/vmalert/notifier/alertmanager_request.qtpl:23
//line app/vmalert/notifier/alertmanager_request.qtpl:26
for k, v := range alert.Annotations {
//line app/vmalert/notifier/alertmanager_request.qtpl:24
//line app/vmalert/notifier/alertmanager_request.qtpl:27
c = c - 1
//line app/vmalert/notifier/alertmanager_request.qtpl:25
//line app/vmalert/notifier/alertmanager_request.qtpl:28
qw422016.N().Q(k)
//line app/vmalert/notifier/alertmanager_request.qtpl:25
//line app/vmalert/notifier/alertmanager_request.qtpl:28
qw422016.N().S(`:`)
//line app/vmalert/notifier/alertmanager_request.qtpl:25
//line app/vmalert/notifier/alertmanager_request.qtpl:28
qw422016.N().Q(v)
//line app/vmalert/notifier/alertmanager_request.qtpl:25
//line app/vmalert/notifier/alertmanager_request.qtpl:28
if c > 0 {
//line app/vmalert/notifier/alertmanager_request.qtpl:25
//line app/vmalert/notifier/alertmanager_request.qtpl:28
qw422016.N().S(`,`)
//line app/vmalert/notifier/alertmanager_request.qtpl:25
//line app/vmalert/notifier/alertmanager_request.qtpl:28
}
//line app/vmalert/notifier/alertmanager_request.qtpl:26
//line app/vmalert/notifier/alertmanager_request.qtpl:29
}
//line app/vmalert/notifier/alertmanager_request.qtpl:26
//line app/vmalert/notifier/alertmanager_request.qtpl:29
qw422016.N().S(`}}`)
//line app/vmalert/notifier/alertmanager_request.qtpl:29
//line app/vmalert/notifier/alertmanager_request.qtpl:32
if i != len(alerts)-1 {
//line app/vmalert/notifier/alertmanager_request.qtpl:29
//line app/vmalert/notifier/alertmanager_request.qtpl:32
qw422016.N().S(`,`)
//line app/vmalert/notifier/alertmanager_request.qtpl:29
//line app/vmalert/notifier/alertmanager_request.qtpl:32
}
//line app/vmalert/notifier/alertmanager_request.qtpl:30
//line app/vmalert/notifier/alertmanager_request.qtpl:33
}
//line app/vmalert/notifier/alertmanager_request.qtpl:30
//line app/vmalert/notifier/alertmanager_request.qtpl:33
qw422016.N().S(`]`)
//line app/vmalert/notifier/alertmanager_request.qtpl:32
//line app/vmalert/notifier/alertmanager_request.qtpl:35
}
//line app/vmalert/notifier/alertmanager_request.qtpl:32
func writeamRequest(qq422016 qtio422016.Writer, alerts []Alert, generatorURL func(Alert) string) {
//line app/vmalert/notifier/alertmanager_request.qtpl:32
//line app/vmalert/notifier/alertmanager_request.qtpl:35
func writeamRequest(qq422016 qtio422016.Writer, alerts []Alert, generatorURL func(Alert) string, relabelCfg *promrelabel.ParsedConfigs) {
//line app/vmalert/notifier/alertmanager_request.qtpl:35
qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmalert/notifier/alertmanager_request.qtpl:32
streamamRequest(qw422016, alerts, generatorURL)
//line app/vmalert/notifier/alertmanager_request.qtpl:32
//line app/vmalert/notifier/alertmanager_request.qtpl:35
streamamRequest(qw422016, alerts, generatorURL, relabelCfg)
//line app/vmalert/notifier/alertmanager_request.qtpl:35
qt422016.ReleaseWriter(qw422016)
//line app/vmalert/notifier/alertmanager_request.qtpl:32
//line app/vmalert/notifier/alertmanager_request.qtpl:35
}
//line app/vmalert/notifier/alertmanager_request.qtpl:32
func amRequest(alerts []Alert, generatorURL func(Alert) string) string {
//line app/vmalert/notifier/alertmanager_request.qtpl:32
//line app/vmalert/notifier/alertmanager_request.qtpl:35
func amRequest(alerts []Alert, generatorURL func(Alert) string, relabelCfg *promrelabel.ParsedConfigs) string {
//line app/vmalert/notifier/alertmanager_request.qtpl:35
qb422016 := qt422016.AcquireByteBuffer()
//line app/vmalert/notifier/alertmanager_request.qtpl:32
writeamRequest(qb422016, alerts, generatorURL)
//line app/vmalert/notifier/alertmanager_request.qtpl:32
//line app/vmalert/notifier/alertmanager_request.qtpl:35
writeamRequest(qb422016, alerts, generatorURL, relabelCfg)
//line app/vmalert/notifier/alertmanager_request.qtpl:35
qs422016 := string(qb422016.B)
//line app/vmalert/notifier/alertmanager_request.qtpl:32
//line app/vmalert/notifier/alertmanager_request.qtpl:35
qt422016.ReleaseByteBuffer(qb422016)
//line app/vmalert/notifier/alertmanager_request.qtpl:32
//line app/vmalert/notifier/alertmanager_request.qtpl:35
return qs422016
//line app/vmalert/notifier/alertmanager_request.qtpl:32
//line app/vmalert/notifier/alertmanager_request.qtpl:35
}

View file

@ -14,7 +14,7 @@ import (
func TestAlertManager_Addr(t *testing.T) {
const addr = "http://localhost"
am, err := NewAlertManager(addr, nil, promauth.HTTPClientConfig{}, 0)
am, err := NewAlertManager(addr, nil, promauth.HTTPClientConfig{}, nil, 0)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
@ -89,7 +89,7 @@ func TestAlertManager_Send(t *testing.T) {
}
am, err := NewAlertManager(srv.URL+alertManagerPath, func(alert Alert) string {
return strconv.FormatUint(alert.GroupID, 10) + "/" + strconv.FormatUint(alert.ID, 10)
}, aCfg, 0)
}, aCfg, nil, 0)
if err != nil {
t.Errorf("unexpected error: %s", err)
}

View file

@ -34,9 +34,10 @@ type Config struct {
// HTTPClientConfig contains HTTP configuration for Notifier clients
HTTPClientConfig promauth.HTTPClientConfig `yaml:",inline"`
// RelabelConfigs contains list of relabeling rules
// RelabelConfigs contains list of relabeling rules for entities discovered via SD
RelabelConfigs []promrelabel.RelabelConfig `yaml:"relabel_configs,omitempty"`
// AlertRelabelConfigs contains list of relabeling rules for alert labels
AlertRelabelConfigs []promrelabel.RelabelConfig `yaml:"alert_relabel_configs,omitempty"`
// The timeout used when sending alerts.
Timeout promutils.Duration `yaml:"timeout,omitempty"`
@ -52,6 +53,8 @@ type Config struct {
// stores already parsed RelabelConfigs object
parsedRelabelConfigs *promrelabel.ParsedConfigs
// stores already parsed AlertRelabelConfigs object
parsedAlertRelabelConfigs *promrelabel.ParsedConfigs
}
// StaticConfig contains list of static targets in the following form:
@ -78,6 +81,11 @@ func (cfg *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
return fmt.Errorf("failed to parse relabeling config: %w", err)
}
cfg.parsedRelabelConfigs = rCfg
arCfg, err := promrelabel.ParseRelabelConfigs(cfg.AlertRelabelConfigs, false)
if err != nil {
return fmt.Errorf("failed to parse alert relabeling config: %w", err)
}
cfg.parsedAlertRelabelConfigs = arCfg
b, err := yaml.Marshal(cfg)
if err != nil {

View file

@ -141,7 +141,7 @@ func targetsFromLabels(labelsFn getLabels, cfg *Config, genFn AlertURLGenerator)
}
duplicates[u] = struct{}{}
am, err := NewAlertManager(u, genFn, cfg.HTTPClientConfig, cfg.Timeout.Duration())
am, err := NewAlertManager(u, genFn, cfg.HTTPClientConfig, cfg.parsedAlertRelabelConfigs, cfg.Timeout.Duration())
if err != nil {
errors = append(errors, err)
continue
@ -165,7 +165,7 @@ func (cw *configWatcher) start() error {
if err != nil {
return fmt.Errorf("failed to parse labels for target %q: %s", target, err)
}
notifier, err := NewAlertManager(address, cw.genFn, cw.cfg.HTTPClientConfig, cw.cfg.Timeout.Duration())
notifier, err := NewAlertManager(address, cw.genFn, cw.cfg.HTTPClientConfig, cw.cfg.parsedRelabelConfigs, cw.cfg.Timeout.Duration())
if err != nil {
return fmt.Errorf("failed to init alertmanager for addr %q: %s", address, err)
}

View file

@ -138,7 +138,7 @@ func notifiersFromFlags(gen AlertURLGenerator) ([]Notifier, error) {
}
addr = strings.TrimSuffix(addr, "/")
am, err := NewAlertManager(addr+alertManagerPath, gen, authCfg, time.Minute)
am, err := NewAlertManager(addr+alertManagerPath, gen, authCfg, nil, time.Minute)
if err != nil {
return nil, err
}

View file

@ -10,4 +10,7 @@ relabel_configs:
- source_labels: [__meta_consul_tags]
regex: .*,__scheme__=([^,]+),.*
replacement: '${1}'
target_label: __scheme__
target_label: __scheme__
alert_relabel_configs:
- target_label: "foo"
replacement: "aaa"

View file

@ -2,3 +2,6 @@ static_configs:
- targets:
- localhost:9093
- localhost:9095
alert_relabel_configs:
- target_label: "foo"
replacement: "aaa"

View file

@ -97,6 +97,11 @@ var (
)
// InitRollupResultCache initializes the rollupResult cache
//
// if cachePath is empty, then the cache isn't stored to persistent disk.
//
// ResetRollupResultCache must be called when the cache must be reset.
// StopRollupResultCache must be called when the cache isn't needed anymore.
func InitRollupResultCache(cachePath string) {
rollupResultCachePath = cachePath
startTime := time.Now()
@ -133,16 +138,19 @@ func InitRollupResultCache(cachePath string) {
rollupResultCachePath, time.Since(startTime).Seconds(), fcs().EntriesCount, fcs().BytesSize)
}
metrics.NewGauge(`vm_cache_entries{type="promql/rollupResult"}`, func() float64 {
// Use metrics.GetOrCreateGauge instead of metrics.NewGauge,
// so InitRollupResultCache+StopRollupResultCache could be called multiple times in tests.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2406
metrics.GetOrCreateGauge(`vm_cache_entries{type="promql/rollupResult"}`, func() float64 {
return float64(fcs().EntriesCount)
})
metrics.NewGauge(`vm_cache_size_bytes{type="promql/rollupResult"}`, func() float64 {
metrics.GetOrCreateGauge(`vm_cache_size_bytes{type="promql/rollupResult"}`, func() float64 {
return float64(fcs().BytesSize)
})
metrics.NewGauge(`vm_cache_requests_total{type="promql/rollupResult"}`, func() float64 {
metrics.GetOrCreateGauge(`vm_cache_requests_total{type="promql/rollupResult"}`, func() float64 {
return float64(fcs().GetCalls)
})
metrics.NewGauge(`vm_cache_misses_total{type="promql/rollupResult"}`, func() float64 {
metrics.GetOrCreateGauge(`vm_cache_misses_total{type="promql/rollupResult"}`, func() float64 {
return float64(fcs().Misses)
})
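A small sketch of why `GetOrCreateGauge` matters here, assuming the semantics of the `github.com/VictoriaMetrics/metrics` package described in the comment above (`NewGauge` panics on duplicate registration, while `GetOrCreateGauge` returns the already registered gauge):
```go
package main

import "github.com/VictoriaMetrics/metrics"

func registerCacheMetrics(entries func() float64) {
	// Safe to call repeatedly, e.g. from InitRollupResultCache in tests:
	metrics.GetOrCreateGauge(`vm_cache_entries{type="promql/rollupResult"}`, entries)
	// metrics.NewGauge(...) here would panic on the second call.
}

func main() {
	registerCacheMetrics(func() float64 { return 0 })
	registerCacheMetrics(func() float64 { return 0 }) // no panic
}
```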

View file

@ -3,11 +3,32 @@ package promql
import (
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/metricsql"
)
func TestRollupResultCacheInitStop(t *testing.T) {
t.Run("inmemory", func(t *testing.T) {
for i := 0; i < 5; i++ {
InitRollupResultCache("")
StopRollupResultCache()
}
})
t.Run("file-based", func(t *testing.T) {
cacheFilePath := "test-rollup-result-cache"
for i := 0; i < 3; i++ {
InitRollupResultCache(cacheFilePath)
StopRollupResultCache()
}
fs.MustRemoveAll(cacheFilePath)
})
}
func TestRollupResultCache(t *testing.T) {
InitRollupResultCache("")
defer StopRollupResultCache()
ResetRollupResultCache()
window := int64(456)
ec := &EvalConfig{

View file

@ -1,7 +1,7 @@
{
"files": {
"main.css": "./static/css/main.d8362c27.css",
"main.js": "./static/js/main.040ed7ac.js",
"main.js": "./static/js/main.d940c8c2.js",
"static/js/362.1a2113d4.chunk.js": "./static/js/362.1a2113d4.chunk.js",
"static/js/27.939f971b.chunk.js": "./static/js/27.939f971b.chunk.js",
"static/media/README.md": "./static/media/README.5e5724daf3ee333540a3.md",
@ -9,6 +9,6 @@
},
"entrypoints": [
"static/css/main.d8362c27.css",
"static/js/main.040ed7ac.js"
"static/js/main.d940c8c2.js"
]
}

View file

@ -1 +1 @@
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><script defer="defer" src="./static/js/main.040ed7ac.js"></script><link href="./static/css/main.d8362c27.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><script defer="defer" src="./static/js/main.d940c8c2.js"></script><link href="./static/css/main.d8362c27.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View file

@ -10,7 +10,7 @@
"dependencies": {
"@date-io/dayjs": "^2.13.1",
"@emotion/styled": "^11.8.1",
"@mui/icons-material": "^5.5.1",
"@mui/icons-material": "^5.6.0",
"@mui/lab": "^5.0.0-alpha.73",
"@mui/material": "^5.5.1",
"@mui/styles": "^5.5.1",
@ -25,7 +25,7 @@
"@types/node": "^17.0.21",
"@types/qs": "^6.9.7",
"@types/react": "^17.0.43",
"@types/react-dom": "^17.0.14",
"@types/react-dom": "^18.0.0",
"@types/react-measure": "^2.0.8",
"@types/react-router-dom": "^5.3.3",
"@types/webpack-env": "^1.16.3",
@ -33,8 +33,8 @@
"lodash.debounce": "^4.0.8",
"lodash.get": "^4.4.2",
"lodash.throttle": "^4.1.1",
"marked": "^4.0.12",
"preact": "^10.6.6",
"marked": "^4.0.14",
"preact": "^10.7.1",
"qs": "^6.10.3",
"react-router-dom": "^6.3.0",
"typescript": "~4.6.2",
@ -3285,9 +3285,9 @@
}
},
"node_modules/@mui/icons-material": {
"version": "5.5.1",
"resolved": "https://registry.npmjs.org/@mui/icons-material/-/icons-material-5.5.1.tgz",
"integrity": "sha512-40f68p5+Yhq3dCn3QYHqQt5RETPyR3AkDw+fma8PtcjqvZ+d+jF84kFmT6NqwA3he7TlwluEtkyAmPzUE4uPdA==",
"version": "5.6.0",
"resolved": "https://registry.npmjs.org/@mui/icons-material/-/icons-material-5.6.0.tgz",
"integrity": "sha512-2GDGt+/BbwM3oVkF84b9FFKQdQ9TxBJIRnTwT99vO2mimdfJaojxMRB2lkysm9tUY4HOf0yoU6O//X6GTC0Zhw==",
"dependencies": {
"@babel/runtime": "^7.17.2"
},
@ -3300,8 +3300,8 @@
},
"peerDependencies": {
"@mui/material": "^5.0.0",
"@types/react": "^16.8.6 || ^17.0.0",
"react": "^17.0.0"
"@types/react": "^16.8.6 || ^17.0.0 || ^18.0.0",
"react": "^17.0.0 || ^18.0.0"
},
"peerDependenciesMeta": {
"@types/react": {
@ -4608,9 +4608,9 @@
}
},
"node_modules/@types/react-dom": {
"version": "17.0.14",
"resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-17.0.14.tgz",
"integrity": "sha512-H03xwEP1oXmSfl3iobtmQ/2dHF5aBHr8aUMwyGZya6OW45G+xtdzmq6HkncefiBt5JU8DVyaWl/nWZbjZCnzAQ==",
"version": "18.0.0",
"resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.0.0.tgz",
"integrity": "sha512-49897Y0UiCGmxZqpC8Blrf6meL8QUla6eb+BBhn69dTXlmuOlzkfr7HHY/O8J25e1lTUMs+YYxSlVDAaGHCOLg==",
"dependencies": {
"@types/react": "*"
}
@ -13330,9 +13330,9 @@
}
},
"node_modules/marked": {
"version": "4.0.12",
"resolved": "https://registry.npmjs.org/marked/-/marked-4.0.12.tgz",
"integrity": "sha512-hgibXWrEDNBWgGiK18j/4lkS6ihTe9sxtV4Q1OQppb/0zzyPSzoFANBa5MfsG/zgsWklmNnhm0XACZOH/0HBiQ==",
"version": "4.0.14",
"resolved": "https://registry.npmjs.org/marked/-/marked-4.0.14.tgz",
"integrity": "sha512-HL5sSPE/LP6U9qKgngIIPTthuxC0jrfxpYMZ3LdGDD3vTnLs59m2Z7r6+LNDR3ToqEQdkKd6YaaEfJhodJmijQ==",
"bin": {
"marked": "bin/marked.js"
},
@ -15627,9 +15627,9 @@
"peer": true
},
"node_modules/preact": {
"version": "10.7.0",
"resolved": "https://registry.npmjs.org/preact/-/preact-10.7.0.tgz",
"integrity": "sha512-9MEURwzNMKpAil/t6+wabDIJI6oG6GnwypYxiJDvQnW+fHDTt51PYuLZ1QUM31hFr7sDaj9qTaShAF9VIxuxGQ==",
"version": "10.7.1",
"resolved": "https://registry.npmjs.org/preact/-/preact-10.7.1.tgz",
"integrity": "sha512-MufnRFz39aIhs9AMFisonjzTud1PK1bY+jcJLo6m2T9Uh8AqjD77w11eAAawmjUogoGOnipECq7e/1RClIKsxg==",
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/preact"
@ -21870,9 +21870,9 @@
}
},
"@mui/icons-material": {
"version": "5.5.1",
"resolved": "https://registry.npmjs.org/@mui/icons-material/-/icons-material-5.5.1.tgz",
"integrity": "sha512-40f68p5+Yhq3dCn3QYHqQt5RETPyR3AkDw+fma8PtcjqvZ+d+jF84kFmT6NqwA3he7TlwluEtkyAmPzUE4uPdA==",
"version": "5.6.0",
"resolved": "https://registry.npmjs.org/@mui/icons-material/-/icons-material-5.6.0.tgz",
"integrity": "sha512-2GDGt+/BbwM3oVkF84b9FFKQdQ9TxBJIRnTwT99vO2mimdfJaojxMRB2lkysm9tUY4HOf0yoU6O//X6GTC0Zhw==",
"requires": {
"@babel/runtime": "^7.17.2"
}
@ -22789,9 +22789,9 @@
}
},
"@types/react-dom": {
"version": "17.0.14",
"resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-17.0.14.tgz",
"integrity": "sha512-H03xwEP1oXmSfl3iobtmQ/2dHF5aBHr8aUMwyGZya6OW45G+xtdzmq6HkncefiBt5JU8DVyaWl/nWZbjZCnzAQ==",
"version": "18.0.0",
"resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.0.0.tgz",
"integrity": "sha512-49897Y0UiCGmxZqpC8Blrf6meL8QUla6eb+BBhn69dTXlmuOlzkfr7HHY/O8J25e1lTUMs+YYxSlVDAaGHCOLg==",
"requires": {
"@types/react": "*"
}
@ -29484,9 +29484,9 @@
}
},
"marked": {
"version": "4.0.12",
"resolved": "https://registry.npmjs.org/marked/-/marked-4.0.12.tgz",
"integrity": "sha512-hgibXWrEDNBWgGiK18j/4lkS6ihTe9sxtV4Q1OQppb/0zzyPSzoFANBa5MfsG/zgsWklmNnhm0XACZOH/0HBiQ=="
"version": "4.0.14",
"resolved": "https://registry.npmjs.org/marked/-/marked-4.0.14.tgz",
"integrity": "sha512-HL5sSPE/LP6U9qKgngIIPTthuxC0jrfxpYMZ3LdGDD3vTnLs59m2Z7r6+LNDR3ToqEQdkKd6YaaEfJhodJmijQ=="
},
"mdn-data": {
"version": "2.0.4",
@ -31087,9 +31087,9 @@
"peer": true
},
"preact": {
"version": "10.7.0",
"resolved": "https://registry.npmjs.org/preact/-/preact-10.7.0.tgz",
"integrity": "sha512-9MEURwzNMKpAil/t6+wabDIJI6oG6GnwypYxiJDvQnW+fHDTt51PYuLZ1QUM31hFr7sDaj9qTaShAF9VIxuxGQ=="
"version": "10.7.1",
"resolved": "https://registry.npmjs.org/preact/-/preact-10.7.1.tgz",
"integrity": "sha512-MufnRFz39aIhs9AMFisonjzTud1PK1bY+jcJLo6m2T9Uh8AqjD77w11eAAawmjUogoGOnipECq7e/1RClIKsxg=="
},
"prelude-ls": {
"version": "1.2.1",

View file

@ -6,7 +6,7 @@
"dependencies": {
"@date-io/dayjs": "^2.13.1",
"@emotion/styled": "^11.8.1",
"@mui/icons-material": "^5.5.1",
"@mui/icons-material": "^5.6.0",
"@mui/lab": "^5.0.0-alpha.73",
"@mui/material": "^5.5.1",
"@mui/styles": "^5.5.1",
@ -21,7 +21,7 @@
"@types/node": "^17.0.21",
"@types/qs": "^6.9.7",
"@types/react": "^17.0.43",
"@types/react-dom": "^17.0.14",
"@types/react-dom": "^18.0.0",
"@types/react-measure": "^2.0.8",
"@types/react-router-dom": "^5.3.3",
"@types/webpack-env": "^1.16.3",
@ -29,8 +29,8 @@
"lodash.debounce": "^4.0.8",
"lodash.get": "^4.4.2",
"lodash.throttle": "^4.1.1",
"marked": "^4.0.12",
"preact": "^10.6.6",
"marked": "^4.0.14",
"preact": "^10.7.1",
"qs": "^6.10.3",
"react-router-dom": "^6.3.0",
"typescript": "~4.6.2",

View file

@ -29,10 +29,14 @@ export const defaultOptions = {
};
export const formatTicks = (u: uPlot, ticks: number[], unit = ""): string[] => {
return ticks.map(v => {
const n = Math.abs(v);
return `${n > 1e-3 && n < 1e4 ? v.toString() : v.toExponential(1)} ${unit}`;
});
return ticks.map(v => `${formatPrettyNumber(v)} ${unit}`);
};
export const formatPrettyNumber = (n: number | null | undefined): string => {
if (n === undefined || n === null) {
return "";
}
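// Locale-aware formatting adds thousands separators, e.g. 12345.6 -> "12,345.6".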
return n.toLocaleString("en-US", { maximumSignificantDigits: 20 });
};
interface AxisExtend extends Axis {

View file

@ -1,6 +1,6 @@
import dayjs from "dayjs";
import {SetupTooltip} from "./types";
import {getColorLine} from "./helpers";
import {getColorLine, formatPrettyNumber} from "./helpers";
export const setTooltip = ({u, tooltipIdx, metrics, series, tooltip, tooltipOffset, unit = ""}: SetupTooltip): void => {
const {seriesIdx, dataIdx} = tooltipIdx;
@ -25,7 +25,7 @@ export const setTooltip = ({u, tooltipIdx, metrics, series, tooltip, tooltipOffs
const marker = `<div class="u-tooltip__marker" style="background: ${color}"></div>`;
tooltip.innerHTML = `<div>${date}</div>
<div class="u-tooltip-data">
${marker}${metric.__name__ || ""}: <b class="u-tooltip-data__value">${dataSeries}</b> ${unit}
${marker}${metric.__name__ || ""}: <b class="u-tooltip-data__value">${formatPrettyNumber(dataSeries)}</b> ${unit}
</div>
<div class="u-tooltip__info">${info}</div>`;
};

View file

@ -2,8 +2,8 @@
DOCKER_NAMESPACE := victoriametrics
ROOT_IMAGE ?= alpine:3.15.3
CERTS_IMAGE := alpine:3.15.3
ROOT_IMAGE ?= alpine:3.15.4
CERTS_IMAGE := alpine:3.15.4
GO_BUILDER_IMAGE := golang:1.18.0-alpine
BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr :/ __)-1
BASE_IMAGE := local/base:1.1.3-$(shell echo $(ROOT_IMAGE) | tr :/ __)-$(shell echo $(CERTS_IMAGE) | tr :/ __)

View file

@ -15,16 +15,35 @@ The following tip changes can be tested by building VictoriaMetrics components f
## tip
## [v1.76.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.76.1)
Released at 12-04-2022
**Update notes:** this release introduces backwards-incompatible changes to communication protocol between `vmselect` and `vmstorage` nodes in cluster version of VictoriaMetrics, so `vmselect` and `vmstorage` nodes may log communication errors during the upgrade. These errors should stop after all the `vmselect` and `vmstorage` nodes are updated to new release. It is safe to downgrade to previous releases.
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add support for `alert_relabel_configs` option at `-notifier.config`. This option allows configuring relabeling rules for alerts before sending them to configured notifiers. See [these docs](https://docs.victoriametrics.com/vmalert.html#notifier-configuration-file) for details.
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): allow passing StatefulSet pod names to `-promscrape.cluster.memberNum` command-line flag. In this case the member number is automatically extracted from the pod name, which must end with the number in the range `0 ... promscrape.cluster.membersCount-1`. For example, `vmagent-0`, `vmagent-1`, etc. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2359) and [these docs](https://docs.victoriametrics.com/vmagent.html#scraping-big-number-of-targets).
* BUGFIX: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): properly propagate limits at `-search.max*` command-line flags from `vminsert` to `vmstorage`. The limits are `-search.maxUniqueTimeseries`, `-search.maxSeries`, `-search.maxFederateSeries`, `-search.maxExportSeries`, `-search.maxGraphiteSeries` and `-search.maxTSDBStatusSeries`. They weren't propagated to `vmstorage` because of the bug. These limits were introduced in [v1.76.0](https://docs.victoriametrics.com/CHANGELOG.html#v1760). See [this bug](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2450).
* BUGFIX: fix goroutine leak and possible deadlock when importing invalid data via [native binary format](https://docs.victoriametrics.com/#how-to-import-data-in-native-format). See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2423).
* BUGFIX: [Graphite Render API](https://docs.victoriametrics.com/#graphite-render-api-usage): properly calculate [hitCount](https://graphite.readthedocs.io/en/latest/functions.html#graphite.render.functions.hitcount) function. Previously it could return empty results if there were no original samples in some parts of the selected time range.
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): allow overriding built-in function names inside [WITH templates](https://play.victoriametrics.com/promql/expand-with-exprs). For example, `WITH (sum(a,b) = a + b + 1) sum(x,y)` now expands into `x + y + 1`. Previously such a query would fail with `cannot use reserved name` error. See [this bugreport](https://github.com/VictoriaMetrics/metricsql/issues/5).
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): properly display values greater than 1000 on Y axis. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2409).
## [v1.76.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.76.0)
Released at 07-04-2022
**Update notes:** this release introduces backwards-incompatible changes to communication protocol between `vmselect` and `vmstorage` nodes in cluster version of VictoriaMetrics, so `vmselect` and `vmstorage` nodes may log communication errors during the upgrade. These errors should stop after all the `vmselect` and `vmstorage` nodes are updated to new release.
* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): add ability to verify files obtained via [native export](https://docs.victoriametrics.com/#how-to-export-data-in-native-format). See [these docs](https://docs.victoriametrics.com/vmctl.html#verifying-exported-blocks-from-victoriametrics) and [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2362).
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): add pre-defined dashboards for per-job CPU usage, memory usage and disk IO usage. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2243) for details.
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): improve compatibility with [Prometheus Alert Generator specification](https://github.com/prometheus/compliance/blob/main/alert_generator/specification.md). See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2340).
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add `-datasource.disableKeepAlive` command-line flag, which can be used for disabling [HTTP keep-alive connections](https://en.wikipedia.org/wiki/HTTP_persistent_connection) to datasources. This option can be useful for distributing load among multiple datasources behind TCP proxy such as [HAProxy](http://www.haproxy.org/).
* FEATURE: [Cluster version of VictoriaMetrics](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): reduce memory usage by up to 50% for `vminsert` and `vmstorage` under high ingestion rate.
* FEATURE: [vmgateway](https://docs.victoriametrics.com/vmgateway.html): Allow to read `-ratelimit.config` file from URL. Also add `-atelimit.configCheckInterval` command-line option. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2241).
* FEATURE: [vmgateway](https://docs.victoriametrics.com/vmgateway.html): Allow to read `-ratelimit.config` file from URL. Also add `-ratelimit.configCheckInterval` command-line option. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2241).
* FEATURE: add the following command-line flags, which can be used for fine-grained limiting of CPU and memory usage during various API calls:
* `-search.maxFederateSeries` for limiting the number of time series, which can be returned from [/federate](https://docs.victoriametrics.com/#federation).

View file

@ -318,16 +318,19 @@ Some capacity planning tips for VictoriaMetrics cluster:
## High availability
The database is considered highly available if it continues accepting new data and processing incoming queries when some of its components are temporarily unavailable.
VictoriaMetrics cluster is highly available according to this definition - see [cluster availability docs](#cluster-availability).
It is recommended to run all the components for a single cluster in the same subnetwork with high bandwidth, low latency and low error rates.
This improves cluster performance and availability.
It isn't recommended spreading components for a single cluster across multiple availability zones, since cross-AZ network usually has lower bandwidth, higher latency
and higher error rates comparing the network inside AZ.
This improves cluster performance and availability. It isn't recommended to spread components for a single cluster
across multiple availability zones, since cross-AZ networks usually have lower bandwidth, higher latency and higher
error rates compared to the network inside a single AZ.
If you need a multi-AZ setup, then it is recommended to run independent clusters in each AZ and to set up
[vmagent](https://docs.victoriametrics.com/vmagent.html) in front of these clusters, so it could replicate incoming data
into all the cluster. Then [promxy](https://github.com/jacksontj/promxy) could be used for querying the data from multiple clusters.
into all the clusters - see [these docs](https://docs.victoriametrics.com/vmagent.html#multitenancy) for details.
Then [promxy](https://github.com/jacksontj/promxy) could be used for querying the data from multiple clusters.
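For illustration, a `vmagent` instance replicating the incoming data into two AZ-local clusters could be started as follows; the endpoint addresses are placeholders, while the URL path follows the cluster's `vminsert` write API:
```bash
/path/to/vmagent-prod \
  -remoteWrite.url=http://<vminsert-az1>:8480/insert/0/prometheus/api/v1/write \
  -remoteWrite.url=http://<vminsert-az2>:8480/insert/0/prometheus/api/v1/write
```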
Another solution is to use [multi-level cluster setup](#multi-level-cluster-setup).
## Multi-level cluster setup
@ -678,6 +681,12 @@ Below is the output for `/path/to/vmselect -help`:
The maximum number of concurrent search requests. It shouldn't be high, since a single request can saturate all the CPU cores. See also -search.maxQueueDuration (default 8)
-search.maxExportDuration duration
The maximum duration for /api/v1/export call (default 720h0m0s)
-search.maxExportSeries int
The maximum number of time series, which can be returned from /api/v1/export* APIs. This option allows limiting memory usage (default 1000000)
-search.maxFederateSeries int
The maximum number of time series, which can be returned from /federate. This option allows limiting memory usage (default 300000)
-search.maxGraphiteSeries int
The maximum number of time series, which can be scanned during queries to Graphite Render API. See https://docs.victoriametrics.com/#graphite-render-api-usage (default 300000)
-search.maxLookback duration
Synonym to -search.lookback-delta from Prometheus. The value is dynamically detected from interval between time series datapoints if not set. It can be overridden on per-query basis via max_lookback arg. See also '-search.maxStalenessInterval' flag, which has the same meaning due to historical reasons
-search.maxPointsPerTimeseries int
@ -693,12 +702,18 @@ Below is the output for `/path/to/vmselect -help`:
The maximum number of raw samples a single query can process across all time series. This protects from heavy queries, which select unexpectedly high number of raw samples. See also -search.maxSamplesPerSeries (default 1000000000)
-search.maxSamplesPerSeries int
The maximum number of raw samples a single query can scan per each time series. See also -search.maxSamplesPerQuery (default 30000000)
-search.maxSeries int
The maximum number of time series, which can be returned from /api/v1/series. This option allows limiting memory usage (default 10000)
-search.maxStalenessInterval duration
The maximum interval for staleness calculations. By default it is automatically calculated from the median interval between samples. This flag could be useful for tuning Prometheus data model closer to Influx-style data model. See https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness for details. See also '-search.maxLookback' flag, which has the same meaning due to historical reasons
-search.maxStatusRequestDuration duration
The maximum duration for /api/v1/status/* requests (default 5m0s)
-search.maxStepForPointsAdjustment duration
The maximum step when /api/v1/query_range handler adjusts points with timestamps closer than -search.latencyOffset to the current time. The adjustment is needed because such points may contain incomplete data (default 1m0s)
-search.maxTSDBStatusSeries int
The maximum number of time series, which can be processed during the call to /api/v1/status/tsdb. This option allows limiting memory usage (default 1000000)
-search.maxUniqueTimeseries int
The maximum number of unique time series, which can be selected during /api/v1/query and /api/v1/query_range queries. This option allows limiting memory usage (default 300000)
-search.minStalenessInterval duration
The minimum interval for staleness calculations. This flag could be useful for removing gaps on graphs generated from time series with irregular intervals between samples. See also '-search.maxStalenessInterval'
-search.noStaleMarkers
@ -814,7 +829,7 @@ Below is the output for `/path/to/vmstorage -help`:
-search.maxTagValues int
The maximum number of tag values returned per search (default 100000)
-search.maxUniqueTimeseries int
The maximum number of unique time series a single query can process. This allows protecting against heavy queries, which select unexpectedly high number of series. See also -search.maxSamplesPerQuery and -search.maxSamplesPerSeries (default 300000)
The maximum number of unique time series, which can be scanned during every query. This allows protecting against heavy queries, which select unexpectedly high number of series. Zero means 'no limit'. See also -search.max* command-line flags at vmselect
-smallMergeConcurrency int
The maximum number of CPU cores to use for small merges. Default value is used if set to 0
-snapshotAuthKey string

View file

@ -2,7 +2,25 @@
sort: 13
---
# Quick Start
# Quick start
## Installation
Single-server VictoriaMetrics is available as:
* [Managed VictoriaMetrics at AWS](https://aws.amazon.com/marketplace/pp/prodview-4tbfq5icmbmyc)
* [Docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/)
* [Snap packages](https://snapcraft.io/victoriametrics)
* [Helm Charts](https://github.com/VictoriaMetrics/helm-charts#list-of-charts)
* [Binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)
* [Source code](https://github.com/VictoriaMetrics/VictoriaMetrics). See [How to build from sources](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-build-from-sources)
* [VictoriaMetrics on Linode](https://www.linode.com/marketplace/apps/victoriametrics/victoriametrics/)
* [VictoriaMetrics on DigitalOcean](https://marketplace.digitalocean.com/apps/victoriametrics-single)
Just download VictoriaMetrics and follow [these instructions](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-start-victoriametrics).
Then read [Prometheus setup](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#prometheus-setup) and [Grafana setup](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#grafana-setup) docs.
### Starting VM-Single via Docker:
The following commands download the latest available [Docker image of VictoriaMetrics](https://hub.docker.com/r/victoriametrics/victoria-metrics) and start it at port 8428, while storing the ingested data at `victoria-metrics-data` subdirectory under the current directory:
@ -13,9 +31,47 @@ docker run -it --rm -v `pwd`/victoria-metrics-data:/victoria-metrics-data -p 842
Open `http://localhost:8428` in web browser and read [these docs](https://docs.victoriametrics.com/#operation).
VictoriaMetrics is also available in binaries (see [this page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)) and in source code (see [how to build VictoriaMetrics from sources](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-build-from-sources)).
There are also the following versions of VictoriaMetrics available:
* [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html) - horizontally scalable VictoriaMetrics, which scales to multiple nodes.
* [Managed VictoriaMetrics at AWS](https://aws.amazon.com/marketplace/pp/prodview-4tbfq5icmbmyc).
### Starting VM-Cluster via Docker:
The following commands clone the latest available [VictoriaMetrics cluster repository](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster) and start the docker container via 'docker-compose'. Further customization is possible by editing the [docker-compose.yaml](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/cluster/deployment/docker/docker-compose.yml) file.
```bash
git clone https://github.com/VictoriaMetrics/VictoriaMetrics --branch cluster && cd VictoriaMetrics/deployment/docker && docker-compose up
```
* [Cluster setup](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#cluster-setup)
## Writing data
Data can be written to VictoriaMetrics in the following ways:
* [DataDog agent](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-datadog-agent)
* [InfluxDB-compatible agents such as Telegraf](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf)
* [Graphite-compatible agents such as StatsD](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-graphite-compatible-agents-such-as-statsd)
* [OpenTSDB-compatible agents](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-opentsdb-compatible-agents)
* [Prometheus remote_write API](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write)
* [In JSON line format](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-data-in-json-line-format)
* [Imported in CSV format](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-csv-data)
* [Imported in Prometheus exposition format](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-data-in-prometheus-exposition-format)
* `/api/v1/import` for importing data obtained from [/api/v1/export](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-export-data-in-json-line-format).
See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-data-in-json-line-format) for details.
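For example, a single sample can be pushed via `/api/v1/import` in JSON line format like this (the metric, value and timestamp are illustrative):
```bash
curl -d '{"metric":{"__name__":"foo","job":"bar"},"values":[1.23],"timestamps":[1649664000000]}' \
  -X POST http://localhost:8428/api/v1/import
```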
## Reading data
VictoriaMetrics provides various APIs for reading the data. [This document briefly describes these APIs](https://docs.victoriametrics.com/url-examples.html).
### Grafana setup:
Create [Prometheus datasource](http://docs.grafana.org/features/datasources/prometheus/) in Grafana with the following url:
```url
http://<victoriametrics-addr>:8428
```
Substitute `<victoriametrics-addr>` with the hostname or IP address of VictoriaMetrics.
Then build graphs and dashboards for the created datasource using [PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics/) or [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html).

View file

@ -538,7 +538,8 @@ Additionally VictoriaMetrics provides the following handlers:
## Graphite API usage
VictoriaMetrics supports the following Graphite APIs, which are needed for [Graphite datasource in Grafana](https://grafana.com/docs/grafana/latest/datasources/graphite/):
VictoriaMetrics supports data ingestion in Graphite protocol - see [these docs](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) for details.
VictoriaMetrics supports the following Graphite querying APIs, which are needed for [Graphite datasource in Grafana](https://grafana.com/docs/grafana/latest/datasources/graphite/):
* Render API - see [these docs](#graphite-render-api-usage).
* Metrics API - see [these docs](#graphite-metrics-api-usage).
@ -840,7 +841,7 @@ The [deduplication](#deduplication) isn't applied for the data exported in nativ
## How to import time series data
Time series data can be imported into VictoriaMetrics via any supported ingestion protocol:
Time series data can be imported into VictoriaMetrics via any supported data ingestion protocol:
* [Prometheus remote_write API](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write). See [these docs](#prometheus-setup) for details.
* DataDog `submit metrics` API. See [these docs](#how-to-send-data-from-datadog-agent) for details.

View file

@ -542,7 +542,8 @@ Additionally VictoriaMetrics provides the following handlers:
## Graphite API usage
VictoriaMetrics supports the following Graphite APIs, which are needed for [Graphite datasource in Grafana](https://grafana.com/docs/grafana/latest/datasources/graphite/):
VictoriaMetrics supports data ingestion in Graphite protocol - see [these docs](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) for details.
VictoriaMetrics supports the following Graphite querying APIs, which are needed for [Graphite datasource in Grafana](https://grafana.com/docs/grafana/latest/datasources/graphite/):
* Render API - see [these docs](#graphite-render-api-usage).
* Metrics API - see [these docs](#graphite-metrics-api-usage).
@ -844,7 +845,7 @@ The [deduplication](#deduplication) isn't applied for the data exported in nativ
## How to import time series data
Time series data can be imported into VictoriaMetrics via any supported ingestion protocol:
Time series data can be imported into VictoriaMetrics via any supported data ingestion protocol:
* [Prometheus remote_write API](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write). See [these docs](#prometheus-setup) for details.
* DataDog `submit metrics` API. See [these docs](#how-to-send-data-from-datadog-agent) for details.

View file

@ -0,0 +1,83 @@
# Multi-regional setup with VictoriaMetrics: Dedicated regions for monitoring
### Scenario
Let's cover the following case: you have multiple regions with workloads and want to collect metrics from them.
The monitoring setup resides in dedicated regions, as shown below:
<p align="center">
<img
src="multi-regional-setup-dedicated-regions.png"
width="800"
alt="Multi-regional setup with VictoriaMetrics: Dedicated regions for monitoring">
</p>
Every workload region (Earth, Mars, Venus) has a vmagent that sends data to multiple regions with a monitoring setup.
The monitoring setup (Ground Control 1 and 2) contains a VictoriaMetrics Time Series Database (TSDB), either in cluster or single-node setup.
Using this schema, you can achieve:
* Global Querying View
* Querying all metrics from one monitoring installation
* High Availability
* You can lose one region, but your experience will be the same.
* Of course, that means you send the same traffic twice.
### How to write the data to Ground Control regions
* You need to specify two remote write URLs in the vmagent configuration:
```bash
/vmagent-prod \
  -remoteWrite.url=<ground-control-1-remote-write> \
  -remoteWrite.url=<ground-control-2-remote-write>
```
* If you use the Pull model for data collection, please specify the `-promscrape.config` parameter as well
Here is a Quickstart guide for [vmagent](https://docs.victoriametrics.com/vmagent.html#quick-start)
### How to read the data from Ground Control regions
You can use one of the following options:
1. Regional endpoints - use one regional endpoint as default and switch to another if there is an issue.
2. Load balancer - sends queries to a particular region. The benefit and the disadvantage of this setup is its simplicity.
3. Promxy - a proxy that reads data from multiple Prometheus-like sources. It allows reading data more intelligently and covers region unavailability out of the box. It doesn't support MetricsQL yet (please check this issue).
4. Global vmselect in cluster setup - you can set up an additional subset of vmselects that know about all the storage nodes in all regions.
* Deduplication with a 1ms interval must be turned on at the vmselect side - see the example after this list. This setup allows you to query data using MetricsQL.
* The downside is that vmselect waits for a response from all storages in all regions.
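For option 4, a hedged sketch of the global vmselect flags (the storage node addresses are placeholders; `-dedup.minScrapeInterval` enables the 1ms deduplication mentioned above):
```bash
/vmselect-prod \
  -dedup.minScrapeInterval=1ms \
  -storageNode=<ground-control-1-vmstorage> \
  -storageNode=<ground-control-2-vmstorage>
```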
### High Availability
The data is written twice, so every region contains a full copy of it. That means one region can be offline.
You don't need to configure a replication factor at the VictoriaMetrics cluster level.
### Alerting
You can set up vmalert in each Ground Control region to evaluate recording and alerting rules. Since every region contains a full copy of the data, you don't need to synchronize recording rules from one region to another.
For alert deduplication, please use [cluster mode in Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/#high-availability) - see the sketch below.
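A minimal sketch of an Alertmanager HA pair, one instance per Ground Control region (the peer hostname is a placeholder):
```bash
alertmanager \
  --config.file=alertmanager.yml \
  --cluster.listen-address=0.0.0.0:9094 \
  --cluster.peer=<alertmanager-in-other-region>:9094
```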
We also recommend adopting these alerts:
* VictoriaMetrics Single - [https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/alerts.yml](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/alerts.yml)
* VictoriaMetrics Cluster - [https://github.com/VictoriaMetrics/VictoriaMetrics/blob/cluster/deployment/docker/alerts.yml](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/cluster/deployment/docker/alerts.yml)
### Monitoring
An additional VictoriaMetrics single-node instance can be set up in every region to scrape metrics from the main TSDB, as sketched below.
You may also consider sending these metrics to the neighbouring region to achieve HA.
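A minimal scrape config sketch for such a self-monitoring instance (the target address is a placeholder for the main TSDB):
```yaml
scrape_configs:
  - job_name: victoriametrics
    static_configs:
      - targets: ['<main-tsdb-host>:8428']
```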
Additional context:
* VictoriaMetrics Single - [https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#monitoring](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#monitoring)
* VictoriaMetrics Cluster - [https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#monitoring](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#monitoring)
### What more can we do?
Set up vmagents in the Ground Control regions - see the sketch below. This allows accepting data close to the storage and adds more reliability in case the storage is temporarily offline.
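A hedged sketch of such a vmagent sitting in front of the regional storage (the addresses and the buffer path are placeholders):
```bash
/vmagent-prod \
  -httpListenAddr=:8429 \
  -remoteWrite.url=http://<local-vminsert>:8480/insert/0/prometheus/api/v1/write \
  -remoteWrite.tmpDataPath=/vmagent-buffer
```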

Binary file not shown (image added, 320 KiB)

View file

@ -0,0 +1,40 @@
---
sort: 22
---
# Managed VictoriaMetrics
VictoriaMetrics is a fast and easy-to-use monitoring solution and time series database.
It integrates well with existing monitoring systems such as Grafana, Prometheus, Graphite,
InfluxDB, OpenTSDB and DataDog - see [these docs](https://docs.victoriametrics.com/#how-to-import-time-series-data) for details.
The most common use cases for VictoriaMetrics are:
* Long-term remote storage for Prometheus;
* More efficient drop-in replacement for Prometheus and Graphite;
* Replacement for InfluxDB and OpenTSDB that uses lower amounts of RAM, CPU and disk space;
* Cost-efficient alternative to DataDog.
We are glad to announce the availability of Managed VictoriaMetrics
at AWS Marketplace - [try it right now](https://aws.amazon.com/marketplace/pp/prodview-4tbfq5icmbmyc)!
Managed VictoriaMetrics allows users to run VictoriaMetrics on AWS without the need to perform typical
DevOps tasks such as proper configuration, monitoring, log collection, access protection, software updates,
backups, etc.
We run Managed VictoriaMetrics instances in our AWS environment and provide easy-to-use endpoints
for data ingestion and querying, while the VictoriaMetrics team takes care of optimal configuration and software
maintenance.
Managed VictoriaMetrics comes with the following features:
* It can be used as a Managed Prometheus - just configure Prometheus or vmagent to write data to Managed VictoriaMetrics and then use the provided endpoint as a Prometheus datasource in Grafana (see the sketch after this list);
* Every Managed VictoriaMetrics instance runs in an isolated environment, so instances cannot interfere with each other;
* A Managed VictoriaMetrics instance can be scaled up or down in a few clicks;
* Automated backups;
* Pay only for the actually used compute resources - instance type, disk and network.
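As an illustration of the first item above, a hedged vmagent sketch that forwards scraped data to a Managed VictoriaMetrics instance (the endpoint and token placeholders come from the instance page):
```bash
/vmagent-prod \
  -promscrape.config=/path/to/prometheus.yml \
  -remoteWrite.url=https://<instance-endpoint>/api/v1/write \
  -remoteWrite.bearerToken=<write-token>
```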
See more about Managed VictoriaMetrics in the following articles:
* [Managed VictoriaMetrics announcement](https://victoriametrics.com/blog/managed-victoriametrics-announcement)
* [Pricing comparison for Managed Prometheus](https://victoriametrics.com/blog/managed-prometheus-pricing/)
* [Monitoring Proxmox VE via Managed VictoriaMetrics and vmagent](https://victoriametrics.com/blog/proxmox-monitoring-with-dbaas/)

Binary file not shown (image added, 296 KiB)

Binary file not shown (image added, 156 KiB)

Binary file not shown (image added, 88 KiB)

Binary file not shown (image added, 426 KiB)

Binary file not shown (image added, 256 KiB)

View file

@ -0,0 +1,65 @@
# Quick Start
Managed VictoriaMetrics is a database-as-a-service platform where users can run the VictoriaMetrics
they know and love on AWS without the need to perform typical DevOps tasks such as proper configuration,
monitoring, log collection, access protection, software updates, backups, etc.
## How to register
Managed VictoriaMetrics is distributed via [AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-4tbfq5icmbmyc).
To start using the service, you need an already registered AWS account;
then visit the [VictoriaMetrics product page](https://aws.amazon.com/marketplace/pp/prodview-4tbfq5icmbmyc).
See more details [here](https://dbaas.victoriametrics.com/howToRegister).
## Creating instance
The Instances page is where you can list and manage VictoriaMetrics single-node instances.
To create an instance, click the `Create` button:
<p>
<img src="quickstart-instances.png" width="800" alt="">
</p>
In the form that opens, choose the parameters of the new instance, such as:
* `Instance type` from a preset of AWS instance types (you can always change the type later);
* `Region` and `Zone` where the instance should run;
* Desired `disk size` for storing metrics (you can always expand the disk later);
* `Retention` period for stored metrics.
<p>
<img src="quickstart-instance-create.png" width="800" alt="">
</p>
Once created, the instance will remain in the `PROVISIONING` status for a short period of time
while the hardware spins up; just wait a couple of minutes and reload the page.
You'll also be notified via email once provisioning is finished:
<p>
<img src="quickstart-instance-provisioning.png" width="800" alt="">
</p>
## Access
After the transition from the `PROVISIONING` to the `RUNNING` state, VictoriaMetrics is fully operational
and ready to accept write and read requests. But first, click on the instance name to get an access token:
<p>
<img src="quickstart-tokens.png" width="800" alt="">
</p>
Access tokens are used in token-based authentication to allow an application to access the VictoriaMetrics API.
Supported token types are `Read-Only`, `Write-Only` and `Read-Write`. Click on the token created by default
to see usage examples:
<p>
<img src="quickstart-tokens-usage.png" width="800" alt="">
</p>
Follow the usage examples to configure access to VictoriaMetrics for your Prometheus,
Grafana or any other software - see the sketches below.
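A couple of hedged `curl` sketches (the endpoint and tokens are placeholders taken from the instance page; bearer authentication is assumed here):
```bash
# Push a sample using a Write token:
curl -H 'Authorization: Bearer <write-token>' \
  -d 'foo_bar{region="earth"} 1' \
  'https://<instance-endpoint>/api/v1/import/prometheus'

# Query it back using a Read token:
curl -H 'Authorization: Bearer <read-token>' \
  'https://<instance-endpoint>/api/v1/query?query=foo_bar'
```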
## Modifying
Remember, you can always add or remove instances, as well as modify existing ones by changing their type or increasing the disk space.
However, such an update requires an instance restart and may result in a couple of minutes of downtime.

View file

@ -85,7 +85,7 @@ More information:
## /api/v1/import
**Imports data obtained via /api/v1/export**
**Imports custom data as well as data obtained via /api/v1/export**
Single:
<div class="with-copy" markdown="1">
@ -105,6 +105,14 @@ curl --data-binary "@import.txt" -X POST 'http://<vminsert>:8480/insert/0/promet
</div>
<div class="with-copy" markdown="1">
```bash
curl -d 'metric_name{foo="bar"} 123' -X POST 'http://<vminsert>:8480/insert/0/prometheus/api/v1/import/prometheus'
```
</div>
Additional information:
* [How to import time series data](https://docs.victoriametrics.com/#how-to-import-time-series-data)
@ -322,7 +330,6 @@ curl -G 'http://<vmsingle>:8428/api/v1/targets'
</div>
cluster:
<div class="with-copy" markdown="1">
```bash

View file

@ -362,6 +362,8 @@ spread scrape targets among a cluster of two `vmagent` instances:
/path/to/vmagent -promscrape.cluster.membersCount=2 -promscrape.cluster.memberNum=1 -promscrape.config=/path/to/config.yml ...
```
The `-promscrape.cluster.memberNum` can be set to a StatefulSet pod name when `vmagent` runs in Kubernetes. The pod name must end with a number in the range `0 ... promscrape.cluster.membersCount-1`. For example, `-promscrape.cluster.memberNum=vmagent-0`. A sketch of wiring this up via the Kubernetes downward API is shown below.
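A hedged Kubernetes sketch (the StatefulSet details and member count are illustrative), passing the pod name to the flag via the downward API:
```yaml
# Fragment of a StatefulSet pod template for a two-member vmagent cluster.
containers:
  - name: vmagent
    image: victoriametrics/vmagent
    env:
      - name: POD_NAME
        valueFrom:
          fieldRef:
            fieldPath: metadata.name  # e.g. vmagent-0, vmagent-1
    args:
      - -promscrape.config=/config/scrape.yml
      - -promscrape.cluster.membersCount=2
      - -promscrape.cluster.memberNum=$(POD_NAME)
```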
By default each scrape target is scraped only by a single `vmagent` instance in the cluster. If there is a need for replicating scrape targets among multiple `vmagent` instances,
then `-promscrape.cluster.replicationFactor` command-line flag must be set to the desired number of replicas. For example, the following commands
start a cluster of three `vmagent` instances, where each target is scraped by two `vmagent` instances:

View file

@ -915,12 +915,17 @@ static_configs:
consul_sd_configs:
[ - <consul_sd_config> ... ]
# List of relabel configurations.
# List of relabel configurations for entities discovered via service discovery.
# Supports the same relabeling features as the rest of VictoriaMetrics components.
# See https://docs.victoriametrics.com/vmagent.html#relabeling
relabel_configs:
[ - <relabel_config> ... ]
# List of relabel configurations for alert labels sent via Notifier.
# Supports the same relabeling features as the rest of VictoriaMetrics components.
# See https://docs.victoriametrics.com/vmagent.html#relabeling
alert_relabel_configs:
[ - <relabel_config> ... ]
```
The configuration file can be [hot-reloaded](#hot-config-reload).
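A hedged illustration of both relabeling sections from the config above (the label names and the datacenter value are made up):
```yaml
# Keep only notifier targets discovered in the us-east Consul datacenter.
relabel_configs:
  - source_labels: [__meta_consul_dc]
    regex: us-east
    action: keep

# Drop the env label from all alerts before they reach the notifiers.
alert_relabel_configs:
  - action: labeldrop
    regex: env
```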

19
go.mod
View file

@ -3,15 +3,15 @@ module github.com/VictoriaMetrics/VictoriaMetrics
go 1.17
require (
cloud.google.com/go/storage v1.21.0
cloud.google.com/go/storage v1.22.0
github.com/VictoriaMetrics/fastcache v1.10.0
// Do not use the original github.com/valyala/fasthttp because of issues
// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
github.com/VictoriaMetrics/fasthttp v1.1.0
github.com/VictoriaMetrics/metrics v1.18.1
github.com/VictoriaMetrics/metricsql v0.40.0
github.com/aws/aws-sdk-go v1.43.34
github.com/VictoriaMetrics/metricsql v0.41.0
github.com/aws/aws-sdk-go v1.43.37
github.com/cespare/xxhash/v2 v2.1.2
github.com/cheggaaa/pb/v3 v3.0.8
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
@ -31,9 +31,9 @@ require (
github.com/valyala/fasttemplate v1.2.1
github.com/valyala/gozstd v1.16.0
github.com/valyala/quicktemplate v1.7.0
golang.org/x/net v0.0.0-20220403103023-749bd193bc2b
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a
golang.org/x/sys v0.0.0-20220406163625-3f8b81556e12
golang.org/x/net v0.0.0-20220412020605-290c469a71a5
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5
golang.org/x/sys v0.0.0-20220412071739-889880a91fd5
google.golang.org/api v0.74.0
gopkg.in/yaml.v2 v2.4.0
)
@ -49,7 +49,8 @@ require (
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.7 // indirect
github.com/googleapis/gax-go/v2 v2.2.0 // indirect
github.com/googleapis/gax-go/v2 v2.3.0 // indirect
github.com/googleapis/go-type-adapters v1.0.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
@ -66,9 +67,9 @@ require (
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220405205423-9d709892a2bf // indirect
google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac // indirect
google.golang.org/grpc v1.45.0 // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect

47
go.sum
View file

@ -29,7 +29,6 @@ cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+Y
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U=
cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y=
cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
@ -41,13 +40,11 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7
cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o=
cloud.google.com/go/bigtable v1.10.1/go.mod h1:cyHeKlx6dcZCO0oSQucYdauseD8kIENGuDOJPKMCVg8=
cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
cloud.google.com/go/compute v1.2.0/go.mod h1:xlogom/6gr8RJGBe7nT2eGsQYAFUbbv8dbC29qE3Xmw=
cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
cloud.google.com/go/compute v1.5.0 h1:b1zWmYuuHz7gO9kDcM/EpHGr06UgsYNRpNJzI2kFiLM=
cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/iam v0.1.1/go.mod h1:CKqrcnI/suGpybEHxZ7BMehL0oA4LpdyJdUlTl9jVMw=
cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc=
cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@ -59,8 +56,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.21.0 h1:HwnT2u2D309SFDHQII6m18HlrCi3jAXhUMTLOWXYH14=
cloud.google.com/go/storage v1.21.0/go.mod h1:XmRlxkgPjlBONznT2dDUU/5XlpU2OjMnKuqnZI01LAA=
cloud.google.com/go/storage v1.22.0 h1:NUV0NNp9nkBuW66BFRLuMgldN60C57ET3dhbwLIYio8=
cloud.google.com/go/storage v1.22.0/go.mod h1:GbaLEoMqbVm6sx3Z0R++gSiBlgMv6yUi2q1DeGFKQgE=
collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go v41.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
@ -119,8 +116,8 @@ github.com/VictoriaMetrics/fasthttp v1.1.0 h1:3crd4YWHsMwu60GUXRH6OstowiFvqrwS4a
github.com/VictoriaMetrics/fasthttp v1.1.0/go.mod h1:/7DMcogqd+aaD3G3Hg5kFgoFwlR2uydjiWvoLp5ZTqQ=
github.com/VictoriaMetrics/metrics v1.18.1 h1:OZ0+kTTto8oPfHnVAnTOoyl0XlRhRkoQrD2n2cOuRw0=
github.com/VictoriaMetrics/metrics v1.18.1/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
github.com/VictoriaMetrics/metricsql v0.40.0 h1:QDzuhzsP2cZJyrijIptDJ6gnxd3qWGzQi4Fhj8mOLHo=
github.com/VictoriaMetrics/metricsql v0.40.0/go.mod h1:6pP1ZeLVJHqJrHlF6Ij3gmpQIznSsgktEcZgsAWYel0=
github.com/VictoriaMetrics/metricsql v0.41.0 h1:fhWnSE9ZXVEbiXXGFY73YPLdovTaDRaDaFdxC3TTRZs=
github.com/VictoriaMetrics/metricsql v0.41.0/go.mod h1:6pP1ZeLVJHqJrHlF6Ij3gmpQIznSsgktEcZgsAWYel0=
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
@ -165,8 +162,8 @@ github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZve
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
github.com/aws/aws-sdk-go v1.43.34 h1:8+P+773CDgQqN1eLH1QHT6XgXHUbME3sAbDGszzjajY=
github.com/aws/aws-sdk-go v1.43.34/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go v1.43.37 h1:kyZ7UjaPZaCik+asF33UFOOYSwr9liDRr/UM/vuw8yY=
github.com/aws/aws-sdk-go v1.43.37/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o=
@ -526,11 +523,14 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
github.com/googleapis/gax-go/v2 v2.2.0 h1:s7jOdKSaksJVOxE0Y/S32otcfiP+UQ0cL8/GTKaONwE=
github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
github.com/googleapis/gax-go/v2 v2.3.0 h1:nRJtk3y8Fm770D42QV6T90ZnvFZyk7agSo3Q+Z9p3WI=
github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/googleapis/go-type-adapters v1.0.0 h1:9XdMn+d/G57qq1s8dNc5IesGCXHf6V2HZ2JwRxfA2tA=
github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gophercloud/gophercloud v0.10.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss=
github.com/gophercloud/gophercloud v0.14.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM=
@ -1181,8 +1181,8 @@ golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220403103023-749bd193bc2b h1:vI32FkLJNAWtGD4BwkThwEy6XS7ZLLMHkSkYfF8M0W0=
golang.org/x/net v0.0.0-20220403103023-749bd193bc2b/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220412020605-290c469a71a5 h1:bRb386wvrE+oBNdF1d/Xh9mQrfQ4ecYhW5qJ5GvTGT4=
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1201,8 +1201,9 @@ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a h1:qfl7ob3DIEs3Ml9oLuPwY2N04gymzAW04WsUQHIClgM=
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 h1:OSnWWcOd/CtWQC2cYSBgbTSJv3ciqd8r54ySIW2y3RE=
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -1319,8 +1320,8 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220406163625-3f8b81556e12 h1:QyVthZKMsyaQwBTJE04jdNN0Pp5Fn9Qga0mrgxyERQM=
golang.org/x/sys v0.0.0-20220406163625-3f8b81556e12/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412071739-889880a91fd5 h1:NubxfvTRuNb4RVzWrIDAUzUvREH1HkCD4JjyQTSG9As=
golang.org/x/sys v0.0.0-20220412071739-889880a91fd5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@ -1428,8 +1429,9 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U=
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
@ -1470,10 +1472,7 @@ google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
google.golang.org/api v0.64.0/go.mod h1:931CdxA8Rm4t6zqTFGSsgwbAEZ2+GMYurbndwSimebM=
google.golang.org/api v0.66.0/go.mod h1:I1dmXYpX7HGwz/ejRxwQp2qj5bFAz93HiCU1C1oYd9M=
google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
google.golang.org/api v0.69.0/go.mod h1:boanBiw+h5c3s+tBPgEzLDRHfFLWV0qXxRHz3ws7C80=
google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
google.golang.org/api v0.74.0 h1:ExR2D+5TYIrMphWgs5JCgwRhEDlPDXXrLwHHMgPHTXE=
@ -1531,6 +1530,7 @@ google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
@ -1557,21 +1557,16 @@ google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ6
google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220114231437-d2e6a121cae0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220201184016-50beb8ab5c44/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220211171837-173942840c17/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220216160803-4663080d8bc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
google.golang.org/genproto v0.0.0-20220405205423-9d709892a2bf h1:JTjwKJX9erVpsw17w+OIPP7iAgEkN/r8urhWSunEDTs=
google.golang.org/genproto v0.0.0-20220405205423-9d709892a2bf/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac h1:qSNTkEN+L2mvWcLgJOR+8bdHX9rN/IdU3A1Ghpfb1Rg=
google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=

View file

@ -14,7 +14,7 @@ func MarshalUint16(dst []byte, u uint16) []byte {
// UnmarshalUint16 returns unmarshaled uint16 from src.
func UnmarshalUint16(src []byte) uint16 {
// This is faster than the manual conversion.
return binary.BigEndian.Uint16(src)
return binary.BigEndian.Uint16(src[:2])
}
// MarshalUint32 appends marshaled v to dst and returns the result.
@ -25,7 +25,7 @@ func MarshalUint32(dst []byte, u uint32) []byte {
// UnmarshalUint32 returns unmarshaled uint32 from src.
func UnmarshalUint32(src []byte) uint32 {
// This is faster than the manual conversion.
return binary.BigEndian.Uint32(src)
return binary.BigEndian.Uint32(src[:4])
}
// MarshalUint64 appends marshaled v to dst and returns the result.
@ -36,7 +36,7 @@ func MarshalUint64(dst []byte, u uint64) []byte {
// UnmarshalUint64 returns unmarshaled uint64 from src.
func UnmarshalUint64(src []byte) uint64 {
// This is faster than the manual conversion.
return binary.BigEndian.Uint64(src)
return binary.BigEndian.Uint64(src[:8])
}
// MarshalInt16 appends marshaled v to dst and returns the result.
@ -50,7 +50,7 @@ func MarshalInt16(dst []byte, v int16) []byte {
// UnmarshalInt16 returns unmarshaled int16 from src.
func UnmarshalInt16(src []byte) int16 {
// This is faster than the manual conversion.
u := binary.BigEndian.Uint16(src)
u := binary.BigEndian.Uint16(src[:2])
v := int16(u>>1) ^ (int16(u<<15) >> 15) // zig-zag decoding without branching.
return v
}
@ -66,7 +66,7 @@ func MarshalInt64(dst []byte, v int64) []byte {
// UnmarshalInt64 returns unmarshaled int64 from src.
func UnmarshalInt64(src []byte) int64 {
// This is faster than the manual conversion.
u := binary.BigEndian.Uint64(src)
u := binary.BigEndian.Uint64(src[:8])
v := int64(u>>1) ^ (int64(u<<63) >> 63) // zig-zag decoding without branching.
return v
}
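The `src[:2]`/`src[:4]`/`src[:8]` slicing added above is a common Go bounds-check-elimination hint: it tells the compiler exactly how many bytes the subsequent `binary.BigEndian` call needs, so a single bounds check happens up front. A minimal self-contained sketch of the pattern (the function name is illustrative):
```go
package main

import (
	"encoding/binary"
	"fmt"
)

// unmarshalUint32 slices src to exactly 4 bytes, which lets the compiler
// prove the byte accesses inside Uint32 are in bounds with a single check.
func unmarshalUint32(src []byte) uint32 {
	return binary.BigEndian.Uint32(src[:4])
}

func main() {
	fmt.Println(unmarshalUint32([]byte{0, 0, 1, 2})) // prints 258 (0x00000102)
}
```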

View file

@ -49,12 +49,30 @@ var (
clusterMembersCount = flag.Int("promscrape.cluster.membersCount", 0, "The number of members in a cluster of scrapers. "+
"Each member must have an unique -promscrape.cluster.memberNum in the range 0 ... promscrape.cluster.membersCount-1 . "+
"Each member then scrapes roughly 1/N of all the targets. By default cluster scraping is disabled, i.e. a single scraper scrapes all the targets")
clusterMemberNum = flag.Int("promscrape.cluster.memberNum", 0, "The number of number in the cluster of scrapers. "+
"It must be an unique value in the range 0 ... promscrape.cluster.membersCount-1 across scrapers in the cluster")
clusterMemberNum = flag.String("promscrape.cluster.memberNum", "0", "The number of the member in the cluster of scrapers. "+
"It must be a unique value in the range 0 ... promscrape.cluster.membersCount-1 across scrapers in the cluster. "+
"Can be specified as a pod name of a Kubernetes StatefulSet - pod-name-Num, where Num is the numeric part of the pod name")
clusterReplicationFactor = flag.Int("promscrape.cluster.replicationFactor", 1, "The number of members in the cluster, which scrape the same targets. "+
"If the replication factor is greater than 2, then the deduplication must be enabled at remote storage side. See https://docs.victoriametrics.com/#deduplication")
)
var clusterMemberID int
func mustInitClusterMemberID() {
s := *clusterMemberNum
// Special case for Kubernetes deployments, where the pod name is formatted as some-pod-name-1.
// Obtain memberNum from the last segment of the pod name.
// https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2359
if idx := strings.LastIndexByte(s, '-'); idx >= 0 {
s = s[idx+1:]
}
n, err := strconv.ParseInt(s, 10, 64)
if err != nil {
logger.Fatalf("cannot parse -promscrape.cluster.memberNum=%q: %s", *clusterMemberNum, err)
}
clusterMemberID = int(n)
}
// Config represents essential parts from Prometheus config defined at https://prometheus.io/docs/prometheus/latest/configuration/configuration/
type Config struct {
Global GlobalConfig `yaml:"global,omitempty"`
@ -996,7 +1014,7 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel
if *clusterMembersCount > 1 {
bb := scrapeWorkKeyBufPool.Get()
bb.B = appendScrapeWorkKey(bb.B[:0], labels)
needSkip := needSkipScrapeWork(bytesutil.ToUnsafeString(bb.B), *clusterMembersCount, *clusterReplicationFactor, *clusterMemberNum)
needSkip := needSkipScrapeWork(bytesutil.ToUnsafeString(bb.B), *clusterMembersCount, *clusterReplicationFactor, clusterMemberID)
scrapeWorkKeyBufPool.Put(bb)
if needSkip {
return nil, nil

View file

@ -52,6 +52,7 @@ func CheckConfig() error {
//
// Scraped data is passed to pushData.
func Init(pushData func(wr *prompbmarshal.WriteRequest)) {
mustInitClusterMemberID()
globalStopChan = make(chan struct{})
scraperWG.Add(1)
go func() {

View file

@ -276,7 +276,7 @@ func (sw *scrapeWork) run(stopCh <-chan struct{}, globalStopCh <-chan struct{})
// scrapes replicated targets at different time offsets. This guarantees that the deduplication consistently leaves samples
// received from the same vmagent replica.
// See https://docs.victoriametrics.com/vmagent.html#scraping-big-number-of-targets
key := fmt.Sprintf("ClusterMemberNum=%d, ScrapeURL=%s, Labels=%s", *clusterMemberNum, sw.Config.ScrapeURL, sw.Config.LabelsString())
key := fmt.Sprintf("ClusterMemberNum=%d, ScrapeURL=%s, Labels=%s", clusterMemberID, sw.Config.ScrapeURL, sw.Config.LabelsString())
h := xxhash.Sum64(bytesutil.ToUnsafeBytes(key))
randSleep = uint64(float64(scrapeInterval) * (float64(h) / (1 << 64)))
sleepOffset := uint64(time.Now().UnixNano()) % uint64(scrapeInterval)

View file

@ -8,7 +8,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/metrics"
@ -43,49 +42,37 @@ func ParseStream(r io.Reader, isGzip bool, callback func(block *Block) error) er
// Read native blocks and feed workers with work.
sizeBuf := make([]byte, 4)
var wg sync.WaitGroup
var (
callbackErrLock sync.Mutex
callbackErr error
)
ctx := &streamContext{}
for {
uw := getUnmarshalWork()
uw.tr = tr
uw.callback = func(block *Block) {
if err := callback(block); err != nil {
processErrors.Inc()
callbackErrLock.Lock()
if callbackErr == nil {
callbackErr = fmt.Errorf("error when processing native block: %w", err)
}
callbackErrLock.Unlock()
}
wg.Done()
}
uw.ctx = ctx
uw.callback = callback
// Read uw.metricNameBuf
if _, err := io.ReadFull(br, sizeBuf); err != nil {
if err == io.EOF {
// End of stream
putUnmarshalWork(uw)
wg.Wait()
return callbackErr
ctx.wg.Wait()
return ctx.err
}
readErrors.Inc()
wg.Wait()
ctx.wg.Wait()
return fmt.Errorf("cannot read metricName size: %w", err)
}
readCalls.Inc()
bufSize := encoding.UnmarshalUint32(sizeBuf)
if bufSize > 1024*1024 {
parseErrors.Inc()
wg.Wait()
ctx.wg.Wait()
return fmt.Errorf("too big metricName size; got %d; shouldn't exceed %d", bufSize, 1024*1024)
}
uw.metricNameBuf = bytesutil.ResizeNoCopyMayOverallocate(uw.metricNameBuf, int(bufSize))
if _, err := io.ReadFull(br, uw.metricNameBuf); err != nil {
readErrors.Inc()
wg.Wait()
ctx.wg.Wait()
return fmt.Errorf("cannot read metricName with size %d bytes: %w", bufSize, err)
}
readCalls.Inc()
@ -93,30 +80,36 @@ func ParseStream(r io.Reader, isGzip bool, callback func(block *Block) error) er
// Read uw.blockBuf
if _, err := io.ReadFull(br, sizeBuf); err != nil {
readErrors.Inc()
wg.Wait()
ctx.wg.Wait()
return fmt.Errorf("cannot read native block size: %w", err)
}
readCalls.Inc()
bufSize = encoding.UnmarshalUint32(sizeBuf)
if bufSize > 1024*1024 {
parseErrors.Inc()
wg.Wait()
ctx.wg.Wait()
return fmt.Errorf("too big native block size; got %d; shouldn't exceed %d", bufSize, 1024*1024)
}
uw.blockBuf = bytesutil.ResizeNoCopyMayOverallocate(uw.blockBuf, int(bufSize))
if _, err := io.ReadFull(br, uw.blockBuf); err != nil {
readErrors.Inc()
wg.Wait()
ctx.wg.Wait()
return fmt.Errorf("cannot read native block with size %d bytes: %w", bufSize, err)
}
readCalls.Inc()
blocksRead.Inc()
wg.Add(1)
ctx.wg.Add(1)
common.ScheduleUnmarshalWork(uw)
}
}
type streamContext struct {
wg sync.WaitGroup
errLock sync.Mutex
err error
}
// Block is a single block from `/api/v1/import/native` request.
type Block struct {
MetricName storage.MetricName
@ -142,13 +135,15 @@ var (
type unmarshalWork struct {
tr storage.TimeRange
callback func(block *Block)
ctx *streamContext
callback func(block *Block) error
metricNameBuf []byte
blockBuf []byte
block Block
}
func (uw *unmarshalWork) reset() {
uw.ctx = nil
uw.callback = nil
uw.metricNameBuf = uw.metricNameBuf[:0]
uw.blockBuf = uw.blockBuf[:0]
@ -157,13 +152,22 @@ func (uw *unmarshalWork) reset() {
// Unmarshal implements common.UnmarshalWork
func (uw *unmarshalWork) Unmarshal() {
if err := uw.unmarshal(); err != nil {
err := uw.unmarshal()
if err != nil {
parseErrors.Inc()
logger.Errorf("error when unmarshaling native block: %s", err)
putUnmarshalWork(uw)
return
} else {
err = uw.callback(&uw.block)
}
uw.callback(&uw.block)
ctx := uw.ctx
if err != nil {
processErrors.Inc()
ctx.errLock.Lock()
if ctx.err == nil {
ctx.err = fmt.Errorf("error when processing native block: %w", err)
}
ctx.errLock.Unlock()
}
ctx.wg.Done()
putUnmarshalWork(uw)
}

View file

@ -2227,7 +2227,8 @@ func (is *indexSearch) searchMetricIDsInternal(tfss []*TagFilters, tr TimeRange,
return nil, err
}
if metricIDs.Len() > maxMetrics {
return nil, fmt.Errorf("the number of matching unique timeseries exceeds %d; either narrow down the search or increase -search.maxUniqueTimeseries", maxMetrics)
return nil, fmt.Errorf("the number of matching timeseries exceeds %d; either narrow down the search "+
"or increase -search.max* command-line flag values at vmselect", maxMetrics)
}
}
return metricIDs, nil
@ -2247,6 +2248,10 @@ func (is *indexSearch) updateMetricIDsForTagFilters(metricIDs *uint64set.Set, tf
atomic.AddUint64(&is.db.globalSearchCalls, 1)
m, err := is.getMetricIDsForDateAndFilters(0, tfs, maxMetrics)
if err != nil {
if errors.Is(err, errFallbackToGlobalSearch) {
return fmt.Errorf("the number of matching timeseries exceeds %d; either narrow down the search "+
"or increase -search.max* command-line flag values at vmselect", maxMetrics)
}
return err
}
metricIDs.UnionMayOwn(m)

View file

@ -0,0 +1,3 @@
{
"storage": "1.22.0"
}

View file

@ -1,6 +1,19 @@
# Changes
## [1.22.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.21.0...storage/v1.22.0) (2022-03-31)
### Features
* **storage:** allow specifying includeTrailingDelimiter ([#5617](https://github.com/googleapis/google-cloud-go/issues/5617)) ([a34503b](https://github.com/googleapis/google-cloud-go/commit/a34503bc0f0b95399285e8db66976b227e3b0072))
* **storage:** set versionClient to module version ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9))
### Bug Fixes
* **storage:** respect STORAGE_EMULATOR_HOST in signedURL ([#5673](https://github.com/googleapis/google-cloud-go/issues/5673)) ([1c249ae](https://github.com/googleapis/google-cloud-go/commit/1c249ae5b4980cf53fa74635943ca8bf6a96a341))
## [1.21.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.20.0...storage/v1.21.0) (2022-02-17)

View file

@ -243,10 +243,10 @@ func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule {
return rs
}
func fromProtoToObjectACLRules(items []*storagepb.ObjectAccessControl) []ACLRule {
func toObjectACLRulesFromProto(items []*storagepb.ObjectAccessControl) []ACLRule {
var rs []ACLRule
for _, item := range items {
rs = append(rs, fromProtoToObjectACLRule(item))
rs = append(rs, toObjectACLRuleFromProto(item))
}
return rs
}
@ -259,6 +259,14 @@ func toBucketACLRules(items []*raw.BucketAccessControl) []ACLRule {
return rs
}
func toBucketACLRulesFromProto(items []*storagepb.BucketAccessControl) []ACLRule {
var rs []ACLRule
for _, item := range items {
rs = append(rs, toBucketACLRuleFromProto(item))
}
return rs
}
func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule {
return ACLRule{
Entity: ACLEntity(a.Entity),
@ -270,14 +278,14 @@ func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule {
}
}
func fromProtoToObjectACLRule(a *storagepb.ObjectAccessControl) ACLRule {
func toObjectACLRuleFromProto(a *storagepb.ObjectAccessControl) ACLRule {
return ACLRule{
Entity: ACLEntity(a.GetEntity()),
EntityID: a.GetEntityId(),
Role: ACLRole(a.GetRole()),
Domain: a.GetDomain(),
Email: a.GetEmail(),
ProjectTeam: fromProtoToObjectProjectTeam(a.GetProjectTeam()),
ProjectTeam: toProjectTeamFromProto(a.GetProjectTeam()),
}
}
@ -292,6 +300,17 @@ func toBucketACLRule(a *raw.BucketAccessControl) ACLRule {
}
}
func toBucketACLRuleFromProto(a *storagepb.BucketAccessControl) ACLRule {
return ACLRule{
Entity: ACLEntity(a.GetEntity()),
EntityID: a.GetEntityId(),
Role: ACLRole(a.GetRole()),
Domain: a.GetDomain(),
Email: a.GetEmail(),
ProjectTeam: toProjectTeamFromProto(a.GetProjectTeam()),
}
}
func toRawObjectACL(rules []ACLRule) []*raw.ObjectAccessControl {
if len(rules) == 0 {
return nil
@ -325,6 +344,17 @@ func toRawBucketACL(rules []ACLRule) []*raw.BucketAccessControl {
return r
}
func toProtoBucketACL(rules []ACLRule) []*storagepb.BucketAccessControl {
if len(rules) == 0 {
return nil
}
r := make([]*storagepb.BucketAccessControl, 0, len(rules))
for _, rule := range rules {
r = append(r, rule.toProtoBucketAccessControl())
}
return r
}
func (r ACLRule) toRawBucketAccessControl(bucket string) *raw.BucketAccessControl {
return &raw.BucketAccessControl{
Bucket: bucket,
@ -351,6 +381,14 @@ func (r ACLRule) toProtoObjectAccessControl(bucket string) *storagepb.ObjectAcce
}
}
func (r ACLRule) toProtoBucketAccessControl() *storagepb.BucketAccessControl {
return &storagepb.BucketAccessControl{
Entity: string(r.Entity),
Role: string(r.Role),
// The other fields are not settable.
}
}
func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam {
if p == nil {
return nil
@ -361,6 +399,16 @@ func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam {
}
}
func toProjectTeamFromProto(p *storagepb.ProjectTeam) *ProjectTeam {
if p == nil {
return nil
}
return &ProjectTeam{
ProjectNumber: p.GetProjectNumber(),
Team: p.GetTeam(),
}
}
func toObjectProjectTeam(p *raw.ObjectAccessControlProjectTeam) *ProjectTeam {
if p == nil {
return nil
@ -370,13 +418,3 @@ func toObjectProjectTeam(p *raw.ObjectAccessControlProjectTeam) *ProjectTeam {
Team: p.Team,
}
}
func fromProtoToObjectProjectTeam(p *storagepb.ProjectTeam) *ProjectTeam {
if p == nil {
return nil
}
return &ProjectTeam{
ProjectNumber: p.GetProjectNumber(),
Team: p.GetTeam(),
}
}

View file

@ -27,12 +27,16 @@ import (
"cloud.google.com/go/compute/metadata"
"cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/trace"
"github.com/googleapis/go-type-adapters/adapters"
"golang.org/x/xerrors"
"google.golang.org/api/googleapi"
"google.golang.org/api/iamcredentials/v1"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
raw "google.golang.org/api/storage/v1"
"google.golang.org/genproto/googleapis/storage/v2"
storagepb "google.golang.org/genproto/googleapis/storage/v2"
"google.golang.org/protobuf/proto"
)
// BucketHandle provides operations on a Google Cloud Storage bucket.
@ -801,6 +805,36 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
}, nil
}
func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs {
if b == nil {
return nil
}
return &BucketAttrs{
Name: parseBucketName(b.GetName()),
Location: b.GetLocation(),
MetaGeneration: b.GetMetageneration(),
DefaultEventBasedHold: b.GetDefaultEventBasedHold(),
StorageClass: b.GetStorageClass(),
Created: b.GetCreateTime().AsTime(),
VersioningEnabled: b.GetVersioning().GetEnabled(),
ACL: toBucketACLRulesFromProto(b.GetAcl()),
DefaultObjectACL: toObjectACLRulesFromProto(b.GetDefaultObjectAcl()),
Labels: b.GetLabels(),
RequesterPays: b.GetBilling().GetRequesterPays(),
Lifecycle: toLifecycleFromProto(b.GetLifecycle()),
RetentionPolicy: toRetentionPolicyFromProto(b.GetRetentionPolicy()),
CORS: toCORSFromProto(b.GetCors()),
Encryption: toBucketEncryptionFromProto(b.GetEncryption()),
Logging: toBucketLoggingFromProto(b.GetLogging()),
Website: toBucketWebsiteFromProto(b.GetWebsite()),
BucketPolicyOnly: toBucketPolicyOnlyFromProto(b.GetIamConfig()),
UniformBucketLevelAccess: toUniformBucketLevelAccessFromProto(b.GetIamConfig()),
PublicAccessPrevention: toPublicAccessPreventionFromProto(b.GetIamConfig()),
LocationType: b.GetLocationType(),
RPO: toRPOFromProto(b),
}
}
// toRawBucket copies the editable attribute from b to the raw library's Bucket type.
func (b *BucketAttrs) toRawBucket() *raw.Bucket {
// Copy label map.
@ -854,6 +888,66 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket {
}
}
func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket {
if b == nil {
return &storagepb.Bucket{}
}
// Copy label map.
var labels map[string]string
if len(b.Labels) > 0 {
labels = make(map[string]string, len(b.Labels))
for k, v := range b.Labels {
labels[k] = v
}
}
// Ignore VersioningEnabled if it is false. This is OK because
// we only call this method when creating a bucket, and by default
// new buckets have versioning off.
var v *storagepb.Bucket_Versioning
if b.VersioningEnabled {
v = &storagepb.Bucket_Versioning{Enabled: true}
}
var bb *storagepb.Bucket_Billing
if b.RequesterPays {
bb = &storage.Bucket_Billing{RequesterPays: true}
}
var bktIAM *storagepb.Bucket_IamConfig
if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled || b.PublicAccessPrevention != PublicAccessPreventionUnknown {
bktIAM = &storagepb.Bucket_IamConfig{}
if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled {
bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{
Enabled: true,
}
}
// TODO(noahdietz): This will be switched to a string.
//
// if b.PublicAccessPrevention != PublicAccessPreventionUnknown {
// bktIAM.PublicAccessPrevention = b.PublicAccessPrevention.String()
// }
}
return &storagepb.Bucket{
Name: b.Name,
Location: b.Location,
StorageClass: b.StorageClass,
Acl: toProtoBucketACL(b.ACL),
DefaultObjectAcl: toProtoObjectACL(b.DefaultObjectACL),
Versioning: v,
Labels: labels,
Billing: bb,
Lifecycle: toProtoLifecycle(b.Lifecycle),
RetentionPolicy: b.RetentionPolicy.toProtoRetentionPolicy(),
Cors: toProtoCORS(b.CORS),
Encryption: b.Encryption.toProtoBucketEncryption(),
Logging: b.Logging.toProtoBucketLogging(),
Website: b.Website.toProtoBucketWebsite(),
IamConfig: bktIAM,
Rpo: b.RPO.String(),
}
}
// CORS is the bucket's Cross-Origin Resource Sharing (CORS) configuration.
type CORS struct {
// MaxAge is the value to return in the Access-Control-Max-Age
@ -1190,6 +1284,32 @@ func applyBucketConds(method string, conds *BucketConditions, call interface{})
return nil
}
// applyBucketConds modifies the provided request message using the conditions
// in conds. msg is a protobuf Message that has fields if_metageneration_match
// and if_metageneration_not_match.
func applyBucketCondsProto(method string, conds *BucketConditions, msg proto.Message) error {
rmsg := msg.ProtoReflect()
if conds == nil {
return nil
}
if err := conds.validate(method); err != nil {
return err
}
switch {
case conds.MetagenerationMatch != 0:
if !setConditionProtoField(rmsg, "if_metageneration_match", conds.MetagenerationMatch) {
return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method)
}
case conds.MetagenerationNotMatch != 0:
if !setConditionProtoField(rmsg, "if_metageneration_not_match", conds.MetagenerationNotMatch) {
return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method)
}
}
return nil
}
func (rp *RetentionPolicy) toRawRetentionPolicy() *raw.BucketRetentionPolicy {
if rp == nil {
return nil
@ -1199,6 +1319,15 @@ func (rp *RetentionPolicy) toRawRetentionPolicy() *raw.BucketRetentionPolicy {
}
}
func (rp *RetentionPolicy) toProtoRetentionPolicy() *storagepb.Bucket_RetentionPolicy {
if rp == nil {
return nil
}
return &storagepb.Bucket_RetentionPolicy{
RetentionPeriod: int64(rp.RetentionPeriod / time.Second),
}
}
func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error) {
if rp == nil {
return nil, nil
@ -1214,6 +1343,17 @@ func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error)
}, nil
}
func toRetentionPolicyFromProto(rp *storagepb.Bucket_RetentionPolicy) *RetentionPolicy {
if rp == nil {
return nil
}
return &RetentionPolicy{
RetentionPeriod: time.Duration(rp.GetRetentionPeriod()) * time.Second,
EffectiveTime: rp.GetEffectiveTime().AsTime(),
IsLocked: rp.GetIsLocked(),
}
}
func toRawCORS(c []CORS) []*raw.BucketCors {
var out []*raw.BucketCors
for _, v := range c {
@ -1227,6 +1367,19 @@ func toRawCORS(c []CORS) []*raw.BucketCors {
return out
}
func toProtoCORS(c []CORS) []*storagepb.Bucket_Cors {
var out []*storagepb.Bucket_Cors
for _, v := range c {
out = append(out, &storagepb.Bucket_Cors{
MaxAgeSeconds: int32(v.MaxAge / time.Second),
Method: v.Methods,
Origin: v.Origins,
ResponseHeader: v.ResponseHeaders,
})
}
return out
}
func toCORS(rc []*raw.BucketCors) []CORS {
var out []CORS
for _, v := range rc {
@ -1240,6 +1393,19 @@ func toCORS(rc []*raw.BucketCors) []CORS {
return out
}
func toCORSFromProto(rc []*storagepb.Bucket_Cors) []CORS {
var out []CORS
for _, v := range rc {
out = append(out, CORS{
MaxAge: time.Duration(v.GetMaxAgeSeconds()) * time.Second,
Methods: v.GetMethod(),
Origins: v.GetOrigin(),
ResponseHeaders: v.GetResponseHeader(),
})
}
return out
}
func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle {
var rl raw.BucketLifecycle
if len(l.Rules) == 0 {
@ -1283,6 +1449,51 @@ func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle {
return &rl
}
func toProtoLifecycle(l Lifecycle) *storagepb.Bucket_Lifecycle {
var rl storagepb.Bucket_Lifecycle
for _, r := range l.Rules {
rr := &storagepb.Bucket_Lifecycle_Rule{
Action: &storagepb.Bucket_Lifecycle_Rule_Action{
Type: r.Action.Type,
StorageClass: r.Action.StorageClass,
},
Condition: &storagepb.Bucket_Lifecycle_Rule_Condition{
// Note: The Apiary types use int64 (even though the Discovery
// doc states "format: int32"), so the client types used int64,
// but the proto uses int32 so we have a potentially lossy
// conversion.
AgeDays: proto.Int32(int32(r.Condition.AgeInDays)),
DaysSinceCustomTime: proto.Int32(int32(r.Condition.DaysSinceCustomTime)),
DaysSinceNoncurrentTime: proto.Int32(int32(r.Condition.DaysSinceNoncurrentTime)),
MatchesStorageClass: r.Condition.MatchesStorageClasses,
NumNewerVersions: proto.Int32(int32(r.Condition.NumNewerVersions)),
},
}
switch r.Condition.Liveness {
case LiveAndArchived:
rr.Condition.IsLive = nil
case Live:
rr.Condition.IsLive = proto.Bool(true)
case Archived:
rr.Condition.IsLive = proto.Bool(false)
}
if !r.Condition.CreatedBefore.IsZero() {
rr.Condition.CreatedBefore = adapters.TimeToProtoDate(r.Condition.CreatedBefore)
}
if !r.Condition.CustomTimeBefore.IsZero() {
rr.Condition.CustomTimeBefore = adapters.TimeToProtoDate(r.Condition.CustomTimeBefore)
}
if !r.Condition.NoncurrentTimeBefore.IsZero() {
rr.Condition.NoncurrentTimeBefore = adapters.TimeToProtoDate(r.Condition.NoncurrentTimeBefore)
}
rl.Rule = append(rl.Rule, rr)
}
return &rl
}
func toLifecycle(rl *raw.BucketLifecycle) Lifecycle {
var l Lifecycle
if rl == nil {
@ -1325,6 +1536,48 @@ func toLifecycle(rl *raw.BucketLifecycle) Lifecycle {
return l
}
func toLifecycleFromProto(rl *storagepb.Bucket_Lifecycle) Lifecycle {
var l Lifecycle
if rl == nil {
return l
}
for _, rr := range rl.GetRule() {
r := LifecycleRule{
Action: LifecycleAction{
Type: rr.GetAction().GetType(),
StorageClass: rr.GetAction().GetStorageClass(),
},
Condition: LifecycleCondition{
AgeInDays: int64(rr.GetCondition().GetAgeDays()),
DaysSinceCustomTime: int64(rr.GetCondition().GetDaysSinceCustomTime()),
DaysSinceNoncurrentTime: int64(rr.GetCondition().GetDaysSinceNoncurrentTime()),
MatchesStorageClasses: rr.GetCondition().GetMatchesStorageClass(),
NumNewerVersions: int64(rr.GetCondition().GetNumNewerVersions()),
},
}
if rr.GetCondition().IsLive == nil {
r.Condition.Liveness = LiveAndArchived
} else if rr.GetCondition().GetIsLive() {
r.Condition.Liveness = Live
} else {
r.Condition.Liveness = Archived
}
if rr.GetCondition().GetCreatedBefore() != nil {
r.Condition.CreatedBefore = adapters.ProtoDateToUTCTime(rr.GetCondition().GetCreatedBefore())
}
if rr.GetCondition().GetCustomTimeBefore() != nil {
r.Condition.CustomTimeBefore = adapters.ProtoDateToUTCTime(rr.GetCondition().GetCustomTimeBefore())
}
if rr.GetCondition().GetNoncurrentTimeBefore() != nil {
r.Condition.NoncurrentTimeBefore = adapters.ProtoDateToUTCTime(rr.GetCondition().GetNoncurrentTimeBefore())
}
l.Rules = append(l.Rules, r)
}
return l
}
func (e *BucketEncryption) toRawBucketEncryption() *raw.BucketEncryption {
if e == nil {
return nil
@ -1334,6 +1587,15 @@ func (e *BucketEncryption) toRawBucketEncryption() *raw.BucketEncryption {
}
}
func (e *BucketEncryption) toProtoBucketEncryption() *storagepb.Bucket_Encryption {
if e == nil {
return nil
}
return &storagepb.Bucket_Encryption{
DefaultKmsKey: e.DefaultKMSKeyName,
}
}
func toBucketEncryption(e *raw.BucketEncryption) *BucketEncryption {
if e == nil {
return nil
@ -1341,6 +1603,13 @@ func toBucketEncryption(e *raw.BucketEncryption) *BucketEncryption {
return &BucketEncryption{DefaultKMSKeyName: e.DefaultKmsKeyName}
}
func toBucketEncryptionFromProto(e *storagepb.Bucket_Encryption) *BucketEncryption {
if e == nil {
return nil
}
return &BucketEncryption{DefaultKMSKeyName: e.GetDefaultKmsKey()}
}
func (b *BucketLogging) toRawBucketLogging() *raw.BucketLogging {
if b == nil {
return nil
@ -1351,6 +1620,16 @@ func (b *BucketLogging) toRawBucketLogging() *raw.BucketLogging {
}
}
func (b *BucketLogging) toProtoBucketLogging() *storagepb.Bucket_Logging {
if b == nil {
return nil
}
return &storagepb.Bucket_Logging{
LogBucket: b.LogBucket,
LogObjectPrefix: b.LogObjectPrefix,
}
}
func toBucketLogging(b *raw.BucketLogging) *BucketLogging {
if b == nil {
return nil
@ -1361,6 +1640,16 @@ func toBucketLogging(b *raw.BucketLogging) *BucketLogging {
}
}
func toBucketLoggingFromProto(b *storagepb.Bucket_Logging) *BucketLogging {
if b == nil {
return nil
}
return &BucketLogging{
LogBucket: b.GetLogBucket(),
LogObjectPrefix: b.GetLogObjectPrefix(),
}
}
func (w *BucketWebsite) toRawBucketWebsite() *raw.BucketWebsite {
if w == nil {
return nil
@ -1371,6 +1660,16 @@ func (w *BucketWebsite) toRawBucketWebsite() *raw.BucketWebsite {
}
}
func (w *BucketWebsite) toProtoBucketWebsite() *storagepb.Bucket_Website {
if w == nil {
return nil
}
return &storagepb.Bucket_Website{
MainPageSuffix: w.MainPageSuffix,
NotFoundPage: w.NotFoundPage,
}
}
func toBucketWebsite(w *raw.BucketWebsite) *BucketWebsite {
if w == nil {
return nil
@ -1381,6 +1680,16 @@ func toBucketWebsite(w *raw.BucketWebsite) *BucketWebsite {
}
}
func toBucketWebsiteFromProto(w *storagepb.Bucket_Website) *BucketWebsite {
if w == nil {
return nil
}
return &BucketWebsite{
MainPageSuffix: w.GetMainPageSuffix(),
NotFoundPage: w.GetNotFoundPage(),
}
}
func toBucketPolicyOnly(b *raw.BucketIamConfiguration) BucketPolicyOnly {
if b == nil || b.BucketPolicyOnly == nil || !b.BucketPolicyOnly.Enabled {
return BucketPolicyOnly{}
@ -1397,6 +1706,16 @@ func toBucketPolicyOnly(b *raw.BucketIamConfiguration) BucketPolicyOnly {
}
}
func toBucketPolicyOnlyFromProto(b *storagepb.Bucket_IamConfig) BucketPolicyOnly {
if b == nil || !b.GetUniformBucketLevelAccess().GetEnabled() {
return BucketPolicyOnly{}
}
return BucketPolicyOnly{
Enabled: true,
LockedTime: b.GetUniformBucketLevelAccess().GetLockTime().AsTime(),
}
}
func toUniformBucketLevelAccess(b *raw.BucketIamConfiguration) UniformBucketLevelAccess {
if b == nil || b.UniformBucketLevelAccess == nil || !b.UniformBucketLevelAccess.Enabled {
return UniformBucketLevelAccess{}
@ -1413,6 +1732,16 @@ func toUniformBucketLevelAccess(b *raw.BucketIamConfiguration) UniformBucketLeve
}
}
func toUniformBucketLevelAccessFromProto(b *storagepb.Bucket_IamConfig) UniformBucketLevelAccess {
if b == nil || !b.GetUniformBucketLevelAccess().GetEnabled() {
return UniformBucketLevelAccess{}
}
return UniformBucketLevelAccess{
Enabled: true,
LockedTime: b.GetUniformBucketLevelAccess().GetLockTime().AsTime(),
}
}
func toPublicAccessPrevention(b *raw.BucketIamConfiguration) PublicAccessPrevention {
if b == nil {
return PublicAccessPreventionUnknown
@ -1427,6 +1756,20 @@ func toPublicAccessPrevention(b *raw.BucketIamConfiguration) PublicAccessPrevent
}
}
func toPublicAccessPreventionFromProto(b *storagepb.Bucket_IamConfig) PublicAccessPrevention {
if b == nil {
return PublicAccessPreventionUnknown
}
switch b.GetPublicAccessPrevention() {
case publicAccessPreventionInherited, publicAccessPreventionUnspecified:
return PublicAccessPreventionInherited
case publicAccessPreventionEnforced:
return PublicAccessPreventionEnforced
default:
return PublicAccessPreventionUnknown
}
}
func toRPO(b *raw.Bucket) RPO {
if b == nil {
return RPOUnknown
@ -1441,6 +1784,20 @@ func toRPO(b *raw.Bucket) RPO {
}
}
func toRPOFromProto(b *storagepb.Bucket) RPO {
if b == nil {
return RPOUnknown
}
switch b.GetRpo() {
case rpoDefault:
return RPODefault
case rpoAsyncTurbo:
return RPOAsyncTurbo
default:
return RPOUnknown
}
}
// Objects returns an iterator over the objects in the bucket that match the
// Query q. If q is nil, no filtering is done. Objects will be iterated over
// lexicographically by name.
@ -1543,6 +1900,7 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error)
req.StartOffset(it.query.StartOffset)
req.EndOffset(it.query.EndOffset)
req.Versions(it.query.Versions)
req.IncludeTrailingDelimiter(it.query.IncludeTrailingDelimiter)
if len(it.query.fieldSelection) > 0 {
req.Fields("nextPageToken", googleapi.Field(it.query.fieldSelection))
}


vendor/cloud.google.com/go/storage/client.go generated vendored Normal file
View file

@ -0,0 +1,241 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
gax "github.com/googleapis/gax-go/v2"
"google.golang.org/api/option"
iampb "google.golang.org/genproto/googleapis/iam/v1"
)
// TODO(noahdietz): Move existing factory methods to this file.
// storageClient is an internal-only interface designed to separate the
// transport-specific logic of making Storage API calls from the logic of the
// client library.
//
// Implementation requirements beyond implementing the interface include:
// * factory method(s) must accept a `userProject string` param
// * `settings` must be retained per instance
// * `storageOption`s must be resolved in the order they are received
// * all API errors must be wrapped in the gax-go APIError type
// * any unimplemented interface methods must return errMethodNotSupported
//
// TODO(noahdietz): This interface is currently not used in the production code
// paths
type storageClient interface {
// Top-level methods.
GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error)
CreateBucket(ctx context.Context, project string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error)
ListBuckets(ctx context.Context, project string, opts ...storageOption) (*BucketIterator, error)
Close() error
// Bucket methods.
DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error
GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error)
UpdateBucket(ctx context.Context, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error)
LockBucketRetentionPolicy(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error
ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) (*ObjectIterator, error)
// Object metadata methods.
DeleteObject(ctx context.Context, bucket, object string, conds *Conditions, opts ...storageOption) error
GetObject(ctx context.Context, bucket, object string, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error)
UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error)
// Default Object ACL methods.
DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error
ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error)
UpdateDefaultObjectACL(ctx context.Context, opts ...storageOption) (*ACLRule, error)
// Bucket ACL methods.
DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error
ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error)
UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error)
// Object ACL methods.
DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error
ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error)
UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error)
// Media operations.
ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error)
RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error)
OpenReader(ctx context.Context, r *Reader, opts ...storageOption) error
OpenWriter(ctx context.Context, w *Writer, opts ...storageOption) error
// IAM methods.
GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error)
SetIamPolicy(ctx context.Context, resource string, policy *iampb.Policy, opts ...storageOption) error
TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error)
// HMAC Key methods.
GetHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error)
ListHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) *HMACKeysIterator
UpdateHMACKey(ctx context.Context, desc *hmacKeyDesc, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error)
CreateHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error)
DeleteHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) error
}
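// Illustrative sketch (not part of the vendored file): a veneer layer is
// expected to pick a concrete transport behind this interface. The two
// constructors referenced below are added elsewhere in this commit; the
// useGRPC flag is hypothetical.
//
//	var sc storageClient
//	var err error
//	if useGRPC {
//		sc, err = newGRPCStorageClient(ctx, opts...)
//	} else {
//		sc, err = newHTTPStorageClient(ctx, opts...)
//	}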
// settings contains transport-agnostic configuration for API calls made via
// the storageClient interface. All implementations must utilize settings
// and respect those that are applicable.
type settings struct {
// retry is the complete retry configuration to use when evaluating if an
// API call should be retried.
retry *retryConfig
// gax is a set of gax.CallOption to be conveyed to gax.Invoke.
// Note: not all storageClient implementations use gax.Invoke.
gax []gax.CallOption
// idempotent indicates whether the call is idempotent; this is considered
// when deciding if the call should be retried.
idempotent bool
// clientOption is a set of option.ClientOption to be used during client
// transport initialization. See https://pkg.go.dev/google.golang.org/api/option
// for a list of supported options.
clientOption []option.ClientOption
// userProject is the user project that should be billed for the request.
userProject string
}
func initSettings(opts ...storageOption) *settings {
s := &settings{}
resolveOptions(s, opts...)
return s
}
func resolveOptions(s *settings, opts ...storageOption) {
for _, o := range opts {
o.Apply(s)
}
}
// callSettings is a helper for resolving storage options against the settings
// in the context of an individual call. This ensures that options applied by
// one call never mutate the shared client-level default settings.
//
// Example: s := callSettings(c.settings, opts...)
func callSettings(defaults *settings, opts ...storageOption) *settings {
if defaults == nil {
return nil
}
// This does not make a deep copy of the pointer/slice fields, but all
// options replace the settings fields rather than modify their values in
// place.
cs := *defaults
resolveOptions(&cs, opts...)
return &cs
}
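// Illustrative usage sketch (not part of the vendored file; project names are
// hypothetical): per-call options are resolved against a copy, so the
// client-level defaults are never mutated.
//
//	defaults := initSettings(withUserProject("base-project"))
//	s := callSettings(defaults, idempotent(true), withUserProject("call-project"))
//	// s.userProject == "call-project" && s.idempotent == true
//	// defaults.userProject is still "base-project"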
// storageOption is the transport-agnostic call option for the storageClient
// interface.
type storageOption interface {
Apply(s *settings)
}
func withGAXOptions(opts ...gax.CallOption) storageOption {
return &gaxOption{opts}
}
type gaxOption struct {
opts []gax.CallOption
}
func (o *gaxOption) Apply(s *settings) { s.gax = o.opts }
func withRetryConfig(rc *retryConfig) storageOption {
return &retryOption{rc}
}
type retryOption struct {
rc *retryConfig
}
func (o *retryOption) Apply(s *settings) { s.retry = o.rc }
func idempotent(i bool) storageOption {
return &idempotentOption{i}
}
type idempotentOption struct {
idempotency bool
}
func (o *idempotentOption) Apply(s *settings) { s.idempotent = o.idempotency }
func withClientOptions(opts ...option.ClientOption) storageOption {
return &clientOption{opts: opts}
}
type clientOption struct {
opts []option.ClientOption
}
func (o *clientOption) Apply(s *settings) { s.clientOption = o.opts }
func withUserProject(project string) storageOption {
return &userProjectOption{project}
}
type userProjectOption struct {
project string
}
func (o *userProjectOption) Apply(s *settings) { s.userProject = o.project }
type composeObjectRequest struct {
dstBucket string
dstObject string
srcs []string
gen int64
conds *Conditions
predefinedACL string
}
type rewriteObjectRequest struct {
srcBucket string
srcObject string
dstBucket string
dstObject string
dstKeyName string
attrs *ObjectAttrs
gen int64
conds *Conditions
predefinedACL string
token string
}
type rewriteObjectResponse struct {
resource *ObjectAttrs
done bool
written int64
token string
}
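// The transport implementations added in this commit wrap calls in
// run(ctx, call, s.retry, s.idempotent) and report unsupported methods with
// errMethodNotSupported. Both are defined elsewhere in the package; the
// shapes assumed by their call sites, sketched for orientation only:
//
//	var errMethodNotSupported = errors.New("storage: method is not currently supported")
//
//	func run(ctx context.Context, call func() error, retry *retryConfig, isIdempotent bool) error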

View file

@ -214,7 +214,17 @@ since you read it. Here is how to express that:
Signed URLs
You can obtain a URL that lets anyone read or write an object for a limited time.
You don't need to create a client to do this. See the documentation of
Signing a URL requires credentials authorized to sign a URL. To use the same
authentication that was used when instantiating the Storage client, use the
BucketHandle.SignedURL method.
url, err := client.Bucket(bucketName).SignedURL(objectName, opts)
if err != nil {
// TODO: Handle error.
}
fmt.Println(url)
You can also sign a URL without creating a client. See the documentation of
SignedURL for details.
url, err := storage.SignedURL(bucketName, "shared-object", opts)
@ -230,9 +240,9 @@ temporary permission. Conditions can be applied to restrict how the HTML form is
by a user.
For more information, please see https://cloud.google.com/storage/docs/xml-api/post-object as well
as the documentation of GenerateSignedPostPolicyV4.
as the documentation of BucketHandle.GenerateSignedPostPolicyV4.
pv4, err := storage.GenerateSignedPostPolicyV4(bucketName, objectName, opts)
pv4, err := client.Bucket(bucketName).GenerateSignedPostPolicyV4(objectName, opts)
if err != nil {
// TODO: Handle error.
}

View file

@ -32,23 +32,42 @@ if [ "$minor_ver" -lt "$min_minor_ver" ]; then
fi
export STORAGE_EMULATOR_HOST="http://localhost:9000"
export STORAGE_EMULATOR_HOST_GRPC="localhost:8888"
DEFAULT_IMAGE_NAME='gcr.io/cloud-devrel-public-resources/storage-testbench'
DEFAULT_IMAGE_TAG='latest'
DOCKER_IMAGE=${DEFAULT_IMAGE_NAME}:${DEFAULT_IMAGE_TAG}
CONTAINER_NAME=storage_testbench
# Get the docker image for the testbench
docker pull $DOCKER_IMAGE
# Start the testbench
# Note: --net=host makes the container bind directly to the Docker host's network,
# with no network isolation. If we were to use port-mapping instead, reset connection errors
# would be captured differently and cause unexpected test behaviour.
# The host networking driver works only on Linux hosts.
# See more about using host networking: https://docs.docker.com/network/host/
docker run --name $CONTAINER_NAME --rm --net=host $DOCKER_IMAGE &
DOCKER_NETWORK="--net=host"
# Note: We do not expect the RetryConformanceTest suite to pass on darwin due to
# differences in the network errors emitted by the system.
if [ `go env GOOS` == 'darwin' ]; then
DOCKER_NETWORK="-p 9000:9000 -p 8888:8888"
fi
# Get the docker image for the testbench
docker pull $DOCKER_IMAGE
# Start the testbench
docker run --name $CONTAINER_NAME --rm -d $DOCKER_NETWORK $DOCKER_IMAGE
echo "Running the Cloud Storage testbench: $STORAGE_EMULATOR_HOST"
sleep 1
# Stop the testbench & cleanup environment variables
function cleanup() {
echo "Cleanup testbench"
docker stop $CONTAINER_NAME
unset STORAGE_EMULATOR_HOST;
unset STORAGE_EMULATOR_HOST_GRPC;
}
trap cleanup EXIT
# Check that the server is running - retry several times to allow for start-up time
response=$(curl -w "%{http_code}\n" $STORAGE_EMULATOR_HOST --retry-connrefused --retry 5 -o /dev/null)
@ -59,13 +78,15 @@ then
exit 1
fi
# Stop the testbench & cleanup environment variables
function cleanup() {
echo "Cleanup testbench"
docker stop $CONTAINER_NAME
unset STORAGE_EMULATOR_HOST;
}
trap cleanup EXIT
# Start the gRPC server on port 8888.
echo "Starting the gRPC server on port 8888"
response=$(curl -w "%{http_code}\n" --retry 5 --retry-max-time 40 -o /dev/null "$STORAGE_EMULATOR_HOST/start_grpc?port=8888")
if [[ $response != 200 ]]
then
echo "Testbench gRPC server did not start correctly"
exit 1
fi
# Run tests
go test -v -timeout 10m ./ -run="TestRetryConformance" -short 2>&1 | tee -a sponge_log.log
go test -v -timeout 10m ./ -run="^Test(RetryConformance|.*Emulated)$" -short 2>&1 | tee -a sponge_log.log

vendor/cloud.google.com/go/storage/grpc_client.go generated vendored Normal file
View file

@ -0,0 +1,346 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"os"
gapic "cloud.google.com/go/storage/internal/apiv2"
"google.golang.org/api/option"
iampb "google.golang.org/genproto/googleapis/iam/v1"
storagepb "google.golang.org/genproto/googleapis/storage/v2"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
const (
// defaultConnPoolSize is the default number of connections
// to initialize in the GAPIC gRPC connection pool. A larger
// connection pool may be necessary for jobs that require
// high throughput and/or leverage many concurrent streams.
//
// This is an experimental API and not intended for public use.
defaultConnPoolSize = 4
// globalProjectAlias is the project ID alias used for global buckets.
//
// This is only used for the gRPC API.
globalProjectAlias = "_"
)
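// toProjectResource and bucketResourceName (used throughout this file) are
// defined elsewhere in the package; a minimal sketch consistent with their
// call sites:
//
//	func toProjectResource(project string) string {
//		return fmt.Sprintf("projects/%s", project)
//	}
//
//	func bucketResourceName(project, bucket string) string {
//		return fmt.Sprintf("projects/%s/buckets/%s", project, bucket)
//	}
//
// For example, bucketResourceName(globalProjectAlias, "my-bucket") would
// yield "projects/_/buckets/my-bucket".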
// defaultGRPCOptions returns a set of the default client options
// for gRPC client initialization.
//
// This is an experimental API and not intended for public use.
func defaultGRPCOptions() []option.ClientOption {
defaults := []option.ClientOption{
option.WithGRPCConnectionPool(defaultConnPoolSize),
}
// Set emulator options for gRPC if an emulator was specified. Note that in a
// hybrid client, STORAGE_EMULATOR_HOST will set the host to use for HTTP and
// STORAGE_EMULATOR_HOST_GRPC will set the host to use for gRPC (when using a
// local emulator, HTTP and gRPC must use different ports, so this is
// necessary).
//
// TODO: When the newHybridClient is no longer used, remove
// STORAGE_EMULATOR_HOST_GRPC and use STORAGE_EMULATOR_HOST for both the
// HTTP and gRPC based clients.
if host := os.Getenv("STORAGE_EMULATOR_HOST_GRPC"); host != "" {
// Strip the scheme from the emulator host. WithEndpoint does not take a
// scheme for gRPC.
host = stripScheme(host)
defaults = append(defaults,
option.WithEndpoint(host),
option.WithGRPCDialOption(grpc.WithInsecure()),
option.WithoutAuthentication(),
)
}
return defaults
}
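// stripScheme is defined elsewhere in the package; a minimal sketch of the
// behavior assumed here (drop everything up to and including "://"):
//
//	func stripScheme(host string) string {
//		if i := strings.Index(host, "://"); i >= 0 {
//			return host[i+len("://"):]
//		}
//		return host
//	}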
// grpcStorageClient is the gRPC API implementation of the transport-agnostic
// storageClient interface.
//
// This is an experimental API and not intended for public use.
type grpcStorageClient struct {
raw *gapic.Client
settings *settings
}
// newGRPCStorageClient initializes a new storageClient that uses the gRPC
// Storage API.
//
// This is an experimental API and not intended for public use.
func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) {
s := initSettings(opts...)
s.clientOption = append(defaultGRPCOptions(), s.clientOption...)
g, err := gapic.NewClient(ctx, s.clientOption...)
if err != nil {
return nil, err
}
return &grpcStorageClient{
raw: g,
settings: s,
}, nil
}
func (c *grpcStorageClient) Close() error {
return c.raw.Close()
}
// Top-level methods.
func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) {
return "", errMethodNotSupported
}
func (c *grpcStorageClient) CreateBucket(ctx context.Context, project string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) {
s := callSettings(c.settings, opts...)
b := attrs.toProtoBucket()
// If there is lifecycle information but no location, explicitly set
// the location. This is a GCS quirk/bug.
if b.GetLocation() == "" && b.GetLifecycle() != nil {
b.Location = "US"
}
req := &storagepb.CreateBucketRequest{
Parent: toProjectResource(project),
Bucket: b,
BucketId: b.GetName(),
// TODO(noahdietz): This will be switched to a string.
//
// PredefinedAcl: attrs.PredefinedACL,
// PredefinedDefaultObjectAcl: attrs.PredefinedDefaultObjectACL,
}
var battrs *BucketAttrs
err := run(ctx, func() error {
res, err := c.raw.CreateBucket(ctx, req, s.gax...)
battrs = newBucketFromProto(res)
return err
}, s.retry, s.idempotent)
return battrs, err
}
func (c *grpcStorageClient) ListBuckets(ctx context.Context, project string, opts ...storageOption) (*BucketIterator, error) {
return nil, errMethodNotSupported
}
// Bucket methods.
func (c *grpcStorageClient) DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error {
s := callSettings(c.settings, opts...)
req := &storagepb.DeleteBucketRequest{
Name: bucketResourceName(globalProjectAlias, bucket),
}
if err := applyBucketCondsProto("grpcStorageClient.DeleteBucket", conds, req); err != nil {
return err
}
if s.userProject != "" {
req.CommonRequestParams = &storagepb.CommonRequestParams{
UserProject: toProjectResource(s.userProject),
}
}
return run(ctx, func() error {
return c.raw.DeleteBucket(ctx, req, s.gax...)
}, s.retry, s.idempotent)
}
func (c *grpcStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) {
s := callSettings(c.settings, opts...)
req := &storagepb.GetBucketRequest{
Name: bucketResourceName(globalProjectAlias, bucket),
}
if err := applyBucketCondsProto("grpcStorageClient.GetBucket", conds, req); err != nil {
return nil, err
}
if s.userProject != "" {
req.CommonRequestParams = &storagepb.CommonRequestParams{
UserProject: toProjectResource(s.userProject),
}
}
var battrs *BucketAttrs
err := run(ctx, func() error {
res, err := c.raw.GetBucket(ctx, req, s.gax...)
battrs = newBucketFromProto(res)
return err
}, s.retry, s.idempotent)
if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
return nil, ErrBucketNotExist
}
return battrs, err
}
func (c *grpcStorageClient) UpdateBucket(ctx context.Context, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) {
return nil, errMethodNotSupported
}
func (c *grpcStorageClient) LockBucketRetentionPolicy(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error {
return errMethodNotSupported
}
func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) (*ObjectIterator, error) {
return nil, errMethodNotSupported
}
// Object metadata methods.
func (c *grpcStorageClient) DeleteObject(ctx context.Context, bucket, object string, conds *Conditions, opts ...storageOption) error {
return errMethodNotSupported
}
func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) {
return nil, errMethodNotSupported
}
func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) {
return nil, errMethodNotSupported
}
// Default Object ACL methods.
func (c *grpcStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error {
return errMethodNotSupported
}
func (c *grpcStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) {
return nil, errMethodNotSupported
}
func (c *grpcStorageClient) UpdateDefaultObjectACL(ctx context.Context, opts ...storageOption) (*ACLRule, error) {
return nil, errMethodNotSupported
}
// Bucket ACL methods.
func (c *grpcStorageClient) DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error {
return errMethodNotSupported
}
func (c *grpcStorageClient) ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) {
return nil, errMethodNotSupported
}
func (c *grpcStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error) {
return nil, errMethodNotSupported
}
// Object ACL methods.
func (c *grpcStorageClient) DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error {
return errMethodNotSupported
}
func (c *grpcStorageClient) ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) {
return nil, errMethodNotSupported
}
func (c *grpcStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error) {
return nil, errMethodNotSupported
}
// Media operations.
func (c *grpcStorageClient) ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error) {
return nil, errMethodNotSupported
}
func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) {
return nil, errMethodNotSupported
}
func (c *grpcStorageClient) OpenReader(ctx context.Context, r *Reader, opts ...storageOption) error {
return errMethodNotSupported
}
func (c *grpcStorageClient) OpenWriter(ctx context.Context, w *Writer, opts ...storageOption) error {
return errMethodNotSupported
}
// IAM methods.
func (c *grpcStorageClient) GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error) {
// TODO: Need a way to set UserProject, potentially in X-Goog-User-Project system parameter.
s := callSettings(c.settings, opts...)
req := &iampb.GetIamPolicyRequest{
Resource: bucketResourceName(globalProjectAlias, resource),
Options: &iampb.GetPolicyOptions{
RequestedPolicyVersion: version,
},
}
var rp *iampb.Policy
err := run(ctx, func() error {
var err error
rp, err = c.raw.GetIamPolicy(ctx, req, s.gax...)
return err
}, s.retry, s.idempotent)
return rp, err
}
func (c *grpcStorageClient) SetIamPolicy(ctx context.Context, resource string, policy *iampb.Policy, opts ...storageOption) error {
// TODO: Need a way to set UserProject, potentially in X-Goog-User-Project system parameter.
s := callSettings(c.settings, opts...)
req := &iampb.SetIamPolicyRequest{
Resource: bucketResourceName(globalProjectAlias, resource),
Policy: policy,
}
return run(ctx, func() error {
_, err := c.raw.SetIamPolicy(ctx, req, s.gax...)
return err
}, s.retry, s.idempotent)
}
func (c *grpcStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) {
// TODO: Need a way to set UserProject, potentially in X-Goog-User-Project system parameter.
s := callSettings(c.settings, opts...)
req := &iampb.TestIamPermissionsRequest{
Resource: bucketResourceName(globalProjectAlias, resource),
Permissions: permissions,
}
var res *iampb.TestIamPermissionsResponse
err := run(ctx, func() error {
var err error
res, err = c.raw.TestIamPermissions(ctx, req, s.gax...)
return err
}, s.retry, s.idempotent)
if err != nil {
return nil, err
}
return res.Permissions, nil
}
// HMAC Key methods.
func (c *grpcStorageClient) GetHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error) {
return nil, errMethodNotSupported
}
func (c *grpcStorageClient) ListHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) *HMACKeysIterator {
return &HMACKeysIterator{}
}
func (c *grpcStorageClient) UpdateHMACKey(ctx context.Context, desc *hmacKeyDesc, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) {
return nil, errMethodNotSupported
}
func (c *grpcStorageClient) CreateHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error) {
return nil, errMethodNotSupported
}
func (c *grpcStorageClient) DeleteHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) error {
return errMethodNotSupported
}

vendor/cloud.google.com/go/storage/http_client.go generated vendored Normal file
View file

@ -0,0 +1,369 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"errors"
"fmt"
"net/http"
"net/url"
"os"
"strings"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
"google.golang.org/api/option"
"google.golang.org/api/option/internaloption"
raw "google.golang.org/api/storage/v1"
"google.golang.org/api/transport"
htransport "google.golang.org/api/transport/http"
iampb "google.golang.org/genproto/googleapis/iam/v1"
)
// httpStorageClient is the HTTP-JSON API implementation of the transport-agnostic
// storageClient interface.
//
// This is an experimental API and not intended for public use.
type httpStorageClient struct {
creds *google.Credentials
hc *http.Client
readHost string
raw *raw.Service
scheme string
settings *settings
}
// newHTTPStorageClient initializes a new storageClient that uses the HTTP-JSON
// Storage API.
//
// This is an experimental API and not intended for public use.
func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) {
s := initSettings(opts...)
o := s.clientOption
var creds *google.Credentials
// In general, it is recommended to use raw.NewService instead of htransport.NewClient
// since raw.NewService configures the correct default endpoints when initializing the
// internal http client. However, in our case, "NewRangeReader" in reader.go needs to
// access the http client directly to make requests, so we create the client manually
// here so it can be re-used by both reader.go and raw.NewService. This means we need to
// manually configure the default endpoint options on the http client. Furthermore, we
// need to account for STORAGE_EMULATOR_HOST override when setting the default endpoints.
if host := os.Getenv("STORAGE_EMULATOR_HOST"); host == "" {
// Prepend default options to avoid overriding options passed by the user.
o = append([]option.ClientOption{option.WithScopes(ScopeFullControl, "https://www.googleapis.com/auth/cloud-platform"), option.WithUserAgent(userAgent)}, o...)
o = append(o, internaloption.WithDefaultEndpoint("https://storage.googleapis.com/storage/v1/"))
o = append(o, internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/"))
// Don't error out here. The user may have passed in their own HTTP
// client which does not auth with ADC or other common conventions.
c, err := transport.Creds(ctx, o...)
if err == nil {
creds = c
o = append(o, internaloption.WithCredentials(creds))
}
} else {
var hostURL *url.URL
if strings.Contains(host, "://") {
h, err := url.Parse(host)
if err != nil {
return nil, err
}
hostURL = h
} else {
// Add a scheme for the user if one was not supplied in STORAGE_EMULATOR_HOST.
// The URL is only parsed correctly if it has a scheme, so we build it ourselves.
hostURL = &url.URL{Scheme: "http", Host: host}
}
hostURL.Path = "storage/v1/"
endpoint := hostURL.String()
// Append the emulator host as default endpoint for the user
o = append([]option.ClientOption{option.WithoutAuthentication()}, o...)
o = append(o, internaloption.WithDefaultEndpoint(endpoint))
o = append(o, internaloption.WithDefaultMTLSEndpoint(endpoint))
}
s.clientOption = o
// htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint.
hc, ep, err := htransport.NewClient(ctx, s.clientOption...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}
// RawService should be created with the chosen endpoint to take the user's endpoint override into account.
rawService, err := raw.NewService(ctx, option.WithEndpoint(ep), option.WithHTTPClient(hc))
if err != nil {
return nil, fmt.Errorf("storage client: %v", err)
}
// Update readHost and scheme with the chosen endpoint.
u, err := url.Parse(ep)
if err != nil {
return nil, fmt.Errorf("supplied endpoint %q is not valid: %v", ep, err)
}
return &httpStorageClient{
creds: creds,
hc: hc,
readHost: u.Host,
raw: rawService,
scheme: u.Scheme,
settings: s,
}, nil
}
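// Illustrative sketch (not part of the vendored file) of the emulator branch
// above: with STORAGE_EMULATOR_HOST=localhost:9000 and no scheme supplied,
// the constructed default endpoint is
//
//	(&url.URL{Scheme: "http", Host: "localhost:9000", Path: "storage/v1/"}).String()
//	// == "http://localhost:9000/storage/v1/"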
func (c *httpStorageClient) Close() error {
c.hc.CloseIdleConnections()
return nil
}
// Top-level methods.
func (c *httpStorageClient) GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) {
return "", errMethodNotSupported
}
func (c *httpStorageClient) CreateBucket(ctx context.Context, project string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) {
s := callSettings(c.settings, opts...)
var bkt *raw.Bucket
if attrs != nil {
bkt = attrs.toRawBucket()
} else {
bkt = &raw.Bucket{}
}
// If there is lifecycle information but no location, explicitly set
// the location. This is a GCS quirk/bug.
if bkt.Location == "" && bkt.Lifecycle != nil {
bkt.Location = "US"
}
req := c.raw.Buckets.Insert(project, bkt)
setClientHeader(req.Header())
if attrs != nil && attrs.PredefinedACL != "" {
req.PredefinedAcl(attrs.PredefinedACL)
}
if attrs != nil && attrs.PredefinedDefaultObjectACL != "" {
req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL)
}
var battrs *BucketAttrs
err := run(ctx, func() error {
b, err := req.Context(ctx).Do()
if err != nil {
return err
}
battrs, err = newBucket(b)
return err
}, s.retry, s.idempotent)
return battrs, err
}
func (c *httpStorageClient) ListBuckets(ctx context.Context, project string, opts ...storageOption) (*BucketIterator, error) {
return nil, errMethodNotSupported
}
// Bucket methods.
func (c *httpStorageClient) DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error {
s := callSettings(c.settings, opts...)
req := c.raw.Buckets.Delete(bucket)
setClientHeader(req.Header())
if err := applyBucketConds("httpStorageClient.DeleteBucket", conds, req); err != nil {
return err
}
if s.userProject != "" {
req.UserProject(s.userProject)
}
return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent)
}
func (c *httpStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) {
s := callSettings(c.settings, opts...)
req := c.raw.Buckets.Get(bucket).Projection("full")
setClientHeader(req.Header())
err := applyBucketConds("httpStorageClient.GetBucket", conds, req)
if err != nil {
return nil, err
}
if s.userProject != "" {
req.UserProject(s.userProject)
}
var resp *raw.Bucket
err = run(ctx, func() error {
resp, err = req.Context(ctx).Do()
return err
}, s.retry, s.idempotent)
var e *googleapi.Error
if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
return nil, ErrBucketNotExist
}
if err != nil {
return nil, err
}
return newBucket(resp)
}
func (c *httpStorageClient) UpdateBucket(ctx context.Context, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) {
return nil, errMethodNotSupported
}
func (c *httpStorageClient) LockBucketRetentionPolicy(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error {
return errMethodNotSupported
}
func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) (*ObjectIterator, error) {
return nil, errMethodNotSupported
}
// Object metadata methods.
func (c *httpStorageClient) DeleteObject(ctx context.Context, bucket, object string, conds *Conditions, opts ...storageOption) error {
return errMethodNotSupported
}
func (c *httpStorageClient) GetObject(ctx context.Context, bucket, object string, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) {
return nil, errMethodNotSupported
}
func (c *httpStorageClient) UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) {
return nil, errMethodNotSupported
}
// Default Object ACL methods.
func (c *httpStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error {
return errMethodNotSupported
}
func (c *httpStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) {
return nil, errMethodNotSupported
}
func (c *httpStorageClient) UpdateDefaultObjectACL(ctx context.Context, opts ...storageOption) (*ACLRule, error) {
return nil, errMethodNotSupported
}
// Bucket ACL methods.
func (c *httpStorageClient) DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error {
return errMethodNotSupported
}
func (c *httpStorageClient) ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) {
return nil, errMethodNotSupported
}
func (c *httpStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error) {
return nil, errMethodNotSupported
}
// Object ACL methods.
func (c *httpStorageClient) DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error {
return errMethodNotSupported
}
func (c *httpStorageClient) ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) {
return nil, errMethodNotSupported
}
func (c *httpStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error) {
return nil, errMethodNotSupported
}
// Media operations.
func (c *httpStorageClient) ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error) {
return nil, errMethodNotSupported
}
func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) {
return nil, errMethodNotSupported
}
func (c *httpStorageClient) OpenReader(ctx context.Context, r *Reader, opts ...storageOption) error {
return errMethodNotSupported
}
func (c *httpStorageClient) OpenWriter(ctx context.Context, w *Writer, opts ...storageOption) error {
return errMethodNotSupported
}
// IAM methods.
func (c *httpStorageClient) GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error) {
s := callSettings(c.settings, opts...)
call := c.raw.Buckets.GetIamPolicy(resource).OptionsRequestedPolicyVersion(int64(version))
setClientHeader(call.Header())
if s.userProject != "" {
call.UserProject(s.userProject)
}
var rp *raw.Policy
err := run(ctx, func() error {
var err error
rp, err = call.Context(ctx).Do()
return err
}, s.retry, s.idempotent)
if err != nil {
return nil, err
}
return iamFromStoragePolicy(rp), nil
}
func (c *httpStorageClient) SetIamPolicy(ctx context.Context, resource string, policy *iampb.Policy, opts ...storageOption) error {
s := callSettings(c.settings, opts...)
rp := iamToStoragePolicy(policy)
call := c.raw.Buckets.SetIamPolicy(resource, rp)
setClientHeader(call.Header())
if s.userProject != "" {
call.UserProject(s.userProject)
}
return run(ctx, func() error {
_, err := call.Context(ctx).Do()
return err
}, s.retry, s.idempotent)
}
func (c *httpStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) {
s := callSettings(c.settings, opts...)
call := c.raw.Buckets.TestIamPermissions(resource, permissions)
setClientHeader(call.Header())
if s.userProject != "" {
call.UserProject(s.userProject)
}
var res *raw.TestIamPermissionsResponse
err := run(ctx, func() error {
var err error
res, err = call.Context(ctx).Do()
return err
}, s.retry, s.idempotent)
if err != nil {
return nil, err
}
return res.Permissions, nil
}
// HMAC Key methods.
func (c *httpStorageClient) GetHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error) {
return nil, errMethodNotSupported
}
func (c *httpStorageClient) ListHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) *HMACKeysIterator {
return &HMACKeysIterator{}
}
func (c *httpStorageClient) UpdateHMACKey(ctx context.Context, desc *hmacKeyDesc, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) {
return nil, errMethodNotSupported
}
func (c *httpStorageClient) CreateHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error) {
return nil, errMethodNotSupported
}
func (c *httpStorageClient) DeleteHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) error {
return errMethodNotSupported
}

View file

@ -84,7 +84,14 @@ import (
type clientHookParams struct{}
type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
const versionClient = "20220216"
var versionClient string
func getVersionClient() string {
if versionClient == "" {
return "UNKNOWN"
}
return versionClient
}
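// versionClient is assumed to be assigned elsewhere in the package, for
// example by an init function recording the release version; sketch only,
// with a hypothetical value:
//
//	func init() { versionClient = "20220216" }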
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
out, _ := metadata.FromOutgoingContext(ctx)

View file

@ -19,7 +19,6 @@ package storage
import (
"context"
"math"
"time"
gax "github.com/googleapis/gax-go/v2"
"google.golang.org/api/iterator"
@ -29,7 +28,6 @@ import (
iampb "google.golang.org/genproto/googleapis/iam/v1"
storagepb "google.golang.org/genproto/googleapis/storage/v2"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/protobuf/proto"
)
@ -83,354 +81,35 @@ func defaultGRPCClientOptions() []option.ClientOption {
func defaultCallOptions() *CallOptions {
return &CallOptions{
DeleteBucket: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
GetBucket: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
CreateBucket: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
ListBuckets: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
LockBucketRetentionPolicy: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
GetIamPolicy: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
SetIamPolicy: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
TestIamPermissions: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
UpdateBucket: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
DeleteNotification: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
GetNotification: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
CreateNotification: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
ListNotifications: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
ComposeObject: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
DeleteObject: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
GetObject: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
ReadObject: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
UpdateObject: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
WriteObject: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
ListObjects: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
RewriteObject: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
StartResumableWrite: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
QueryWriteStatus: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
GetServiceAccount: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
CreateHmacKey: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
DeleteHmacKey: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
GetHmacKey: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
ListHmacKeys: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
UpdateHmacKey: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 1000 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 2.00,
})
}),
},
DeleteBucket: []gax.CallOption{},
GetBucket: []gax.CallOption{},
CreateBucket: []gax.CallOption{},
ListBuckets: []gax.CallOption{},
LockBucketRetentionPolicy: []gax.CallOption{},
GetIamPolicy: []gax.CallOption{},
SetIamPolicy: []gax.CallOption{},
TestIamPermissions: []gax.CallOption{},
UpdateBucket: []gax.CallOption{},
DeleteNotification: []gax.CallOption{},
GetNotification: []gax.CallOption{},
CreateNotification: []gax.CallOption{},
ListNotifications: []gax.CallOption{},
ComposeObject: []gax.CallOption{},
DeleteObject: []gax.CallOption{},
GetObject: []gax.CallOption{},
ReadObject: []gax.CallOption{},
UpdateObject: []gax.CallOption{},
WriteObject: []gax.CallOption{},
ListObjects: []gax.CallOption{},
RewriteObject: []gax.CallOption{},
StartResumableWrite: []gax.CallOption{},
QueryWriteStatus: []gax.CallOption{},
GetServiceAccount: []gax.CallOption{},
CreateHmacKey: []gax.CallOption{},
DeleteHmacKey: []gax.CallOption{},
GetHmacKey: []gax.CallOption{},
ListHmacKeys: []gax.CallOption{},
UpdateHmacKey: []gax.CallOption{},
}
}
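// With the per-RPC retry defaults removed above, a caller can still attach a
// retry policy per call through gax options, using the same signatures the
// removed defaults used; an illustrative sketch (codes and time imports as in
// the previous revision):
//
//	opts := []gax.CallOption{
//		gax.WithRetry(func() gax.Retryer {
//			return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
//				Initial:    time.Second,
//				Max:        time.Minute,
//				Multiplier: 2.00,
//			})
//		}),
//	}
//	bkt, err := c.GetBucket(ctx, req, opts...)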
@ -807,7 +486,7 @@ func (c *gRPCClient) Connection() *grpc.ClientConn {
// use by Google-written clients.
func (c *gRPCClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", versionGo()}, keyval...)
kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
@ -818,11 +497,6 @@ func (c *gRPCClient) Close() error {
}
func (c *gRPCClient) DeleteBucket(ctx context.Context, req *storagepb.DeleteBucketRequest, opts ...gax.CallOption) error {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append((*c.CallOptions).DeleteBucket[0:len((*c.CallOptions).DeleteBucket):len((*c.CallOptions).DeleteBucket)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -834,11 +508,6 @@ func (c *gRPCClient) DeleteBucket(ctx context.Context, req *storagepb.DeleteBuck
}
func (c *gRPCClient) GetBucket(ctx context.Context, req *storagepb.GetBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append((*c.CallOptions).GetBucket[0:len((*c.CallOptions).GetBucket):len((*c.CallOptions).GetBucket)], opts...)
var resp *storagepb.Bucket
@ -854,11 +523,6 @@ func (c *gRPCClient) GetBucket(ctx context.Context, req *storagepb.GetBucketRequ
}
func (c *gRPCClient) CreateBucket(ctx context.Context, req *storagepb.CreateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append((*c.CallOptions).CreateBucket[0:len((*c.CallOptions).CreateBucket):len((*c.CallOptions).CreateBucket)], opts...)
var resp *storagepb.Bucket
@ -917,11 +581,6 @@ func (c *gRPCClient) ListBuckets(ctx context.Context, req *storagepb.ListBuckets
}
func (c *gRPCClient) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.LockBucketRetentionPolicyRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append((*c.CallOptions).LockBucketRetentionPolicy[0:len((*c.CallOptions).LockBucketRetentionPolicy):len((*c.CallOptions).LockBucketRetentionPolicy)], opts...)
var resp *storagepb.Bucket
@ -937,11 +596,6 @@ func (c *gRPCClient) LockBucketRetentionPolicy(ctx context.Context, req *storage
}
func (c *gRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...)
var resp *iampb.Policy
@ -957,11 +611,6 @@ func (c *gRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRe
}
func (c *gRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...)
var resp *iampb.Policy
@ -977,11 +626,6 @@ func (c *gRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRe
}
func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...)
var resp *iampb.TestIamPermissionsResponse
@ -997,11 +641,6 @@ func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamP
}
func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append((*c.CallOptions).UpdateBucket[0:len((*c.CallOptions).UpdateBucket):len((*c.CallOptions).UpdateBucket)], opts...)
var resp *storagepb.Bucket
@ -1017,11 +656,6 @@ func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBuck
}
func (c *gRPCClient) DeleteNotification(ctx context.Context, req *storagepb.DeleteNotificationRequest, opts ...gax.CallOption) error {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append((*c.CallOptions).DeleteNotification[0:len((*c.CallOptions).DeleteNotification):len((*c.CallOptions).DeleteNotification)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -1033,11 +667,6 @@ func (c *gRPCClient) DeleteNotification(ctx context.Context, req *storagepb.Dele
}
func (c *gRPCClient) GetNotification(ctx context.Context, req *storagepb.GetNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append((*c.CallOptions).GetNotification[0:len((*c.CallOptions).GetNotification):len((*c.CallOptions).GetNotification)], opts...)
var resp *storagepb.Notification
@ -1053,11 +682,6 @@ func (c *gRPCClient) GetNotification(ctx context.Context, req *storagepb.GetNoti
}
func (c *gRPCClient) CreateNotification(ctx context.Context, req *storagepb.CreateNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append((*c.CallOptions).CreateNotification[0:len((*c.CallOptions).CreateNotification):len((*c.CallOptions).CreateNotification)], opts...)
var resp *storagepb.Notification
@ -1116,11 +740,6 @@ func (c *gRPCClient) ListNotifications(ctx context.Context, req *storagepb.ListN
}
func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append((*c.CallOptions).ComposeObject[0:len((*c.CallOptions).ComposeObject):len((*c.CallOptions).ComposeObject)], opts...)
var resp *storagepb.Object
@ -1136,11 +755,6 @@ func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeOb
}
func (c *gRPCClient) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append((*c.CallOptions).DeleteObject[0:len((*c.CallOptions).DeleteObject):len((*c.CallOptions).DeleteObject)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -1152,11 +766,6 @@ func (c *gRPCClient) DeleteObject(ctx context.Context, req *storagepb.DeleteObje
}
func (c *gRPCClient) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append((*c.CallOptions).GetObject[0:len((*c.CallOptions).GetObject):len((*c.CallOptions).GetObject)], opts...)
var resp *storagepb.Object
@ -1186,11 +795,6 @@ func (c *gRPCClient) ReadObject(ctx context.Context, req *storagepb.ReadObjectRe
}
func (c *gRPCClient) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append((*c.CallOptions).UpdateObject[0:len((*c.CallOptions).UpdateObject):len((*c.CallOptions).UpdateObject)], opts...)
var resp *storagepb.Object
@ -1264,11 +868,6 @@ func (c *gRPCClient) ListObjects(ctx context.Context, req *storagepb.ListObjects
}
func (c *gRPCClient) RewriteObject(ctx context.Context, req *storagepb.RewriteObjectRequest, opts ...gax.CallOption) (*storagepb.RewriteResponse, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append((*c.CallOptions).RewriteObject[0:len((*c.CallOptions).RewriteObject):len((*c.CallOptions).RewriteObject)], opts...)
var resp *storagepb.RewriteResponse
@ -1284,11 +883,6 @@ func (c *gRPCClient) RewriteObject(ctx context.Context, req *storagepb.RewriteOb
}
func (c *gRPCClient) StartResumableWrite(ctx context.Context, req *storagepb.StartResumableWriteRequest, opts ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append((*c.CallOptions).StartResumableWrite[0:len((*c.CallOptions).StartResumableWrite):len((*c.CallOptions).StartResumableWrite)], opts...)
var resp *storagepb.StartResumableWriteResponse
@ -1304,11 +898,6 @@ func (c *gRPCClient) StartResumableWrite(ctx context.Context, req *storagepb.Sta
}
func (c *gRPCClient) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWriteStatusRequest, opts ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append((*c.CallOptions).QueryWriteStatus[0:len((*c.CallOptions).QueryWriteStatus):len((*c.CallOptions).QueryWriteStatus)], opts...)
var resp *storagepb.QueryWriteStatusResponse
@ -1324,11 +913,6 @@ func (c *gRPCClient) QueryWriteStatus(ctx context.Context, req *storagepb.QueryW
}
func (c *gRPCClient) GetServiceAccount(ctx context.Context, req *storagepb.GetServiceAccountRequest, opts ...gax.CallOption) (*storagepb.ServiceAccount, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append((*c.CallOptions).GetServiceAccount[0:len((*c.CallOptions).GetServiceAccount):len((*c.CallOptions).GetServiceAccount)], opts...)
var resp *storagepb.ServiceAccount
@ -1344,11 +928,6 @@ func (c *gRPCClient) GetServiceAccount(ctx context.Context, req *storagepb.GetSe
}
func (c *gRPCClient) CreateHmacKey(ctx context.Context, req *storagepb.CreateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append((*c.CallOptions).CreateHmacKey[0:len((*c.CallOptions).CreateHmacKey):len((*c.CallOptions).CreateHmacKey)], opts...)
var resp *storagepb.CreateHmacKeyResponse
@ -1364,11 +943,6 @@ func (c *gRPCClient) CreateHmacKey(ctx context.Context, req *storagepb.CreateHma
}
func (c *gRPCClient) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHmacKeyRequest, opts ...gax.CallOption) error {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append((*c.CallOptions).DeleteHmacKey[0:len((*c.CallOptions).DeleteHmacKey):len((*c.CallOptions).DeleteHmacKey)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -1380,11 +954,6 @@ func (c *gRPCClient) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHma
}
func (c *gRPCClient) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append((*c.CallOptions).GetHmacKey[0:len((*c.CallOptions).GetHmacKey):len((*c.CallOptions).GetHmacKey)], opts...)
var resp *storagepb.HmacKeyMetadata
@ -1443,11 +1012,6 @@ func (c *gRPCClient) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKe
}
func (c *gRPCClient) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) {
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
defer cancel()
ctx = cctx
}
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append((*c.CallOptions).UpdateHmacKey[0:len((*c.CallOptions).UpdateHmacKey):len((*c.CallOptions).UpdateHmacKey)], opts...)
var resp *storagepb.HmacKeyMetadata

View file

@ -0,0 +1,23 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by gapicgen. DO NOT EDIT.
package storage
import "cloud.google.com/go/storage/internal"
func init() {
versionClient = internal.Version
}

View file

@ -15,4 +15,4 @@
package internal
// Version is the current tagged release of the library.
const Version = "1.21.0"
const Version = "1.22.0"

View file

@ -239,6 +239,8 @@ func conditionStatusCodeOnSuccess(statusCode int) PostPolicyV4Condition {
// GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts.
// The generated URL and fields will then allow an unauthenticated client to perform multipart uploads.
// If initializing a Storage Client, instead use the Bucket.GenerateSignedPostPolicyV4
// method which uses the Client's credentials to handle authentication.
func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) {
if bucket == "" {
return nil, errors.New("storage: bucket must be non-empty")
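A minimal usage sketch of this package-level helper, assuming service-account key material is available; the bucket, object, access ID and key path below are placeholders, not part of this change:
```
package main

import (
	"fmt"
	"log"
	"os"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	// Placeholder key material; a real program would load a service-account key.
	key, err := os.ReadFile("service-account.pem")
	if err != nil {
		log.Fatal(err)
	}
	policy, err := storage.GenerateSignedPostPolicyV4("my-bucket", "uploads/object.txt", &storage.PostPolicyV4Options{
		GoogleAccessID: "svc@my-project.iam.gserviceaccount.com",
		PrivateKey:     key,
		Expires:        time.Now().Add(15 * time.Minute),
	})
	if err != nil {
		log.Fatal(err)
	}
	// policy.URL and policy.Fields are what an unauthenticated client POSTs.
	fmt.Println(policy.URL, policy.Fields)
}
```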

View file

@ -0,0 +1,11 @@
{
"release-type": "go-yoshi",
"separate-pull-requests": true,
"include-component-in-tag": true,
"tag-separator": "/",
"packages": {
"storage": {
"component": "storage"
}
}
}

View file

@ -52,7 +52,6 @@ import (
"google.golang.org/api/transport"
htransport "google.golang.org/api/transport/http"
storagepb "google.golang.org/genproto/googleapis/storage/v2"
"google.golang.org/grpc"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/known/timestamppb"
@ -66,6 +65,9 @@ var (
ErrBucketNotExist = errors.New("storage: bucket doesn't exist")
// ErrObjectNotExist indicates that the object does not exist.
ErrObjectNotExist = errors.New("storage: object doesn't exist")
// errMethodNotSupported indicates that the method called is not currently supported by the client.
// TODO: Export this error when launching the transport-agnostic client.
errMethodNotSupported = errors.New("storage: method is not currently supported")
// errMethodNotValid indicates that given HTTP method is not valid.
errMethodNotValid = fmt.Errorf("storage: HTTP method should be one of %v", reflect.ValueOf(signedURLMethods).MapKeys())
)
@ -84,12 +86,6 @@ const (
// ScopeReadWrite grants permissions to manage your
// data in Google Cloud Storage.
ScopeReadWrite = raw.DevstorageReadWriteScope
// defaultConnPoolSize is the default number of connections
// to initialize in the GAPIC gRPC connection pool. A larger
// connection pool may be necessary for jobs that require
// high throughput and/or leverage many concurrent streams.
defaultConnPoolSize = 4
)
var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), internal.Version)
@ -220,27 +216,6 @@ func newHybridClient(ctx context.Context, opts *hybridClientOptions) (*Client, e
return nil, err
}
// Set emulator options for gRPC if an emulator was specified. Note that in a
// hybrid client, STORAGE_EMULATOR_HOST will set the host to use for HTTP and
// STORAGE_EMULATOR_HOST_GRPC will set the host to use for gRPC (when using a
// local emulator, HTTP and gRPC must use different ports, so this is
// necessary).
// TODO: when full gRPC client is available, remove STORAGE_EMULATOR_HOST_GRPC
// and use STORAGE_EMULATOR_HOST for both the HTTP and gRPC based clients.
if host := os.Getenv("STORAGE_EMULATOR_HOST_GRPC"); host != "" {
// Strip the scheme from the emulator host. WithEndpoint does not take a
// scheme for gRPC.
if strings.Contains(host, "://") {
host = strings.SplitN(host, "://", 2)[1]
}
opts.GRPCOpts = append(opts.GRPCOpts,
option.WithEndpoint(host),
option.WithGRPCDialOption(grpc.WithInsecure()),
option.WithoutAuthentication(),
)
}
g, err := gapic.NewClient(ctx, opts.GRPCOpts...)
if err != nil {
return nil, err
@ -250,14 +225,6 @@ func newHybridClient(ctx context.Context, opts *hybridClientOptions) (*Client, e
return c, nil
}
// defaultGRPCOptions returns a set of the default client options
// for gRPC client initialization.
func defaultGRPCOptions() []option.ClientOption {
return []option.ClientOption{
option.WithGRPCConnectionPool(defaultConnPoolSize),
}
}
// Close closes the Client.
//
// Close need not be called at program exit.
@ -309,10 +276,18 @@ type bucketBoundHostname struct {
}
func (s pathStyle) host(bucket string) string {
if host := os.Getenv("STORAGE_EMULATOR_HOST"); host != "" {
return stripScheme(host)
}
return "storage.googleapis.com"
}
func (s virtualHostedStyle) host(bucket string) string {
if host := os.Getenv("STORAGE_EMULATOR_HOST"); host != "" {
return bucket + "." + stripScheme(host)
}
return bucket + ".storage.googleapis.com"
}
@ -360,6 +335,14 @@ func BucketBoundHostname(hostname string) URLStyle {
return bucketBoundHostname{hostname: hostname}
}
// stripScheme strips the scheme from a host if it contains one.
func stripScheme(host string) string {
if strings.Contains(host, "://") {
host = strings.SplitN(host, "://", 2)[1]
}
return host
}
// SignedURLOptions allows you to restrict the access to the signed URL.
type SignedURLOptions struct {
// GoogleAccessID represents the authorizer of the signed URL generation.
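A standalone sketch of the host-resolution behavior these hunks add; the helpers below mirror the unexported stripScheme and pathStyle.host, and the emulator address is a placeholder:
```
package main

import (
	"fmt"
	"os"
	"strings"
)

// Mirrors the unexported stripScheme helper added above.
func stripScheme(host string) string {
	if strings.Contains(host, "://") {
		host = strings.SplitN(host, "://", 2)[1]
	}
	return host
}

// Mirrors pathStyle.host: STORAGE_EMULATOR_HOST wins when set.
func pathStyleHost(bucket string) string {
	if host := os.Getenv("STORAGE_EMULATOR_HOST"); host != "" {
		return stripScheme(host)
	}
	return "storage.googleapis.com"
}

func main() {
	os.Setenv("STORAGE_EMULATOR_HOST", "http://localhost:9000")
	fmt.Println(pathStyleHost("my-bucket")) // localhost:9000
}
```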
@ -586,6 +569,8 @@ func v4SanitizeHeaders(hdrs []string) []string {
// access to a restricted resource for a limited time without needing a
// Google account or signing in. For more information about signed URLs, see
// https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication
// If initializing a Storage Client, instead use the Bucket.SignedURL method
// which uses the Client's credentials to handle authentication.
func SignedURL(bucket, object string, opts *SignedURLOptions) (string, error) {
now := utcNow()
if err := validateOptions(opts, now); err != nil {
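For reference, a hedged sketch of calling the package-level SignedURL helper documented here; the bucket, object, access ID and key file are placeholders:
```
package main

import (
	"fmt"
	"log"
	"os"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	key, err := os.ReadFile("service-account.pem") // placeholder key file
	if err != nil {
		log.Fatal(err)
	}
	url, err := storage.SignedURL("my-bucket", "object.txt", &storage.SignedURLOptions{
		GoogleAccessID: "svc@my-project.iam.gserviceaccount.com",
		PrivateKey:     key,
		Method:         "GET",
		Expires:        time.Now().Add(15 * time.Minute),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(url)
}
```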
@ -856,7 +841,7 @@ func signedURLV2(bucket, name string, opts *SignedURLOptions) (string, error) {
}
encoded := base64.StdEncoding.EncodeToString(b)
u.Scheme = "https"
u.Host = "storage.googleapis.com"
u.Host = PathStyle().host(bucket)
q := u.Query()
q.Set("GoogleAccessId", opts.GoogleAccessID)
q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix()))
@ -1326,6 +1311,9 @@ type ObjectAttrs struct {
// Composer. In those cases, if the SendCRC32C field in the Writer or Composer
// is set to is true, the uploaded data is rejected if its CRC32C hash does
// not match this field.
//
// Note: For a Writer, SendCRC32C must be set to true BEFORE the first call to
// Writer.Write() in order to send the checksum.
CRC32C uint32
// MediaLink is a URL to the object's content. This field is read-only.
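A short sketch of the ordering requirement this note documents, assuming default credentials; the bucket, object and payload are placeholders:
```
package main

import (
	"context"
	"hash/crc32"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	data := []byte("hello")
	w := client.Bucket("my-bucket").Object("object.txt").NewWriter(ctx)
	// Both checksum fields must be populated before the first Write call.
	w.CRC32C = crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))
	w.SendCRC32C = true
	if _, err := w.Write(data); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err) // the upload is rejected if the checksum does not match
	}
}
```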
@ -1485,7 +1473,7 @@ func newObjectFromProto(o *storagepb.Object) *ObjectAttrs {
EventBasedHold: o.GetEventBasedHold(),
TemporaryHold: o.TemporaryHold,
RetentionExpirationTime: convertProtoTime(o.GetRetentionExpireTime()),
ACL: fromProtoToObjectACLRules(o.GetAcl()),
ACL: toObjectACLRulesFromProto(o.GetAcl()),
Owner: o.GetOwner().GetEntity(),
ContentEncoding: o.ContentEncoding,
ContentDisposition: o.ContentDisposition,
@ -1588,6 +1576,14 @@ type Query struct {
// which returns all properties. Passing ProjectionNoACL will omit Owner and ACL,
// which may improve performance when listing many objects.
Projection Projection
// IncludeTrailingDelimiter controls how objects which end in a single
// instance of Delimiter (for example, if Query.Delimiter = "/" and the
// object name is "foo/bar/") are included in the results. By default, these
// objects only show up as prefixes. If IncludeTrailingDelimiter is set to
// true, they will also be included as objects and their metadata will be
// populated in the returned ObjectAttrs.
IncludeTrailingDelimiter bool
}
// attrToFieldMap maps the field names of ObjectAttrs to the underlying field
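A hedged listing sketch showing the new field; the bucket and prefix are placeholders, and default credentials are assumed:
```
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	it := client.Bucket("my-bucket").Objects(ctx, &storage.Query{
		Prefix:                   "foo/",
		Delimiter:                "/",
		IncludeTrailingDelimiter: true, // "foo/bar/" is returned as an object too
	})
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(attrs.Name, attrs.Prefix)
	}
}
```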
@ -2037,7 +2033,14 @@ func bucketResourceName(p, b string) string {
// parseBucketName strips the leading resource path segment and returns the
// bucket ID, which is the simple Bucket name typical of the v1 API.
func parseBucketName(b string) string {
return strings.TrimPrefix(b, "projects/_/buckets/")
sep := strings.LastIndex(b, "/")
return b[sep+1:]
}
// toProjectResource accepts a project ID and formats it as a Project resource
// name.
func toProjectResource(project string) string {
return fmt.Sprintf("projects/%s", project)
}
// setConditionProtoField uses protobuf reflection to set named condition field
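The rewritten parser simply takes everything after the final "/", so any resource prefix is stripped, not just "projects/_/buckets/". A standalone sketch of that logic:
```
package main

import (
	"fmt"
	"strings"
)

// Mirrors the new unexported parseBucketName logic.
func parseBucketName(b string) string {
	sep := strings.LastIndex(b, "/")
	return b[sep+1:] // LastIndex returns -1 for bare names, so b[0:] is returned
}

func main() {
	fmt.Println(parseBucketName("projects/_/buckets/my-bucket")) // my-bucket
	fmt.Println(parseBucketName("my-bucket"))                    // my-bucket
}
```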

View file

@ -48,11 +48,15 @@ type Writer struct {
// attributes are ignored.
ObjectAttrs
// SendCRC specifies whether to transmit a CRC32C field. It should be set
// SendCRC32C specifies whether to transmit a CRC32C field. It should be set
// to true in addition to setting the Writer's CRC32C field, because zero
// is a valid CRC and normally a zero would not be transmitted.
// If a CRC32C is sent, and the data written does not match the checksum,
// the write will be rejected.
//
// Note: SendCRC32C must be set to true BEFORE the first call to
// Writer.Write() in order to send the checksum. If it is set after that
// point, the checksum will be ignored.
SendCRC32C bool
// ChunkSize controls the maximum number of bytes of the object that the

View file

@ -306,9 +306,6 @@ func (p *parser) parseWithArgExpr() (*withArgExpr, error) {
return nil, fmt.Errorf(`withArgExpr: unexpected token %q; want "ident"`, p.lex.Token)
}
wa.Name = unescapeIdent(p.lex.Token)
if isAggrFunc(wa.Name) || IsRollupFunc(wa.Name) || IsTransformFunc(wa.Name) || isWith(wa.Name) {
return nil, fmt.Errorf(`withArgExpr: cannot use reserved name %q`, wa.Name)
}
if err := p.lex.Next(); err != nil {
return nil, err
}
@ -682,17 +679,21 @@ func expandWithExpr(was []*withArgExpr, e Expr) (Expr, error) {
return nil, err
}
wa := getWithArgExpr(was, t.Name)
if wa == nil {
fe := *t
fe.Args = args
return &fe, nil
if wa != nil {
return expandWithExprExt(was, wa, args)
}
return expandWithExprExt(was, wa, args)
fe := *t
fe.Args = args
return &fe, nil
case *AggrFuncExpr:
args, err := expandWithArgs(was, t.Args)
if err != nil {
return nil, err
}
wa := getWithArgExpr(was, t.Name)
if wa != nil {
return expandWithExprExt(was, wa, args)
}
modifierArgs, err := expandModifierArgs(was, t.Modifier.Args)
if err != nil {
return nil, err

View file

@ -18551,6 +18551,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
Region: "ap-northeast-3",
}: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@ -18560,6 +18563,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},

View file

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.43.34"
const SDKVersion = "1.43.37"

View file

@ -14,9 +14,9 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.27.1
// protoc-gen-go v1.28.0
// protoc v3.15.8
// source: error.proto
// source: apierror/internal/proto/error.proto
package jsonerror
@ -55,7 +55,7 @@ type Error struct {
func (x *Error) Reset() {
*x = Error{}
if protoimpl.UnsafeEnabled {
mi := &file_error_proto_msgTypes[0]
mi := &file_apierror_internal_proto_error_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -68,7 +68,7 @@ func (x *Error) String() string {
func (*Error) ProtoMessage() {}
func (x *Error) ProtoReflect() protoreflect.Message {
mi := &file_error_proto_msgTypes[0]
mi := &file_apierror_internal_proto_error_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -81,7 +81,7 @@ func (x *Error) ProtoReflect() protoreflect.Message {
// Deprecated: Use Error.ProtoReflect.Descriptor instead.
func (*Error) Descriptor() ([]byte, []int) {
return file_error_proto_rawDescGZIP(), []int{0}
return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0}
}
func (x *Error) GetError() *Error_Status {
@ -112,7 +112,7 @@ type Error_Status struct {
func (x *Error_Status) Reset() {
*x = Error_Status{}
if protoimpl.UnsafeEnabled {
mi := &file_error_proto_msgTypes[1]
mi := &file_apierror_internal_proto_error_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -125,7 +125,7 @@ func (x *Error_Status) String() string {
func (*Error_Status) ProtoMessage() {}
func (x *Error_Status) ProtoReflect() protoreflect.Message {
mi := &file_error_proto_msgTypes[1]
mi := &file_apierror_internal_proto_error_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -138,7 +138,7 @@ func (x *Error_Status) ProtoReflect() protoreflect.Message {
// Deprecated: Use Error_Status.ProtoReflect.Descriptor instead.
func (*Error_Status) Descriptor() ([]byte, []int) {
return file_error_proto_rawDescGZIP(), []int{0, 0}
return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0, 0}
}
func (x *Error_Status) GetCode() int32 {
@ -169,53 +169,55 @@ func (x *Error_Status) GetDetails() []*anypb.Any {
return nil
}
var File_error_proto protoreflect.FileDescriptor
var File_apierror_internal_proto_error_proto protoreflect.FileDescriptor
var file_error_proto_rawDesc = []byte{
0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65,
0x72, 0x72, 0x6f, 0x72, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5, 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72,
0x12, 0x29, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x13, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x53, 0x74,
0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x90, 0x01, 0x0a, 0x06,
0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01,
0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65,
0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04,
0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70,
0x63, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2e,
0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32,
0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x43,
0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76,
0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72,
0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72,
0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
var file_apierror_internal_proto_error_proto_rawDesc = []byte{
0x0a, 0x23, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72,
0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x19, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e,
0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5,
0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f,
0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e,
0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x65, 0x72,
0x72, 0x6f, 0x72, 0x1a, 0x90, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12,
0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f,
0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20,
0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x06,
0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x06,
0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c,
0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64,
0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f,
0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72,
0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x33,
}
var (
file_error_proto_rawDescOnce sync.Once
file_error_proto_rawDescData = file_error_proto_rawDesc
file_apierror_internal_proto_error_proto_rawDescOnce sync.Once
file_apierror_internal_proto_error_proto_rawDescData = file_apierror_internal_proto_error_proto_rawDesc
)
func file_error_proto_rawDescGZIP() []byte {
file_error_proto_rawDescOnce.Do(func() {
file_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_error_proto_rawDescData)
func file_apierror_internal_proto_error_proto_rawDescGZIP() []byte {
file_apierror_internal_proto_error_proto_rawDescOnce.Do(func() {
file_apierror_internal_proto_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_apierror_internal_proto_error_proto_rawDescData)
})
return file_error_proto_rawDescData
return file_apierror_internal_proto_error_proto_rawDescData
}
var file_error_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_error_proto_goTypes = []interface{}{
var file_apierror_internal_proto_error_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_apierror_internal_proto_error_proto_goTypes = []interface{}{
(*Error)(nil), // 0: error.Error
(*Error_Status)(nil), // 1: error.Error.Status
(code.Code)(0), // 2: google.rpc.Code
(*anypb.Any)(nil), // 3: google.protobuf.Any
}
var file_error_proto_depIdxs = []int32{
var file_apierror_internal_proto_error_proto_depIdxs = []int32{
1, // 0: error.Error.error:type_name -> error.Error.Status
2, // 1: error.Error.Status.status:type_name -> google.rpc.Code
3, // 2: error.Error.Status.details:type_name -> google.protobuf.Any
@ -226,13 +228,13 @@ var file_error_proto_depIdxs = []int32{
0, // [0:3] is the sub-list for field type_name
}
func init() { file_error_proto_init() }
func file_error_proto_init() {
if File_error_proto != nil {
func init() { file_apierror_internal_proto_error_proto_init() }
func file_apierror_internal_proto_error_proto_init() {
if File_apierror_internal_proto_error_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
file_apierror_internal_proto_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Error); i {
case 0:
return &v.state
@ -244,7 +246,7 @@ func file_error_proto_init() {
return nil
}
}
file_error_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
file_apierror_internal_proto_error_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Error_Status); i {
case 0:
return &v.state
@ -261,18 +263,18 @@ func file_error_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_error_proto_rawDesc,
RawDescriptor: file_apierror_internal_proto_error_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_error_proto_goTypes,
DependencyIndexes: file_error_proto_depIdxs,
MessageInfos: file_error_proto_msgTypes,
GoTypes: file_apierror_internal_proto_error_proto_goTypes,
DependencyIndexes: file_apierror_internal_proto_error_proto_depIdxs,
MessageInfos: file_apierror_internal_proto_error_proto_msgTypes,
}.Build()
File_error_proto = out.File
file_error_proto_rawDesc = nil
file_error_proto_goTypes = nil
file_error_proto_depIdxs = nil
File_apierror_internal_proto_error_proto = out.File
file_apierror_internal_proto_error_proto_rawDesc = nil
file_apierror_internal_proto_error_proto_goTypes = nil
file_apierror_internal_proto_error_proto_depIdxs = nil
}

View file

@ -173,6 +173,21 @@ func (o grpcOpt) Resolve(s *CallSettings) {
s.GRPC = o
}
type pathOpt struct {
p string
}
func (p pathOpt) Resolve(s *CallSettings) {
s.Path = p.p
}
// WithPath applies a Path override to the HTTP-based APICall.
//
// This is for internal use only.
func WithPath(p string) CallOption {
return &pathOpt{p: p}
}
// WithGRPCOptions allows passing gRPC call options during client creation.
func WithGRPCOptions(opt ...grpc.CallOption) CallOption {
return grpcOpt(append([]grpc.CallOption(nil), opt...))
@ -186,4 +201,7 @@ type CallSettings struct {
// CallOptions to be forwarded to GRPC.
GRPC []grpc.CallOption
// Path is an HTTP override for an APICall.
Path string
}
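WithPath is documented as internal-use only, but a tiny sketch shows how the option resolves into CallSettings (the path value is a placeholder):
```
package main

import (
	"fmt"

	gax "github.com/googleapis/gax-go/v2"
)

func main() {
	var settings gax.CallSettings
	gax.WithPath("/upload/storage/v1/b/my-bucket/o").Resolve(&settings)
	fmt.Println(settings.Path) // /upload/storage/v1/b/my-bucket/o
}
```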

View file

@ -36,4 +36,4 @@
package gax
// Version specifies the gax-go version being used.
const Version = "2.2.0"
const Version = "2.3.0"

202
vendor/github.com/googleapis/go-type-adapters/LICENSE generated vendored Normal file
View file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -0,0 +1,81 @@
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adapters
import (
"image/color"
"math"
cpb "google.golang.org/genproto/googleapis/type/color"
wpb "google.golang.org/protobuf/types/known/wrapperspb"
)
// ProtoColorToRGBA returns an RGBA based on the provided google.type.Color.
// If alpha is not set in the proto, full opacity is assumed.
//
// Note: Converting between a float using [0, 1] and an int using [0, 256)
// causes some cognitive dissonance between accuracy and user expectations.
// For example, most people writing CSS use 0x80 (decimal 128) to mean "half",
// but it is not actually half (it is slightly over). There is actually no
// way to precisely specify the 0.5 float value in a [0, 256) range of
// integers.
//
// This function uses math.Round to address this, meaning that 0.5 will be
// rounded up to 128 rather than rounded down to 127.
//
// Because of this fuzziness and precision loss, it is NOT guaranteed that
// ProtoColorToRGBA and RGBAToProtoColor are exact inverses, and both functions
// will lose precision.
func ProtoColorToRGBA(c *cpb.Color) *color.RGBA {
// Determine the appropriate alpha value.
// If alpha is unset, full opacity is the proper default.
alpha := uint8(255)
if c.Alpha != nil {
alpha = uint8(math.Round(float64(c.GetAlpha().GetValue() * 255)))
}
// Return the RGBA.
return &color.RGBA{
R: uint8(math.Round(float64(c.GetRed()) * 255)),
G: uint8(math.Round(float64(c.GetGreen()) * 255)),
B: uint8(math.Round(float64(c.GetBlue()) * 255)),
A: alpha,
}
}
// RGBAToProtoColor returns a google.type.Color based on the provided RGBA.
//
// Note: Converting between ints using [0, 256) and a float using [0, 1]
// causes some cognitive dissonance between accuracy and user expectations.
// For example, most people writing CSS use 0x80 (decimal 128) to mean "half",
// but it is not actually half (it is slightly over). There is actually no
// way to precisely specify the 0.5 float value in a [0, 256) range of
// integers.
//
// This function addresses this by limiting decimal precision to 0.01, on
// the rationale that most precision beyond this point is probably
// unintentional.
//
// Because of this fuzziness and precision loss, it is NOT guaranteed that
// ProtoColorToRGBA and RGBAToProtoColor are exact inverses, and both functions
// will lose precision.
func RGBAToProtoColor(rgba *color.RGBA) *cpb.Color {
return &cpb.Color{
Red: float32(int(rgba.R)*100/255) / 100,
Green: float32(int(rgba.G)*100/255) / 100,
Blue: float32(int(rgba.B)*100/255) / 100,
Alpha: &wpb.FloatValue{Value: float32(int(rgba.A)*100/255) / 100},
}
}
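A round-trip sketch of the two color helpers; as the comments above warn, the conversion is lossy, so this only illustrates the API shape:
```
package main

import (
	"fmt"
	"image/color"

	"github.com/googleapis/go-type-adapters/adapters"
)

func main() {
	rgba := &color.RGBA{R: 128, G: 64, B: 32, A: 255}
	c := adapters.RGBAToProtoColor(rgba)
	back := adapters.ProtoColorToRGBA(c)
	fmt.Println(c.GetRed(), back.R) // 0.5 128 (precision loss is expected)
}
```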

View file

@ -0,0 +1,54 @@
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adapters
import (
"time"
dpb "google.golang.org/genproto/googleapis/type/date"
)
// ProtoDateToLocalTime returns a new Time based on the google.type.Date, in
// the system's time zone.
//
// Hours, minutes, seconds, and nanoseconds are set to 0.
func ProtoDateToLocalTime(d *dpb.Date) time.Time {
return ProtoDateToTime(d, time.Local)
}
// ProtoDateToUTCTime returns a new Time based on the google.type.Date, in UTC.
//
// Hours, minutes, seconds, and nanoseconds are set to 0.
func ProtoDateToUTCTime(d *dpb.Date) time.Time {
return ProtoDateToTime(d, time.UTC)
}
// ProtoDateToTime returns a new Time based on the google.type.Date and provided
// *time.Location.
//
// Hours, minutes, seconds, and nanoseconds are set to 0.
func ProtoDateToTime(d *dpb.Date, l *time.Location) time.Time {
return time.Date(int(d.GetYear()), time.Month(d.GetMonth()), int(d.GetDay()), 0, 0, 0, 0, l)
}
// TimeToProtoDate returns a new google.type.Date based on the provided time.Time.
// The location is ignored, as is anything more precise than the day.
func TimeToProtoDate(t time.Time) *dpb.Date {
return &dpb.Date{
Year: int32(t.Year()),
Month: int32(t.Month()),
Day: int32(t.Day()),
}
}
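A quick sketch of the date helpers (the concrete date is arbitrary):
```
package main

import (
	"fmt"
	"time"

	"github.com/googleapis/go-type-adapters/adapters"
	dpb "google.golang.org/genproto/googleapis/type/date"
)

func main() {
	d := &dpb.Date{Year: 2022, Month: 4, Day: 12}
	fmt.Println(adapters.ProtoDateToUTCTime(d)) // 2022-04-12 00:00:00 +0000 UTC
	fmt.Println(adapters.TimeToProtoDate(time.Now()))
}
```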

View file

@ -0,0 +1,93 @@
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adapters
import (
"fmt"
"regexp"
"strconv"
"time"
dtpb "google.golang.org/genproto/googleapis/type/datetime"
durpb "google.golang.org/protobuf/types/known/durationpb"
)
// ProtoDateTimeToTime returns a new Time based on the google.type.DateTime.
//
// It errors if it gets invalid time zone information.
func ProtoDateTimeToTime(d *dtpb.DateTime) (time.Time, error) {
var err error
// Determine the location.
loc := time.UTC
if tz := d.GetTimeZone(); tz != nil {
loc, err = time.LoadLocation(tz.GetId())
if err != nil {
return time.Time{}, err
}
}
if offset := d.GetUtcOffset(); offset != nil {
hours := int(offset.GetSeconds()) / 3600
loc = time.FixedZone(fmt.Sprintf("UTC%+d", hours), hours)
}
// Return the Time.
return time.Date(
int(d.GetYear()),
time.Month(d.GetMonth()),
int(d.GetDay()),
int(d.GetHours()),
int(d.GetMinutes()),
int(d.GetSeconds()),
int(d.GetNanos()),
loc,
), nil
}
// TimeToProtoDateTime returns a new google.type.DateTime based on the
// provided time.Time.
//
// It errors if it gets invalid time zone information.
func TimeToProtoDateTime(t time.Time) (*dtpb.DateTime, error) {
dt := &dtpb.DateTime{
Year: int32(t.Year()),
Month: int32(t.Month()),
Day: int32(t.Day()),
Hours: int32(t.Hour()),
Minutes: int32(t.Minute()),
Seconds: int32(t.Second()),
Nanos: int32(t.Nanosecond()),
}
// If the location is a UTC offset, encode it as such in the proto.
loc := t.Location().String()
if match := offsetRegexp.FindStringSubmatch(loc); len(match) > 0 {
offsetInt, err := strconv.Atoi(match[1])
if err != nil {
return nil, err
}
dt.TimeOffset = &dtpb.DateTime_UtcOffset{
UtcOffset: &durpb.Duration{Seconds: int64(offsetInt) * 3600},
}
} else if loc != "" {
dt.TimeOffset = &dtpb.DateTime_TimeZone{
TimeZone: &dtpb.TimeZone{Id: loc},
}
}
return dt, nil
}
var offsetRegexp = regexp.MustCompile(`^UTC([+-][\d]{1,2})$`)
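A round-trip sketch; the fixed "UTC+3" zone is chosen so it matches offsetRegexp and is encoded as a UtcOffset:
```
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/googleapis/go-type-adapters/adapters"
)

func main() {
	in := time.Date(2022, time.April, 12, 16, 23, 0, 0, time.FixedZone("UTC+3", 3*3600))
	dt, err := adapters.TimeToProtoDateTime(in)
	if err != nil {
		log.Fatal(err)
	}
	out, err := adapters.ProtoDateTimeToTime(dt)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // 2022-04-12 16:23:00 +0300 UTC+3
}
```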

View file

@ -0,0 +1,68 @@
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adapters
import (
"fmt"
"math"
"math/big"
"regexp"
"strings"
dpb "google.golang.org/genproto/googleapis/type/decimal"
)
// ProtoDecimalToFloat converts the provided google.type.Decimal to a big.Float.
func ProtoDecimalToFloat(d *dpb.Decimal) (*big.Float, error) {
value := strings.ToLower(d.GetValue())
// Determine the required precision.
v := value
if strings.ContainsRune(v, 'e') {
v = v[0:strings.IndexRune(v, 'e')]
}
v = nan.ReplaceAllLiteralString(v, "")
prec := uint(math.Pow(2, float64(len(v)+1)))
// Parse and return a big.Float.
f, _, err := big.ParseFloat(value, 10, prec, big.AwayFromZero)
return f, err
}
// ProtoDecimalToFloat64 converts the provided google.type.Decimal to a float64.
func ProtoDecimalToFloat64(d *dpb.Decimal) (float64, big.Accuracy, error) {
f, err := ProtoDecimalToFloat(d)
if err != nil {
return 0.0, big.Exact, err
}
f64, accuracy := f.Float64()
return f64, accuracy, nil
}
// Float64ToProtoDecimal converts the provided float64 to a google.type.Decimal.
func Float64ToProtoDecimal(f float64) *dpb.Decimal {
return &dpb.Decimal{
Value: fmt.Sprintf("%f", f),
}
}
// FloatToProtoDecimal converts the provided big.Float to a google.type.Decimal.
func FloatToProtoDecimal(f *big.Float) *dpb.Decimal {
return &dpb.Decimal{
Value: f.String(),
}
}
var nan = regexp.MustCompile(`[^\d]`)
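A short sketch of the decimal helpers; the scientific-notation input exercises the precision sizing above:
```
package main

import (
	"fmt"
	"log"

	"github.com/googleapis/go-type-adapters/adapters"
	dpb "google.golang.org/genproto/googleapis/type/decimal"
)

func main() {
	f64, acc, err := adapters.ProtoDecimalToFloat64(&dpb.Decimal{Value: "2.5e8"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(f64, acc) // 2.5e+08 Exact
	fmt.Println(adapters.Float64ToProtoDecimal(3.14).GetValue()) // 3.140000
}
```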

View file

@ -0,0 +1,17 @@
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package adapters provides helper functions for the google.type protobuf
// messages (Decimal, Fraction, etc.).
package adapters

View file

@ -0,0 +1,35 @@
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adapters
import (
"math/big"
fpb "google.golang.org/genproto/googleapis/type/fraction"
)
// ProtoFractionToRat returns a math/big Rat (rational number) based on the given
// google.type.fraction.
func ProtoFractionToRat(f *fpb.Fraction) *big.Rat {
return big.NewRat(f.GetNumerator(), f.GetDenominator())
}
// RatToProtoFraction returns a google.type.Fraction from a math/big Rat.
func RatToProtoFraction(r *big.Rat) *fpb.Fraction {
return &fpb.Fraction{
Numerator: r.Num().Int64(),
Denominator: r.Denom().Int64(),
}
}
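A sketch of the fraction helpers; note that big.Rat normalizes, so 2/4 comes back as 1/2:
```
package main

import (
	"fmt"
	"math/big"

	"github.com/googleapis/go-type-adapters/adapters"
	fpb "google.golang.org/genproto/googleapis/type/fraction"
)

func main() {
	r := adapters.ProtoFractionToRat(&fpb.Fraction{Numerator: 1, Denominator: 3})
	fmt.Println(r.FloatString(4)) // 0.3333
	fmt.Println(adapters.RatToProtoFraction(big.NewRat(2, 4))) // numerator:1 denominator:2
}
```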

View file

@ -0,0 +1,31 @@
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adapters
import (
"time"
mpb "google.golang.org/genproto/googleapis/type/month"
)
// ToMonth converts a google.type.Month to a golang Month.
func ToMonth(m mpb.Month) time.Month {
return time.Month(m.Number())
}
// ToProtoMonth converts a golang Month to a google.type.Month.
func ToProtoMonth(m time.Month) mpb.Month {
return mpb.Month(m)
}
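And a sketch of the month helpers:
```
package main

import (
	"fmt"
	"time"

	"github.com/googleapis/go-type-adapters/adapters"
	mpb "google.golang.org/genproto/googleapis/type/month"
)

func main() {
	fmt.Println(adapters.ToMonth(mpb.Month_APRIL))    // April
	fmt.Println(adapters.ToProtoMonth(time.December)) // DECEMBER
}
```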

View file

@ -21,9 +21,9 @@
// explicitly to each function that needs it. The Context should be the first
// parameter, typically named ctx:
//
// func DoSomething(ctx context.Context, arg Arg) error {
// // ... use ctx ...
// }
// func DoSomething(ctx context.Context, arg Arg) error {
// // ... use ctx ...
// }
//
// Do not pass a nil Context, even if a function permits it. Pass context.TODO
// if you are unsure about which Context to use.

View file

@ -54,11 +54,11 @@ func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
// defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx)
// }
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
// defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx)
// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return WithDeadline(parent, time.Now().Add(timeout))
}

View file

@ -264,11 +264,11 @@ func (c *timerCtx) cancel(removeFromParent bool, err error) {
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
// defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx)
// }
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
// defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx)
// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return WithDeadline(parent, time.Now().Add(timeout))
}

View file

@ -173,13 +173,15 @@ func tokenEqual(t1, t2 string) bool {
// isLWS reports whether b is linear white space, according
// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
// LWS = [CRLF] 1*( SP | HT )
//
// LWS = [CRLF] 1*( SP | HT )
func isLWS(b byte) bool { return b == ' ' || b == '\t' }
// isCTL reports whether b is a control byte, according
// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
// CTL = <any US-ASCII control character
// (octets 0 - 31) and DEL (127)>
//
// CTL = <any US-ASCII control character
// (octets 0 - 31) and DEL (127)>
func isCTL(b byte) bool {
const del = 0x7f // a CTL
return b < ' ' || b == del
@ -189,12 +191,13 @@ func isCTL(b byte) bool {
// HTTP/2 imposes the additional restriction that uppercase ASCII
// letters are not allowed.
//
// RFC 7230 says:
// header-field = field-name ":" OWS field-value OWS
// field-name = token
// token = 1*tchar
// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
// RFC 7230 says:
//
// header-field = field-name ":" OWS field-value OWS
// field-name = token
// token = 1*tchar
// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
func ValidHeaderFieldName(v string) bool {
if len(v) == 0 {
return false
@ -267,27 +270,28 @@ var validHostByte = [256]bool{
// ValidHeaderFieldValue reports whether v is a valid "field-value" according to
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 :
//
// message-header = field-name ":" [ field-value ]
// field-value = *( field-content | LWS )
// field-content = <the OCTETs making up the field-value
// and consisting of either *TEXT or combinations
// of token, separators, and quoted-string>
// message-header = field-name ":" [ field-value ]
// field-value = *( field-content | LWS )
// field-content = <the OCTETs making up the field-value
// and consisting of either *TEXT or combinations
// of token, separators, and quoted-string>
//
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 :
//
// TEXT = <any OCTET except CTLs,
// but including LWS>
// LWS = [CRLF] 1*( SP | HT )
// CTL = <any US-ASCII control character
// (octets 0 - 31) and DEL (127)>
// TEXT = <any OCTET except CTLs,
// but including LWS>
// LWS = [CRLF] 1*( SP | HT )
// CTL = <any US-ASCII control character
// (octets 0 - 31) and DEL (127)>
//
// RFC 7230 says:
// field-value = *( field-content / obs-fold )
// obj-fold = N/A to http2, and deprecated
// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
// field-vchar = VCHAR / obs-text
// obs-text = %x80-FF
// VCHAR = "any visible [USASCII] character"
//
// field-value = *( field-content / obs-fold )
// obj-fold = N/A to http2, and deprecated
// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
// field-vchar = VCHAR / obs-text
// obs-text = %x80-FF
// VCHAR = "any visible [USASCII] character"
//
// http2 further says: "Similarly, HTTP/2 allows header field values
// that are not valid. While most of the values that can be encoded

View file

@ -13,7 +13,6 @@
// See https://http2.github.io/ for more information on HTTP/2.
//
// See https://http2.golang.org/ for a test server running this code.
//
package http2 // import "golang.org/x/net/http2"
import (
@ -176,10 +175,11 @@ func (s SettingID) String() string {
// name (key). See httpguts.ValidHeaderName for the base rules.
//
// Further, http2 says:
// "Just as in HTTP/1.x, header field names are strings of ASCII
// characters that are compared in a case-insensitive
// fashion. However, header field names MUST be converted to
// lowercase prior to their encoding in HTTP/2. "
//
// "Just as in HTTP/1.x, header field names are strings of ASCII
// characters that are compared in a case-insensitive
// fashion. However, header field names MUST be converted to
// lowercase prior to their encoding in HTTP/2. "
func validWireHeaderFieldName(v string) bool {
if len(v) == 0 {
return false
@ -365,8 +365,8 @@ func (s *sorter) SortStrings(ss []string) {
// validPseudoPath reports whether v is a valid :path pseudo-header
// value. It must be either:
//
// *) a non-empty string starting with '/'
// *) the string '*', for OPTIONS requests.
// - a non-empty string starting with '/'
// - the string '*', for OPTIONS requests.
//
// For now this is only used as a quick check for deciding when to clean
// up Opaque URLs before sending requests from the Transport.
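The helper is unexported, but the two rules reduce to a one-liner; a minimal sketch under that assumption:

```go
// validPseudoPath reports whether v is a valid :path pseudo-header value:
// either a non-empty string starting with '/', or '*' for OPTIONS.
func validPseudoPath(v string) bool {
	return (len(v) > 0 && v[0] == '/') || v == "*"
}
```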

View file

@ -2546,8 +2546,9 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
// prior to the headers being written. If the set of trailers is fixed
// or known before the header is written, the normal Go trailers mechanism
// is preferred:
// https://golang.org/pkg/net/http/#ResponseWriter
// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
//
// https://golang.org/pkg/net/http/#ResponseWriter
// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
const TrailerPrefix = "Trailer:"
// promoteUndeclaredTrailers permits http.Handlers to set trailers
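A handler-side sketch of this escape hatch, using the mirrored net/http constant (the X-Checksum trailer name is illustrative):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"net/http"
)

func main() {
	http.ListenAndServe(":8080", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		body := []byte("hello")
		sum := sha256.Sum256(body)
		w.Write(body) // headers flush here; normal trailer declaration is no longer possible
		// The "Trailer:" prefix marks this entry as a late-discovered trailer.
		w.Header().Set(http.TrailerPrefix+"X-Checksum", hex.EncodeToString(sum[:]))
	}))
}
```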

View file

@ -17,23 +17,23 @@ package idna
//
// The per-rune values have the following format:
//
// if mapped {
// if inlinedXOR {
// 15..13 inline XOR marker
// 12..11 unused
// 10..3 inline XOR mask
// } else {
// 15..3 index into xor or mapping table
// }
// } else {
// 15..14 unused
// 13 mayNeedNorm
// 12..11 attributes
// 10..8 joining type
// 7..3 category type
// }
// 2 use xor pattern
// 1..0 mapped category
// if mapped {
// if inlinedXOR {
// 15..13 inline XOR marker
// 12..11 unused
// 10..3 inline XOR mask
// } else {
// 15..3 index into xor or mapping table
// }
// } else {
// 15..14 unused
// 13 mayNeedNorm
// 12..11 attributes
// 10..8 joining type
// 7..3 category type
// }
// 2 use xor pattern
// 1..0 mapped category
//
// See the definitions below for a more detailed description of the various
// bits.
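As a rough illustration of the low bits only, a hypothetical helper (the real lookup lives in the generated idna tables):

```go
// unpackLow extracts the fields common to both branches of the layout above:
// bits 1..0 hold the mapped category, bit 2 selects the xor pattern.
func unpackLow(v uint16) (mappedCategory uint16, useXOR bool) {
	return v & 0x3, v&0x4 != 0
}
```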

View file

@ -94,20 +94,20 @@ func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSourc
// It looks for credentials in the following places,
// preferring the first location found:
//
// 1. A JSON file whose path is specified by the
// GOOGLE_APPLICATION_CREDENTIALS environment variable.
// For workload identity federation, refer to
// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation on
// how to generate the JSON configuration file for on-prem/non-Google cloud
// platforms.
// 2. A JSON file in a location known to the gcloud command-line tool.
// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json.
// On other systems, $HOME/.config/gcloud/application_default_credentials.json.
// 3. On Google App Engine standard first generation runtimes (<= Go 1.9) it uses
// the appengine.AccessToken function.
// 4. On Google Compute Engine, Google App Engine standard second generation runtimes
// (>= Go 1.11), and Google App Engine flexible environment, it fetches
// credentials from the metadata server.
// 1. A JSON file whose path is specified by the
// GOOGLE_APPLICATION_CREDENTIALS environment variable.
// For workload identity federation, refer to
// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation on
// how to generate the JSON configuration file for on-prem/non-Google cloud
// platforms.
// 2. A JSON file in a location known to the gcloud command-line tool.
// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json.
// On other systems, $HOME/.config/gcloud/application_default_credentials.json.
// 3. On Google App Engine standard first generation runtimes (<= Go 1.9) it uses
// the appengine.AccessToken function.
// 4. On Google Compute Engine, Google App Engine standard second generation runtimes
// (>= Go 1.11), and Google App Engine flexible environment, it fetches
// credentials from the metadata server.
func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsParams) (*Credentials, error) {
// Make defensive copy of the slices in params.
params = params.deepCopy()
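A minimal sketch of walking this lookup order via the plain FindDefaultCredentials entry point (the scope is illustrative):

```go
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()
	// Tries the four locations listed above, in order.
	creds, err := google.FindDefaultCredentials(ctx, "https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatal(err)
	}
	_ = creds.TokenSource // ready to authenticate API clients
}
```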

View file

@ -15,14 +15,14 @@
// For more information on using workload identity federation, refer to
// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation.
//
// OAuth2 Configs
// # OAuth2 Configs
//
// Two functions in this package return golang.org/x/oauth2.Config values from Google credential
// data. Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON,
// the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or
// create an http.Client.
//
// Workload Identity Federation
// # Workload Identity Federation
//
// Using workload identity federation, your application can access Google Cloud
// resources from Amazon Web Services (AWS), Microsoft Azure or any identity
@ -36,9 +36,9 @@
// Follow the detailed instructions on how to configure Workload Identity Federation
// in various platforms:
//
// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/access-resources-aws
// Microsoft Azure: https://cloud.google.com/iam/docs/access-resources-azure
// OIDC identity provider: https://cloud.google.com/iam/docs/access-resources-oidc
// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/access-resources-aws
// Microsoft Azure: https://cloud.google.com/iam/docs/access-resources-azure
// OIDC identity provider: https://cloud.google.com/iam/docs/access-resources-oidc
//
// For OIDC providers, the library can retrieve OIDC tokens either from a
// local file location (file-sourced credentials) or from a local server
@ -51,8 +51,7 @@
// return the OIDC token. The response can be in plain text or JSON.
// Additional required request headers can also be specified.
//
//
// Credentials
// # Credentials
//
// The Credentials type represents Google credentials, including Application Default
// Credentials.
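A short sketch of the ConfigFromJSON path mentioned above (the client_secret.json path is illustrative):

```go
package main

import (
	"log"
	"os"

	"golang.org/x/oauth2/google"
)

func main() {
	// client_secret.json stands in for downloaded OAuth2 client credentials.
	data, err := os.ReadFile("client_secret.json")
	if err != nil {
		log.Fatal(err)
	}
	conf, err := google.ConfigFromJSON(data, "https://www.googleapis.com/auth/devstorage.read_only")
	if err != nil {
		log.Fatal(err)
	}
	log.Println(conf.AuthCodeURL("state-token"))
}
```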

54 vendor/golang.org/x/sys/unix/asm_linux_loong64.s generated vendored Normal file
View file

@ -0,0 +1,54 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && loong64 && gc
// +build linux
// +build loong64
// +build gc
#include "textflag.h"
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
JAL runtime·entersyscall(SB)
MOVV a1+8(FP), R4
MOVV a2+16(FP), R5
MOVV a3+24(FP), R6
MOVV R0, R7
MOVV R0, R8
MOVV R0, R9
MOVV trap+0(FP), R11 // syscall entry
SYSCALL
MOVV R4, r1+32(FP)
MOVV R5, r2+40(FP)
JAL runtime·exitsyscall(SB)
RET
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)
TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
MOVV a1+8(FP), R4
MOVV a2+16(FP), R5
MOVV a3+24(FP), R6
MOVV R0, R7
MOVV R0, R8
MOVV R0, R9
MOVV trap+0(FP), R11 // syscall entry
SYSCALL
MOVV R4, r1+32(FP)
MOVV R5, r2+40(FP)
RET

View file

@ -215,6 +215,7 @@ struct ltchars {
#include <linux/ethtool_netlink.h>
#include <linux/falloc.h>
#include <linux/fanotify.h>
#include <linux/fib_rules.h>
#include <linux/filter.h>
#include <linux/fs.h>
#include <linux/fscrypt.h>
@ -613,6 +614,7 @@ ccflags="$@"
$2 ~ /^OTP/ ||
$2 ~ /^MEM/ ||
$2 ~ /^WG/ ||
$2 ~ /^FIB_RULE_/ ||
$2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)}
$2 ~ /^__WCOREFLAG$/ {next}
$2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)}

View file

@ -1829,6 +1829,9 @@ func Dup2(oldfd, newfd int) error {
//sys Fremovexattr(fd int, attr string) (err error)
//sys Fsetxattr(fd int, attr string, dest []byte, flags int) (err error)
//sys Fsync(fd int) (err error)
//sys Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error)
//sys Fsopen(fsName string, flags int) (fd int, err error)
//sys Fspick(dirfd int, pathName string, flags int) (fd int, err error)
//sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64
//sysnb Getpgid(pid int) (pgid int, err error)
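These map to the kernel's new-style mount API: fsopen(2), fsmount(2) and fspick(2). A hedged sketch, assuming a kernel that provides the API; note that a working mount also needs fsconfig(2) between Fsopen and Fsmount, which this change does not wrap:

```go
package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Open a filesystem context for tmpfs (requires CAP_SYS_ADMIN).
	fsfd, err := unix.Fsopen("tmpfs", unix.FSOPEN_CLOEXEC)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fsfd)
	// ... fsconfig(2) with FSCONFIG_CMD_CREATE would go here; not wrapped here ...
	mfd, err := unix.Fsmount(fsfd, unix.FSMOUNT_CLOEXEC, 0)
	if err != nil {
		log.Fatal(err) // fails without the fsconfig step above
	}
	defer unix.Close(mfd)
}
```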

View file

@ -873,6 +873,13 @@ const (
FD_CLOEXEC = 0x1
FD_SETSIZE = 0x400
FF0 = 0x0
FIB_RULE_DEV_DETACHED = 0x8
FIB_RULE_FIND_SADDR = 0x10000
FIB_RULE_IIF_DETACHED = 0x8
FIB_RULE_INVERT = 0x2
FIB_RULE_OIF_DETACHED = 0x10
FIB_RULE_PERMANENT = 0x1
FIB_RULE_UNRESOLVED = 0x4
FIDEDUPERANGE = 0xc0189436
FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8
FSCRYPT_KEY_DESC_PREFIX = "fscrypt:"

View file

@ -828,6 +828,49 @@ func Fsync(fd int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) {
r0, _, e1 := Syscall(SYS_FSMOUNT, uintptr(fd), uintptr(flags), uintptr(mountAttrs))
fsfd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fsopen(fsName string, flags int) (fd int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(fsName)
if err != nil {
return
}
r0, _, e1 := Syscall(SYS_FSOPEN, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fspick(dirfd int, pathName string, flags int) (fd int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(pathName)
if err != nil {
return
}
r0, _, e1 := Syscall(SYS_FSPICK, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getdents(fd int, buf []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {

View file

@ -764,6 +764,15 @@ const (
MOVE_MOUNT_T_AUTOMOUNTS = 0x20
MOVE_MOUNT_T_EMPTY_PATH = 0x40
MOVE_MOUNT_SET_GROUP = 0x100
FSOPEN_CLOEXEC = 0x1
FSPICK_CLOEXEC = 0x1
FSPICK_SYMLINK_NOFOLLOW = 0x2
FSPICK_NO_AUTOMOUNT = 0x4
FSPICK_EMPTY_PATH = 0x8
FSMOUNT_CLOEXEC = 0x1
)
type OpenHow struct {
@ -5542,3 +5551,40 @@ const (
NL80211_WPA_VERSION_2 = 0x2
NL80211_WPA_VERSION_3 = 0x4
)
const (
FRA_UNSPEC = 0x0
FRA_DST = 0x1
FRA_SRC = 0x2
FRA_IIFNAME = 0x3
FRA_GOTO = 0x4
FRA_UNUSED2 = 0x5
FRA_PRIORITY = 0x6
FRA_UNUSED3 = 0x7
FRA_UNUSED4 = 0x8
FRA_UNUSED5 = 0x9
FRA_FWMARK = 0xa
FRA_FLOW = 0xb
FRA_TUN_ID = 0xc
FRA_SUPPRESS_IFGROUP = 0xd
FRA_SUPPRESS_PREFIXLEN = 0xe
FRA_TABLE = 0xf
FRA_FWMASK = 0x10
FRA_OIFNAME = 0x11
FRA_PAD = 0x12
FRA_L3MDEV = 0x13
FRA_UID_RANGE = 0x14
FRA_PROTOCOL = 0x15
FRA_IP_PROTO = 0x16
FRA_SPORT_RANGE = 0x17
FRA_DPORT_RANGE = 0x18
FR_ACT_UNSPEC = 0x0
FR_ACT_TO_TBL = 0x1
FR_ACT_GOTO = 0x2
FR_ACT_NOP = 0x3
FR_ACT_RES3 = 0x4
FR_ACT_RES4 = 0x5
FR_ACT_BLACKHOLE = 0x6
FR_ACT_UNREACHABLE = 0x7
FR_ACT_PROHIBIT = 0x8
)
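These mirror the kernel's fib rule netlink attributes (FRA_*) and actions (FR_ACT_*), as surfaced by ip rule. A small illustrative mapping, assuming only the constants above (the names follow ip-rule(8) terminology):

```go
import "golang.org/x/sys/unix"

// fibRuleAction is an illustrative lookup only, not part of the package.
var fibRuleAction = map[int]string{
	unix.FR_ACT_TO_TBL:      "lookup",
	unix.FR_ACT_GOTO:        "goto",
	unix.FR_ACT_NOP:         "nop",
	unix.FR_ACT_BLACKHOLE:   "blackhole",
	unix.FR_ACT_UNREACHABLE: "unreachable",
	unix.FR_ACT_PROHIBIT:    "prohibit",
}
```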

Some files were not shown because too many files have changed in this diff.