Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

Aliaksandr Valialkin 2021-02-16 22:29:45 +02:00
commit 687eb4ab00
71 changed files with 1305 additions and 560 deletions


@@ -1424,7 +1424,7 @@ See also [high availability docs](#high-availability) and [backup docs](#backups)
 VictoriaMetrics supports backups via [vmbackup](https://victoriametrics.github.io/vmbackup.html)
 and [vmrestore](https://victoriametrics.github.io/vmrestore.html) tools.
-We also provide `vmbackuper` tool for paid enterprise subscribers - see [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/466) for details.
+We also provide `vmbackupmanager` tool for paid enterprise subscribers - see [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/466) for details.
 ## Profiling


@@ -19,7 +19,7 @@ Backed up data can be restored with [vmrestore](https://victoriametrics.github.io/vmrestore.html).
 See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-series-databases-533c1a927883) for more details.
-See also [vmbackuper](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/466) tool built on top of `vmbackup`. This tool simplifies
+See also [vmbackupmanager](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/466) tool built on top of `vmbackup`. This tool simplifies
 creation of hourly, daily, weekly and monthly backups.
@@ -89,7 +89,7 @@ or from any day (`YYYYMMDD` backups). Note that hourly backup shouldn't run when
 Do not forget removing old snapshots and backups when they are no longer needed for saving storage costs.
-See also [vmbackuper tool](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/466) for automating smart backups.
+See also [vmbackupmanager tool](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/466) for automating smart backups.
 ## How does it work?


@@ -91,6 +91,34 @@ type timeseriesWork struct {
 	rowsProcessed int
 }

+func (tsw *timeseriesWork) reset() {
+	tsw.mustStop = 0
+	tsw.rss = nil
+	tsw.pts = nil
+	tsw.f = nil
+	if n := len(tsw.doneCh); n > 0 {
+		logger.Panicf("BUG: tsw.doneCh must be empty during reset; it contains %d items instead", n)
+	}
+	tsw.rowsProcessed = 0
+}
+
+func getTimeseriesWork() *timeseriesWork {
+	v := tswPool.Get()
+	if v == nil {
+		v = &timeseriesWork{
+			doneCh: make(chan error, 1),
+		}
+	}
+	return v.(*timeseriesWork)
+}
+
+func putTimeseriesWork(tsw *timeseriesWork) {
+	tsw.reset()
+	tswPool.Put(tsw)
+}
+
+var tswPool sync.Pool
+
 func init() {
 	for i := 0; i < gomaxprocs; i++ {
 		go timeseriesWorker(uint(i))
@@ -144,12 +172,10 @@ func (rss *Results) RunParallel(f func(rs *Result, workerID uint) error) error {
 	// Feed workers with work.
 	tsws := make([]*timeseriesWork, len(rss.packedTimeseries))
 	for i := range rss.packedTimeseries {
-		tsw := &timeseriesWork{
-			rss:    rss,
-			pts:    &rss.packedTimeseries[i],
-			f:      f,
-			doneCh: make(chan error, 1),
-		}
+		tsw := getTimeseriesWork()
+		tsw.rss = rss
+		tsw.pts = &rss.packedTimeseries[i]
+		tsw.f = f
 		timeseriesWorkCh <- tsw
 		tsws[i] = tsw
 	}
@@ -160,7 +186,8 @@ func (rss *Results) RunParallel(f func(rs *Result, workerID uint) error) error {
 	var firstErr error
 	rowsProcessedTotal := 0
 	for _, tsw := range tsws {
-		if err := <-tsw.doneCh; err != nil && firstErr == nil {
+		err := <-tsw.doneCh
+		if err != nil && firstErr == nil {
 			// Return just the first error, since other errors
 			// likely duplicate the first error.
 			firstErr = err
@@ -170,6 +197,7 @@ func (rss *Results) RunParallel(f func(rs *Result, workerID uint) error) error {
 			}
 		}
 		rowsProcessedTotal += tsw.rowsProcessed
+		putTimeseriesWork(tsw)
 	}

 	perQueryRowsProcessed.Update(float64(rowsProcessedTotal))
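The change above stops allocating a fresh timeseriesWork (and its doneCh channel) per packed time series and recycles the structs through a sync.Pool instead; reset deliberately keeps the buffered channel and panics if it is not drained before reuse. A minimal self-contained sketch of the same idiom, with illustrative names rather than the commit's types:

package main

import (
	"fmt"
	"sync"
)

// work mirrors the shape of timeseriesWork: per-call fields plus a
// buffered completion channel that is allocated once and then reused.
type work struct {
	payload int
	doneCh  chan error
}

var workPool sync.Pool

func getWork() *work {
	v := workPool.Get()
	if v == nil {
		// Pool miss: allocate the channel here; the reset path never clears it.
		v = &work{doneCh: make(chan error, 1)}
	}
	return v.(*work)
}

func putWork(w *work) {
	// Zero only the per-call fields; doneCh survives the round-trip.
	w.payload = 0
	workPool.Put(w)
}

func main() {
	w := getWork()
	w.payload = 42
	go func() { w.doneCh <- nil }() // a worker signals completion
	if err := <-w.doneCh; err == nil {
		fmt.Println("processed", w.payload)
	}
	putWork(w) // return to the pool for the next query
}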
@@ -970,14 +998,15 @@ func ProcessSearchQuery(sq *storage.SearchQuery, fetchData bool, deadline search
 			return nil, fmt.Errorf("cannot write %d bytes to temporary file: %w", len(buf), err)
 		}
 		metricName := sr.MetricBlockRef.MetricName
-		brs := m[string(metricName)]
+		metricNameStrUnsafe := bytesutil.ToUnsafeString(metricName)
+		brs := m[metricNameStrUnsafe]
 		brs = append(brs, blockRef{
 			partRef: sr.MetricBlockRef.BlockRef.PartRef(),
 			addr:    addr,
 		})
 		if len(brs) > 1 {
 			// An optimization: do not allocate a string for already existing metricName key in m
-			m[string(metricName)] = brs
+			m[metricNameStrUnsafe] = brs
 		} else {
 			// An optimization for big number of time series with long metricName values:
 			// use only a single copy of metricName for both orderedMetricNames and m.
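bytesutil.ToUnsafeString returns a string header that aliases the byte slice, so the map lookups above avoid allocating a copy of metricName per block; the aliased string is only valid while the underlying bytes stay alive and unmodified. A sketch of that conversion, with a local toUnsafeString standing in for the library helper:

package main

import (
	"fmt"
	"unsafe"
)

// toUnsafeString reinterprets b as a string without copying.
// The result is valid only while b is neither freed nor modified.
func toUnsafeString(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

func main() {
	m := map[string]int{"metric_name": 1}
	key := []byte("metric_name")

	// Allocation-free lookup: no string copy of key is made.
	fmt.Println(m[toUnsafeString(key)])

	// Inserting a new entry still uses string(key), which copies,
	// so the stored map key never aliases a mutable buffer.
	m[string(key)] = 2
	fmt.Println(m["metric_name"])
}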


@@ -3669,8 +3669,9 @@ func TestExecSuccess(t *testing.T) {
 	t.Run(`histogram(scalar)`, func(t *testing.T) {
 		t.Parallel()
 		q := `sort(histogram(123)+(
-			label_set(0, "le", "1.0e2"),
-			label_set(0, "le", "1.5e2"),
+			label_set(0, "le", "1.000e+02"),
+			label_set(0, "le", "1.136e+02"),
+			label_set(0, "le", "1.292e+02"),
 			label_set(1, "le", "+Inf"),
 		))`
 		r1 := netstorage.Result{
@@ -3681,7 +3682,7 @@ func TestExecSuccess(t *testing.T) {
 		r1.MetricName.Tags = []storage.Tag{
 			{
 				Key:   []byte("le"),
-				Value: []byte("1.0e2"),
+				Value: []byte("1.136e+02"),
 			},
 		}
 		r2 := netstorage.Result{
@@ -3692,7 +3693,7 @@ func TestExecSuccess(t *testing.T) {
 		r2.MetricName.Tags = []storage.Tag{
 			{
 				Key:   []byte("le"),
-				Value: []byte("1.5e2"),
+				Value: []byte("1.292e+02"),
 			},
 		}
 		r3 := netstorage.Result{
@@ -3716,9 +3717,9 @@ func TestExecSuccess(t *testing.T) {
 			label_set(1.1, "xx", "yy"),
 			alias(1.15, "foobar"),
 		))+(
-			label_set(0, "le", "9.5e-1"),
-			label_set(0, "le", "1.0e0"),
-			label_set(0, "le", "1.5e0"),
+			label_set(0, "le", "8.799e-01"),
+			label_set(0, "le", "1.000e+00"),
+			label_set(0, "le", "1.292e+00"),
 			label_set(1, "le", "+Inf"),
 		))`
 		r1 := netstorage.Result{
@@ -3729,7 +3730,7 @@ func TestExecSuccess(t *testing.T) {
 		r1.MetricName.Tags = []storage.Tag{
 			{
 				Key:   []byte("le"),
-				Value: []byte("9.5e-1"),
+				Value: []byte("8.799e-01"),
 			},
 		}
 		r2 := netstorage.Result{
@@ -3740,7 +3741,7 @@ func TestExecSuccess(t *testing.T) {
 		r2.MetricName.Tags = []storage.Tag{
 			{
 				Key:   []byte("le"),
-				Value: []byte("1.0e0"),
+				Value: []byte("1.000e+00"),
 			},
 		}
 		r3 := netstorage.Result{
@@ -3751,7 +3752,7 @@ func TestExecSuccess(t *testing.T) {
 		r3.MetricName.Tags = []storage.Tag{
 			{
 				Key:   []byte("le"),
-				Value: []byte("1.5e0"),
+				Value: []byte("1.292e+00"),
 			},
 		}
 		r4 := netstorage.Result{
@@ -4021,10 +4022,10 @@ func TestExecSuccess(t *testing.T) {
 	})
 	t.Run(`histogram_over_time`, func(t *testing.T) {
 		t.Parallel()
-		q := `sort(histogram_over_time(alias(label_set(rand(0)*1.3+1.1, "foo", "bar"), "xxx")[200s:5s]))`
+		q := `sort_by_label(histogram_over_time(alias(label_set(rand(0)*1.3+1.1, "foo", "bar"), "xxx")[200s:5s]), "vmrange")`
 		r1 := netstorage.Result{
 			MetricName: metricNameExpected,
-			Values:     []float64{14, 16, 12, 13, 15, 11},
+			Values:     []float64{1, 2, 2, 2, nan, 1},
 			Timestamps: timestampsExpected,
 		}
 		r1.MetricName.Tags = []storage.Tag{
@@ -4034,12 +4035,12 @@ func TestExecSuccess(t *testing.T) {
 			},
 			{
 				Key:   []byte("vmrange"),
-				Value: []byte("2.0e0...2.5e0"),
+				Value: []byte("1.000e+00...1.136e+00"),
 			},
 		}
 		r2 := netstorage.Result{
 			MetricName: metricNameExpected,
-			Values:     []float64{13, 14, 12, 8, 12, 13},
+			Values:     []float64{3, 3, 4, 2, 8, 3},
 			Timestamps: timestampsExpected,
 		}
 		r2.MetricName.Tags = []storage.Tag{
@@ -4049,12 +4050,12 @@ func TestExecSuccess(t *testing.T) {
 			},
 			{
 				Key:   []byte("vmrange"),
-				Value: []byte("1.0e0...1.5e0"),
+				Value: []byte("1.136e+00...1.292e+00"),
 			},
 		}
 		r3 := netstorage.Result{
 			MetricName: metricNameExpected,
-			Values:     []float64{13, 10, 16, 19, 13, 16},
+			Values:     []float64{7, 7, 5, 3, 3, 9},
 			Timestamps: timestampsExpected,
 		}
 		r3.MetricName.Tags = []storage.Tag{
@@ -4064,46 +4065,111 @@ func TestExecSuccess(t *testing.T) {
 			},
 			{
 				Key:   []byte("vmrange"),
-				Value: []byte("1.5e0...2.0e0"),
+				Value: []byte("1.292e+00...1.468e+00"),
 			},
 		}
-		resultExpected := []netstorage.Result{r1, r2, r3}
+		r4 := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{7, 4, 6, 5, 6, 4},
+			Timestamps: timestampsExpected,
+		}
+		r4.MetricName.Tags = []storage.Tag{
+			{
+				Key:   []byte("foo"),
+				Value: []byte("bar"),
+			},
+			{
+				Key:   []byte("vmrange"),
+				Value: []byte("1.468e+00...1.668e+00"),
+			},
+		}
+		r5 := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{6, 6, 9, 13, 7, 7},
+			Timestamps: timestampsExpected,
+		}
+		r5.MetricName.Tags = []storage.Tag{
+			{
+				Key:   []byte("foo"),
+				Value: []byte("bar"),
+			},
+			{
+				Key:   []byte("vmrange"),
+				Value: []byte("1.668e+00...1.896e+00"),
+			},
+		}
+		r6 := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{5, 9, 4, 6, 7, 9},
+			Timestamps: timestampsExpected,
+		}
+		r6.MetricName.Tags = []storage.Tag{
+			{
+				Key:   []byte("foo"),
+				Value: []byte("bar"),
+			},
+			{
+				Key:   []byte("vmrange"),
+				Value: []byte("1.896e+00...2.154e+00"),
+			},
+		}
+		r7 := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{11, 9, 10, 9, 9, 7},
+			Timestamps: timestampsExpected,
+		}
+		r7.MetricName.Tags = []storage.Tag{
+			{
+				Key:   []byte("foo"),
+				Value: []byte("bar"),
+			},
+			{
+				Key:   []byte("vmrange"),
+				Value: []byte("2.154e+00...2.448e+00"),
+			},
+		}
+		resultExpected := []netstorage.Result{r1, r2, r3, r4, r5, r6, r7}
 		f(q, resultExpected)
 	})
 	t.Run(`sum(histogram_over_time) by (vmrange)`, func(t *testing.T) {
 		t.Parallel()
-		q := `sort(sum(histogram_over_time(alias(label_set(rand(0)*1.3+1.1, "foo", "bar"), "xxx")[200s:5s])) by (vmrange))`
+		q := `sort_desc(
+			buckets_limit(
+				3,
+				sum(histogram_over_time(alias(label_set(rand(0)*1.3+1.1, "foo", "bar"), "xxx")[200s:5s])) by (vmrange)
+			)
+		)`
 		r1 := netstorage.Result{
 			MetricName: metricNameExpected,
-			Values:     []float64{14, 16, 12, 13, 15, 11},
+			Values:     []float64{40, 40, 40, 40, 40, 40},
 			Timestamps: timestampsExpected,
 		}
 		r1.MetricName.Tags = []storage.Tag{
 			{
-				Key:   []byte("vmrange"),
-				Value: []byte("2.0e0...2.5e0"),
+				Key:   []byte("le"),
+				Value: []byte("+Inf"),
 			},
 		}
 		r2 := netstorage.Result{
 			MetricName: metricNameExpected,
-			Values:     []float64{13, 14, 12, 8, 12, 13},
+			Values:     []float64{24, 22, 26, 25, 24, 24},
 			Timestamps: timestampsExpected,
 		}
 		r2.MetricName.Tags = []storage.Tag{
 			{
-				Key:   []byte("vmrange"),
-				Value: []byte("1.0e0...1.5e0"),
+				Key:   []byte("le"),
+				Value: []byte("1.896e+00"),
 			},
 		}
 		r3 := netstorage.Result{
 			MetricName: metricNameExpected,
-			Values:     []float64{13, 10, 16, 19, 13, 16},
+			Values:     []float64{11, 12, 11, 7, 11, 13},
 			Timestamps: timestampsExpected,
 		}
 		r3.MetricName.Tags = []storage.Tag{
 			{
-				Key:   []byte("vmrange"),
-				Value: []byte("1.5e0...2.0e0"),
+				Key:   []byte("le"),
+				Value: []byte("1.468e+00"),
 			},
 		}
 		resultExpected := []netstorage.Result{r1, r2, r3}
@@ -4125,7 +4191,7 @@ func TestExecSuccess(t *testing.T) {
 		q := `topk_max(1, histogram_over_time(alias(label_set(rand(0)*1.3+1.1, "foo", "bar"), "xxx")[200s:5s]))`
 		r := netstorage.Result{
 			MetricName: metricNameExpected,
-			Values:     []float64{13, 10, 16, 19, 13, 16},
+			Values:     []float64{6, 6, 9, 13, 7, 7},
 			Timestamps: timestampsExpected,
 		}
 		r.MetricName.Tags = []storage.Tag{
@@ -4135,7 +4201,7 @@ func TestExecSuccess(t *testing.T) {
 			},
 			{
 				Key:   []byte("vmrange"),
-				Value: []byte("1.5e0...2.0e0"),
+				Value: []byte("1.668e+00...1.896e+00"),
 			},
 		}
 		resultExpected := []netstorage.Result{r}
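All expected le and vmrange values in these tests move from coarse bounds such as 1.5e0 to %.3e-formatted bounds such as 1.136e+00 and 1.292e+00. The new bounds are consistent with 18 logarithmically spaced buckets per decade, i.e. a step of 10^(1/18) ≈ 1.1364, which lines up with the VictoriaMetrics/metrics v1.14.0 bump in this commit's go.mod; the bucket count is inferred from the values, not stated anywhere in the diff:

package main

import (
	"fmt"
	"math"
)

func main() {
	// 18 log-spaced buckets per decade reproduce the new vmrange bounds.
	const bucketsPerDecimal = 18
	ratio := math.Pow(10, 1.0/bucketsPerDecimal)
	edge := 1.0
	for i := 0; i <= 7; i++ {
		fmt.Printf("%.3e\n", edge) // 1.000e+00, 1.136e+00, 1.292e+00, ...
		edge *= ratio
	}
}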


@@ -10,15 +10,17 @@ BASE_IMAGE := local/base:1.1.3-$(shell echo $(ROOT_IMAGE) | tr : _)-$(shell echo
 package-base:
 	(docker image ls --format '{{.Repository}}:{{.Tag}}' | grep -q '$(BASE_IMAGE)$$') \
-		|| docker build -t $(BASE_IMAGE) \
+		|| docker build \
 			--build-arg root_image=$(ROOT_IMAGE) \
 			--build-arg certs_image=$(CERTS_IMAGE) \
+			--tag $(BASE_IMAGE) \
 			deployment/docker/base

 package-builder:
 	(docker image ls --format '{{.Repository}}:{{.Tag}}' | grep -q '$(BUILDER_IMAGE)$$') \
-		|| docker build -t $(BUILDER_IMAGE) \
+		|| docker build \
 			--build-arg go_builder_image=$(GO_BUILDER_IMAGE) \
+			--tag $(BUILDER_IMAGE) \
 			deployment/docker/builder

 app-via-docker: package-builder
@@ -43,7 +45,7 @@ package-via-docker: package-base
 	docker build \
 		--build-arg src_binary=$(APP_NAME)$(APP_SUFFIX)-prod \
 		--build-arg base_image=$(BASE_IMAGE) \
-		-t $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)$(APP_SUFFIX)$(RACE) \
+		--tag $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)$(APP_SUFFIX)$(RACE) \
 		-f app/$(APP_NAME)/deployment/Dockerfile bin)

 publish-via-docker: \
@@ -57,7 +59,8 @@ publish-via-docker: \
 		--build-arg certs_image=$(CERTS_IMAGE) \
 		--build-arg root_image=$(ROOT_IMAGE) \
 		--build-arg APP_NAME=$(APP_NAME) \
-		-t $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)$(RACE) \
+		--tag $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)$(RACE) \
+		--tag $(DOCKER_NAMESPACE)/$(APP_NAME):latest$(RACE) \
 		-o type=image \
 		-f app/$(APP_NAME)/multiarch/Dockerfile \
 		--push \


@@ -10,6 +10,7 @@
 * FEATURE: vmauth: add ability to route requests from a single user to multiple destinations depending on the requested paths. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1064
 * FEATURE: remove dependency on external programs such as `cat`, `grep` and `cut` when detecting cpu and memory limits inside Docker or LXC container.
 * FEATURE: vmagent: add `__meta_kubernetes_endpoints_label_*`, `__meta_kubernetes_endpoints_labelpresent_*`, `__meta_kubernetes_endpoints_annotation_*` and `__meta_kubernetes_endpoints_annotationpresent_*` labels for `role: endpoints` in Kubernetes service discovery. These labels were added in Prometheus 2.25.
+* FEATURE: reduce the minimum supported retention period for inverted index (aka `indexdb`) from one month to one day. This should reduce disk space usage for `<-storageDataPath>/indexdb` folder if `-retentionPeriod` is set to values smaller than one month.
 * BUGFIX: properly convert regexp tag filters containing escaped dots to non-regexp tag filters. For example, `{foo=~"bar\.baz"}` should be converted to `{foo="bar.baz"}`. Previously it was incorrectly converted to `{foo="bar\.baz"}`, which could result in missing time series for this tag filter.
 * BUGFIX: do not spam error logs when discovering Docker Swarm targets without dedicated IP. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1028 .


@@ -1424,7 +1424,7 @@ See also [high availability docs](#high-availability) and [backup docs](#backups)
 VictoriaMetrics supports backups via [vmbackup](https://victoriametrics.github.io/vmbackup.html)
 and [vmrestore](https://victoriametrics.github.io/vmrestore.html) tools.
-We also provide `vmbackuper` tool for paid enterprise subscribers - see [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/466) for details.
+We also provide `vmbackupmanager` tool for paid enterprise subscribers - see [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/466) for details.
 ## Profiling


@@ -19,7 +19,7 @@ Backed up data can be restored with [vmrestore](https://victoriametrics.github.io/vmrestore.html).
 See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-series-databases-533c1a927883) for more details.
-See also [vmbackuper](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/466) tool built on top of `vmbackup`. This tool simplifies
+See also [vmbackupmanager](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/466) tool built on top of `vmbackup`. This tool simplifies
 creation of hourly, daily, weekly and monthly backups.
@@ -89,7 +89,7 @@ or from any day (`YYYYMMDD` backups). Note that hourly backup shouldn't run when
 Do not forget removing old snapshots and backups when they are no longer needed for saving storage costs.
-See also [vmbackuper tool](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/466) for automating smart backups.
+See also [vmbackupmanager tool](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/466) for automating smart backups.
 ## How does it work?

go.mod

@@ -1,18 +1,18 @@
 module github.com/VictoriaMetrics/VictoriaMetrics

 require (
-	cloud.google.com/go v0.76.0 // indirect
+	cloud.google.com/go v0.77.0 // indirect
 	cloud.google.com/go/storage v1.13.0
 	github.com/VictoriaMetrics/fastcache v1.5.7
 	// Do not use the original github.com/valyala/fasthttp because of issues
 	// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
 	github.com/VictoriaMetrics/fasthttp v1.0.12
-	github.com/VictoriaMetrics/metrics v1.13.1
-	github.com/VictoriaMetrics/metricsql v0.10.0
-	github.com/aws/aws-sdk-go v1.37.7
+	github.com/VictoriaMetrics/metrics v1.14.0
+	github.com/VictoriaMetrics/metricsql v0.10.1
+	github.com/aws/aws-sdk-go v1.37.12
 	github.com/cespare/xxhash/v2 v2.1.1
-	github.com/cheggaaa/pb/v3 v3.0.5
+	github.com/cheggaaa/pb/v3 v3.0.6
 	github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
 	github.com/fatih/color v1.10.0 // indirect
 	github.com/go-kit/kit v0.10.0
@@ -22,7 +22,7 @@ require (
 	github.com/mattn/go-runewidth v0.0.10 // indirect
 	github.com/oklog/ulid v1.3.1
 	github.com/prometheus/client_golang v1.9.0 // indirect
-	github.com/prometheus/procfs v0.4.1 // indirect
+	github.com/prometheus/procfs v0.6.0 // indirect
 	github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
 	github.com/rivo/uniseg v0.2.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
@@ -34,10 +34,9 @@ require (
 	github.com/valyala/histogram v1.1.2
 	github.com/valyala/quicktemplate v1.6.3
 	go.opencensus.io v0.22.6 // indirect
-	golang.org/x/oauth2 v0.0.0-20210201163806-010130855d6c
-	golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
-	google.golang.org/api v0.39.0
-	google.golang.org/genproto v0.0.0-20210207032614-bba0dbe2a9ea // indirect
+	golang.org/x/oauth2 v0.0.0-20210216194517-16ff1888fd2e
+	golang.org/x/sys v0.0.0-20210216163648-f7da38b97c65
+	google.golang.org/api v0.40.0
 	gopkg.in/yaml.v2 v2.4.0
 )

go.sum

@@ -18,8 +18,8 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb
 cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
 cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
 cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
-cloud.google.com/go v0.76.0 h1:Ckw+E/QYZgd/5bpI4wz4h6f+jmpvh9S9uSrKNnbicJI=
-cloud.google.com/go v0.76.0/go.mod h1:r9EvIAvLrunusnetGdQ50M/gKui1x3zdGW/VELGkdpw=
+cloud.google.com/go v0.77.0 h1:qA5V5+uQf6Mgr+tmFI8UT3D/ELyhIYkPwNGao/3Y+sQ=
+cloud.google.com/go v0.77.0/go.mod h1:R8fYSLIilC247Iu8WS2OGHw1E/Ufn7Pd7HiDjTqiURs=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -85,10 +85,10 @@ github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6Ro
 github.com/VictoriaMetrics/fasthttp v1.0.12 h1:Ag0E119yrH4BTxVyjKD9TeiSImtG9bUcg/stItLJhSE=
 github.com/VictoriaMetrics/fasthttp v1.0.12/go.mod h1:3SeUL4zwB/p/a9aEeRc6gdlbrtNHXBJR6N376EgiSHU=
 github.com/VictoriaMetrics/metrics v1.12.2/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
-github.com/VictoriaMetrics/metrics v1.13.1 h1:1S9QrbXLPrcDBYLiDNIqWk9AC/lk5Ptk8eIjDIFFDsQ=
-github.com/VictoriaMetrics/metrics v1.13.1/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
-github.com/VictoriaMetrics/metricsql v0.10.0 h1:45BARAP2shaL/5p67Hvz+YrWUbr0X0VCy9t+gvdIm8o=
-github.com/VictoriaMetrics/metricsql v0.10.0/go.mod h1:ylO7YITho/Iw6P71oEaGyHbO94bGoGtzWfLGqFhMIg8=
+github.com/VictoriaMetrics/metrics v1.14.0 h1:yvyEVo7cPN2Hv+Hrm1zPTA1f/squmEZTq6xtPH/8F64=
+github.com/VictoriaMetrics/metrics v1.14.0/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
+github.com/VictoriaMetrics/metricsql v0.10.1 h1:wLl/YbMmBGFPyLKMfqNLC333iygibosSM5iSvlH2B4A=
+github.com/VictoriaMetrics/metricsql v0.10.1/go.mod h1:ylO7YITho/Iw6P71oEaGyHbO94bGoGtzWfLGqFhMIg8=
 github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
 github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
 github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
@@ -123,8 +123,8 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ
 github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
 github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.37.7 h1:vfald/ssuWaA2HgJ9DrieVVXVE9eD0Kly/9kl0hofbE=
-github.com/aws/aws-sdk-go v1.37.7/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
+github.com/aws/aws-sdk-go v1.37.12 h1:rPdjZTlzHn+sbLEO+i535g+WpGf7QBDLYI7rDok+FHo=
+github.com/aws/aws-sdk-go v1.37.12/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -142,8 +142,8 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cheggaaa/pb/v3 v3.0.5 h1:lmZOti7CraK9RSjzExsY53+WWfub9Qv13B5m4ptEoPE=
-github.com/cheggaaa/pb/v3 v3.0.5/go.mod h1:X1L61/+36nz9bjIsrDU52qHKOQukUQe2Ge+YvGuquCw=
+github.com/cheggaaa/pb/v3 v3.0.6 h1:ULPm1wpzvj60FvmCrX7bIaB80UgbhI+zSaQJKRfCbAs=
+github.com/cheggaaa/pb/v3 v3.0.6/go.mod h1:X1L61/+36nz9bjIsrDU52qHKOQukUQe2Ge+YvGuquCw=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -680,8 +680,8 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx
 github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.4.1 h1:a4oCTNJdGpE6eD4j1mypyS2ZXLFXo8wEVgUabL47Xr0=
-github.com/prometheus/procfs v0.4.1/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9 h1:F2A86PGVYqn3P7oWbrSmSlJHae9y6wwpAdoWb/pZi6Q=
 github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9/go.mod h1:1MDE/bXgu4gqd5w/otko6WQpXZX9vu8QX4KbitCmaPg=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
@@ -920,8 +920,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ
 golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210113205817-d3ed898aa8a3/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210201163806-010130855d6c h1:HiAZXo96zOhVhtFHchj/ojzoxCFiPrp9/j0GtS38V3g=
-golang.org/x/oauth2 v0.0.0-20210201163806-010130855d6c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210216194517-16ff1888fd2e h1:xxTKAjlluPXFVQnUNoBO7OvmNNE/RpmyUeLVFSYiQQ0=
+golang.org/x/oauth2 v0.0.0-20210216194517-16ff1888fd2e/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -998,8 +998,9 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
 golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210216163648-f7da38b97c65 h1:pTMjDVnP5eVRRlWO76rEWJ8JoC6Lf1CmyjPZXRiy2Sw=
+golang.org/x/sys v0.0.0-20210216163648-f7da38b97c65/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1116,8 +1117,8 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513
 google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
 google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
 google.golang.org/api v0.38.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
-google.golang.org/api v0.39.0 h1:zHCTXf0NeDdKTgcSQpT+ZflWAqHsEp1GmdpxW09f3YM=
-google.golang.org/api v0.39.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.40.0 h1:uWrpz12dpVPn7cojP82mk02XDgTJLDPc2KbVTxrWb4A=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1166,10 +1167,9 @@ google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6D
 google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210202153253-cf70463f6119/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210203152818-3206188e46ba/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210207032614-bba0dbe2a9ea h1:N98SvVh7Hdle2lgUVFuIkf0B3u29CUakMUQa7Hwz8Wc=
-google.golang.org/genproto v0.0.0-20210207032614-bba0dbe2a9ea/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210212180131-e7f2df4ecc2d h1:Edhcm0CKDPLQIecHCp5Iz57Lo7MfT6zUFBAlocmOjcY=
+google.golang.org/genproto v0.0.0-20210212180131-e7f2df4ecc2d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=


@@ -23,6 +23,9 @@ func TestDurationSetFailure(t *testing.T) {
 	// Too big value in months
 	f("12345")

+	// Too big duration
+	f("100000000000y")
+
 	// Negative duration
 	f("-1")
 	f("-34h")


@@ -6,9 +6,12 @@ import (
 	"io"
 	"regexp"
 	"strings"
+	"sync"
+	"sync/atomic"
 	"time"

 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
 	"github.com/VictoriaMetrics/metrics"
@@ -18,6 +21,27 @@ var versionRe = regexp.MustCompile(`v\d+\.\d+\.\d+`)
 // WritePrometheusMetrics writes all the registered metrics to w in Prometheus exposition format.
 func WritePrometheusMetrics(w io.Writer) {
+	currentTime := time.Now()
+	metricsCacheLock.Lock()
+	if currentTime.Sub(metricsCacheLastUpdateTime) > time.Second {
+		var bb bytesutil.ByteBuffer
+		writePrometheusMetrics(&bb)
+		metricsCache.Store(&bb)
+		metricsCacheLastUpdateTime = currentTime
+	}
+	metricsCacheLock.Unlock()
+	bb := metricsCache.Load().(*bytesutil.ByteBuffer)
+	_, _ = w.Write(bb.B)
+}
+
+var (
+	metricsCacheLock           sync.Mutex
+	metricsCacheLastUpdateTime time.Time
+	metricsCache               atomic.Value
+)
+
+func writePrometheusMetrics(w io.Writer) {
 	metrics.WritePrometheus(w, true)
 	metrics.WriteFDMetrics(w)
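The new WritePrometheusMetrics renders the metrics page at most once per second: a mutex serializes the refresh decision, while atomic.Value lets concurrent scrapers grab the latest snapshot without contending on the render itself. The same pattern in a standalone sketch, where render stands in for the real writePrometheusMetrics:

package main

import (
	"bytes"
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

var (
	cacheLock       sync.Mutex
	cacheLastUpdate time.Time
	cache           atomic.Value // holds *bytes.Buffer
)

// render stands in for the expensive metrics rendering.
func render(w *bytes.Buffer) {
	fmt.Fprintf(w, "rendered_at_unixnano %d\n", time.Now().UnixNano())
}

// cachedMetrics refreshes the snapshot at most once per second.
func cachedMetrics() []byte {
	cacheLock.Lock()
	if now := time.Now(); now.Sub(cacheLastUpdate) > time.Second {
		var bb bytes.Buffer
		render(&bb)
		cache.Store(&bb) // readers see either the old or the new snapshot
		cacheLastUpdate = now
	}
	cacheLock.Unlock()
	return cache.Load().(*bytes.Buffer).Bytes()
}

func main() {
	a := cachedMetrics()
	b := cachedMetrics() // within 1s: served from the cached snapshot
	fmt.Println(bytes.Equal(a, b))
}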


@@ -195,7 +195,7 @@ func (idxbc *indexBlockCache) MustClose() {
 	close(idxbc.cleanerStopCh)
 	idxbc.cleanerWG.Wait()

-	// It is safe returning idxbc.m to pool, since the Reset must be called
+	// It is safe returning idxbc.m to pool, since MustClose can be called
 	// when the idxbc entries are no longer accessed by concurrent goroutines.
 	for _, idxbe := range idxbc.m {
 		putIndexBlock(idxbe.idxb)
@@ -221,8 +221,10 @@ func (idxbc *indexBlockCache) cleanByTimeout() {
 	currentTime := fasttime.UnixTimestamp()
 	idxbc.mu.Lock()
 	for k, idxbe := range idxbc.m {
-		// Delete items accessed more than two minutes ago.
-		if currentTime-atomic.LoadUint64(&idxbe.lastAccessTime) > 2*60 {
+		// Delete items accessed more than 90 seconds ago.
+		if currentTime-atomic.LoadUint64(&idxbe.lastAccessTime) > 90 {
+			// do not call putIndexBlock(idxbc.m[k]), since it
+			// may be used by concurrent goroutines.
 			delete(idxbc.m, k)
 		}
 	}
@@ -255,8 +257,8 @@ func (idxbc *indexBlockCache) Put(k uint64, idxb *indexBlock) {
 		// Remove 10% of items from the cache.
 		overflow = int(float64(len(idxbc.m)) * 0.1)
 		for k := range idxbc.m {
-			// Do not return idxb to pool, since these entries may be used
-			// by concurrent goroutines.
+			// do not call putIndexBlock(idxbc.m[k]), since it
+			// may be used by concurrent goroutines.
 			delete(idxbc.m, k)
 			overflow--
 			if overflow == 0 {
@@ -347,7 +349,7 @@ func (ibc *inmemoryBlockCache) MustClose() {
 	close(ibc.cleanerStopCh)
 	ibc.cleanerWG.Wait()

-	// It is safe returning ibc.m entries to pool, since the Reset function may be called
+	// It is safe returning ibc.m entries to pool, since MustClose can be called
 	// only if no other goroutines access ibc entries.
 	for _, ibe := range ibc.m {
 		putInmemoryBlock(ibe.ib)
@@ -373,8 +375,8 @@ func (ibc *inmemoryBlockCache) cleanByTimeout() {
 	currentTime := fasttime.UnixTimestamp()
 	ibc.mu.Lock()
 	for k, ibe := range ibc.m {
-		// Delete items accessed more than a two minutes ago.
-		if currentTime-atomic.LoadUint64(&ibe.lastAccessTime) > 2*60 {
+		// Delete items accessed more than 90 seconds ago.
+		if currentTime-atomic.LoadUint64(&ibe.lastAccessTime) > 90 {
 			// do not call putInmemoryBlock(ibc.m[k]), since it
 			// may be used by concurrent goroutines.
 			delete(ibc.m, k)
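Both caches now drop entries idle for more than 90 seconds instead of two minutes, and, as the new comments stress, evicted values are left to the garbage collector rather than returned to their object pool, because a concurrent goroutine may still hold a reference obtained just before the delete. A reduced sketch of that eviction loop:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type entry struct {
	lastAccessTime uint64 // unix seconds, stored atomically on every access
	data           []byte
}

type timeoutCache struct {
	mu sync.Mutex
	m  map[uint64]*entry
}

// cleanByTimeout deletes entries not accessed for 90+ seconds. Values are
// NOT recycled into a pool: a goroutine that fetched an entry just before
// the delete may still be reading it.
func (c *timeoutCache) cleanByTimeout(nowUnix uint64) {
	c.mu.Lock()
	for k, e := range c.m {
		if nowUnix-atomic.LoadUint64(&e.lastAccessTime) > 90 {
			delete(c.m, k)
		}
	}
	c.mu.Unlock()
}

func main() {
	const now = 1000
	c := &timeoutCache{m: map[uint64]*entry{
		1: {lastAccessTime: now - 120}, // stale: evicted
		2: {lastAccessTime: now - 30},  // fresh: kept
	}}
	c.cleanByTimeout(now)
	fmt.Println(len(c.m)) // 1
}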


@@ -25,9 +25,6 @@ type partSearch struct {
 	// The remaining block headers to scan in the current metaindexRow.
 	bhs []blockHeader

-	// Pointer to inmemory block, which may be reused.
-	inmemoryBlockReuse *inmemoryBlock
-
 	idxbCache *indexBlockCache
 	ibCache   *inmemoryBlockCache
@@ -48,10 +45,6 @@ func (ps *partSearch) reset() {
 	ps.p = nil
 	ps.mrs = nil
 	ps.bhs = nil
-	if ps.inmemoryBlockReuse != nil {
-		putInmemoryBlock(ps.inmemoryBlockReuse)
-		ps.inmemoryBlockReuse = nil
-	}
 	ps.idxbCache = nil
 	ps.ibCache = nil
 	ps.err = nil
@@ -240,10 +233,6 @@ func (ps *partSearch) Error() error {
 }

 func (ps *partSearch) nextBlock() error {
-	if ps.inmemoryBlockReuse != nil {
-		putInmemoryBlock(ps.inmemoryBlockReuse)
-		ps.inmemoryBlockReuse = nil
-	}
 	if len(ps.bhs) == 0 {
 		// The current metaindexRow is over. Proceed to the next metaindexRow.
 		if err := ps.nextBHS(); err != nil {
@@ -252,13 +241,10 @@ func (ps *partSearch) nextBlock() error {
 	}
 	bh := &ps.bhs[0]
 	ps.bhs = ps.bhs[1:]
-	ib, mayReuseInmemoryBlock, err := ps.getInmemoryBlock(bh)
+	ib, err := ps.getInmemoryBlock(bh)
 	if err != nil {
 		return err
 	}
-	if mayReuseInmemoryBlock {
-		ps.inmemoryBlockReuse = ib
-	}
 	ps.ib = ib
 	ps.ibItemIdx = 0
 	return nil
@@ -301,19 +287,19 @@ func (ps *partSearch) readIndexBlock(mr *metaindexRow) (*indexBlock, error) {
 	return idxb, nil
 }

-func (ps *partSearch) getInmemoryBlock(bh *blockHeader) (*inmemoryBlock, bool, error) {
+func (ps *partSearch) getInmemoryBlock(bh *blockHeader) (*inmemoryBlock, error) {
 	var ibKey inmemoryBlockCacheKey
 	ibKey.Init(bh)
 	ib := ps.ibCache.Get(ibKey)
 	if ib != nil {
-		return ib, false, nil
+		return ib, nil
 	}
 	ib, err := ps.readInmemoryBlock(bh)
 	if err != nil {
-		return nil, false, err
+		return nil, err
 	}
 	ps.ibCache.Put(ibKey, ib)
-	return ib, false, nil
+	return ib, nil
 }

 func (ps *partSearch) readInmemoryBlock(bh *blockHeader) (*inmemoryBlock, error) {
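With the reuse path removed, getInmemoryBlock collapses to a plain read-through cache: every block that gets read is put into ibCache, so ownership is unambiguous and nextBlock no longer needs a mayReuse flag threaded back to it. The remaining shape of the logic, in isolation and with illustrative types:

package main

import "fmt"

type block struct{ items []string }

type blockCache struct{ m map[uint64]*block }

func (c *blockCache) get(k uint64) *block      { return c.m[k] }
func (c *blockCache) put(k uint64, b *block)   { c.m[k] = b }

type search struct {
	cache *blockCache
	reads int
}

// readBlock stands in for decompressing a block from disk.
func (s *search) readBlock(k uint64) (*block, error) {
	s.reads++
	return &block{items: []string{fmt.Sprintf("item-%d", k)}}, nil
}

// getBlock is the read-through pattern left after this diff: hit the cache,
// otherwise read and cache. No reuse bookkeeping is required.
func (s *search) getBlock(k uint64) (*block, error) {
	if b := s.cache.get(k); b != nil {
		return b, nil
	}
	b, err := s.readBlock(k)
	if err != nil {
		return nil, err
	}
	s.cache.put(k, b)
	return b, nil
}

func main() {
	s := &search{cache: &blockCache{m: map[uint64]*block{}}}
	s.getBlock(7)
	s.getBlock(7) // served from cache
	fmt.Println(s.reads) // 1
}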


@@ -344,15 +344,15 @@ func TestRowsUnmarshalSuccess(t *testing.T) {
 	})

 	// Escape chars
-	f(`fo\,bar\=baz,x\=\b=\\a\,\=\q\ \\\a\=\,=4.34`, &Rows{
+	f(`fo\,bar\=b\ az,x\=\ b=\\a\,\=\q\ \\\a\ b\=\,=4.34`, &Rows{
 		Rows: []Row{{
-			Measurement: `fo,bar=baz`,
+			Measurement: `fo,bar=b az`,
 			Tags: []Tag{{
-				Key:   `x=\b`,
+				Key:   `x= b`,
 				Value: `\a,=\q `,
 			}},
 			Fields: []Field{{
-				Key:   `\\a=,`,
+				Key:   `\\a b=,`,
 				Value: 4.34,
 			}},
 		}},
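The updated test adds escaped spaces (\ ) to the measurement, tag key and field key. Judging purely by the expected values, the unescaping rules are: \, \= backslash-space and \\ are resolved, while any other backslash sequence (such as \q) is kept verbatim. A sketch of unescaping under that inferred rule set (this is an inference from the test data, not a copy of the parser):

package main

import (
	"fmt"
	"strings"
)

// unescape resolves \, \= \<space> and \\ and keeps any other
// backslash sequence verbatim, matching the test expectations above.
func unescape(s string) string {
	if !strings.Contains(s, `\`) {
		return s
	}
	var b strings.Builder
	for i := 0; i < len(s); i++ {
		if s[i] == '\\' && i+1 < len(s) {
			switch s[i+1] {
			case ',', '=', ' ', '\\':
				b.WriteByte(s[i+1])
				i++
				continue
			}
		}
		b.WriteByte(s[i])
	}
	return b.String()
}

func main() {
	fmt.Println(unescape(`fo\,bar\=b\ az`)) // fo,bar=b az
	fmt.Println(unescape(`\\\a\ b\=\,`))    // \\a b=,
}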


@@ -16,6 +16,7 @@
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
@@ -1123,8 +1124,8 @@ func (is *indexSearch) searchTagValueSuffixesForTimeRange(tvss map[string]struct
 			defer wg.Done()
 			tvssLocal := make(map[string]struct{})
 			isLocal := is.db.getIndexSearch(is.deadline)
-			defer is.db.putIndexSearch(isLocal)
 			err := isLocal.searchTagValueSuffixesForDate(tvssLocal, date, tagKey, tagValuePrefix, delimiter, maxTagValueSuffixes)
+			is.db.putIndexSearch(isLocal)
 			mu.Lock()
 			defer mu.Unlock()
 			if errGlobal != nil {
@@ -2309,7 +2310,7 @@ func (is *indexSearch) updateMetricIDsForTagFilters(metricIDs *uint64set.Set, tf
 	// Slow path - try searching over the whole inverted index.
 	// Sort tag filters for faster ts.Seek below.
-	sort.Slice(tfs.tfs, func(i, j int) bool {
+	sort.SliceStable(tfs.tfs, func(i, j int) bool {
 		return tfs.tfs[i].Less(&tfs.tfs[j])
 	})
 	minTf, minMetricIDs, err := is.getTagFilterWithMinMetricIDsCountOptimized(tfs, tr, maxMetrics)
@@ -2684,8 +2685,8 @@ func (is *indexSearch) getMetricIDsForTimeRange(tr TimeRange, maxMetrics int) (*
 		go func(date uint64) {
 			defer wg.Done()
 			isLocal := is.db.getIndexSearch(is.deadline)
-			defer is.db.putIndexSearch(isLocal)
 			m, err := isLocal.getMetricIDsForDate(date, maxMetrics)
+			is.db.putIndexSearch(isLocal)
 			mu.Lock()
 			defer mu.Unlock()
 			if errGlobal != nil {
@@ -2743,8 +2744,8 @@ func (is *indexSearch) tryUpdatingMetricIDsForDateRange(metricIDs *uint64set.Set
 		go func(date uint64) {
 			defer wg.Done()
 			isLocal := is.db.getIndexSearch(is.deadline)
-			defer is.db.putIndexSearch(isLocal)
 			m, err := isLocal.getMetricIDsForDateAndFilters(date, tfs, maxMetrics)
+			is.db.putIndexSearch(isLocal)
 			mu.Lock()
 			defer mu.Unlock()
 			if errGlobal != nil {
@@ -2776,49 +2777,60 @@
 }

 func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilters, maxMetrics int) (*uint64set.Set, error) {
-	// Sort tfs by the number of matching filters from previous queries.
-	// This way we limit the amount of work below by applying more specific filters at first.
-	type tagFilterWithCount struct {
-		tf      *tagFilter
-		seconds float64
-	}
-	tfsWithCount := make([]tagFilterWithCount, len(tfs.tfs))
-	kb := &is.kb
-	var buf []byte
+	// Sort tfs by the duration from previous queries.
+	// This way we limit the amount of work below by applying fast filters at first.
+	type tagFilterWithWeight struct {
+		tf                 *tagFilter
+		durationSeconds    float64
+		lastQueryTimestamp uint64
+	}
+	tfws := make([]tagFilterWithWeight, len(tfs.tfs))
+	ct := fasttime.UnixTimestamp()
 	for i := range tfs.tfs {
 		tf := &tfs.tfs[i]
-		kb.B = appendDateTagFilterCacheKey(kb.B[:0], date, tf)
-		buf = is.db.durationsPerDateTagFilterCache.Get(buf[:0], kb.B)
-		seconds := float64(0)
-		if len(buf) == 8 {
-			n := encoding.UnmarshalUint64(buf)
-			seconds = math.Float64frombits(n)
-		}
-		tfsWithCount[i] = tagFilterWithCount{
-			tf:      tf,
-			seconds: seconds,
+		durationSeconds, lastQueryTimestamp := is.getDurationAndTimestampForDateFilter(date, tf)
+		if ct > lastQueryTimestamp+60 {
+			// It is time to update filter duration stats.
+			if tf.isNegative || tf.isRegexp && len(tf.orSuffixes) == 0 {
+				// Negative and regexp filters usually take the most time, so move them to the end of filters
+				// in the hope they won't be executed at all.
+				if durationSeconds == 0 {
+					durationSeconds = 10
+				}
+			} else {
+				// Reset duration stats for relatively fast {key="value"} and {key=~"foo|bar|baz"} filters, so they are re-populated below.
+				if durationSeconds < 0.5 {
+					durationSeconds = 0
+				}
+			}
+		}
+		tfws[i] = tagFilterWithWeight{
+			tf:                 tf,
+			durationSeconds:    durationSeconds,
+			lastQueryTimestamp: lastQueryTimestamp,
 		}
 	}
-	sort.Slice(tfsWithCount, func(i, j int) bool {
-		a, b := &tfsWithCount[i], &tfsWithCount[j]
-		if a.seconds != b.seconds {
-			return a.seconds < b.seconds
+	sort.SliceStable(tfws, func(i, j int) bool {
+		a, b := &tfws[i], &tfws[j]
+		if a.durationSeconds != b.durationSeconds {
+			return a.durationSeconds < b.durationSeconds
 		}
-		return a.tf.matchCost < b.tf.matchCost
+		return a.tf.Less(b.tf)
 	})

 	// Populate metricIDs for the first non-negative filter.
 	var tfsPostponed []*tagFilter
 	var metricIDs *uint64set.Set
 	maxDateMetrics := maxMetrics * 50
-	tfsRemainingWithCount := tfsWithCount[:0]
-	for i := range tfsWithCount {
-		tf := tfsWithCount[i].tf
+	tfwsRemaining := tfws[:0]
+	for i := range tfws {
+		tfw := tfws[i]
+		tf := tfw.tf
 		if tf.isNegative {
-			tfsRemainingWithCount = append(tfsRemainingWithCount, tfsWithCount[i])
+			tfwsRemaining = append(tfwsRemaining, tfw)
 			continue
 		}
-		m, err := is.getMetricIDsForDateTagFilter(tf, date, tfs.commonPrefix, maxDateMetrics)
+		m, err := is.getMetricIDsForDateTagFilter(tf, tfw.lastQueryTimestamp, date, tfs.commonPrefix, maxDateMetrics)
 		if err != nil {
 			return nil, err
 		}
@@ -2829,8 +2841,8 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter
 		}
 		metricIDs = m
 		i++
-		for i < len(tfsWithCount) {
-			tfsRemainingWithCount = append(tfsRemainingWithCount, tfsWithCount[i])
+		for i < len(tfws) {
+			tfwsRemaining = append(tfwsRemaining, tfws[i])
 			i++
 		}
 		break
@@ -2860,21 +2872,21 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter
 	// when the initial tag filters significantly reduce the number of found metricIDs,
 	// so the remaining filters could be performed via much faster metricName matching instead
 	// of slow selecting of matching metricIDs.
-	for i := range tfsRemainingWithCount {
-		tfWithCount := tfsRemainingWithCount[i]
-		tf := tfWithCount.tf
+	for i := range tfwsRemaining {
+		tfw := tfwsRemaining[i]
+		tf := tfw.tf
 		metricIDsLen := metricIDs.Len()
 		if metricIDsLen == 0 {
 			// Short circuit - there is no need in applying the remaining filters to an empty set.
 			break
 		}
-		if float64(metricIDsLen)/metricNameMatchesPerSecond < tfWithCount.seconds {
+		if float64(metricIDsLen)/metricNameMatchesPerSecond < tfw.durationSeconds {
 			// It should be faster performing metricName match on the remaining filters
 			// instead of scanning big number of entries in the inverted index for these filters.
 			tfsPostponed = append(tfsPostponed, tf)
 			continue
 		}
-		m, err := is.getMetricIDsForDateTagFilter(tf, date, tfs.commonPrefix, maxDateMetrics)
+		m, err := is.getMetricIDsForDateTagFilter(tf, tfw.lastQueryTimestamp, date, tfs.commonPrefix, maxDateMetrics)
 		if err != nil {
 			return nil, err
 		}
@@ -2894,11 +2906,6 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter
 		return nil, nil
 	}
 	if len(tfsPostponed) > 0 {
-		if n := metricIDs.Len(); n > 50000 && n > maxMetrics/10 {
-			// It will be slow to perform metricName match on this number of time series.
-			// Fall back to global search.
-			return nil, errFallbackToMetricNameMatch
-		}
 		// Apply the postponed filters via metricName match.
 		var m uint64set.Set
 		if err := is.updateMetricIDsByMetricNameMatch(&m, metricIDs, tfsPostponed); err != nil {
@ -2913,7 +2920,7 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(date uint64, tfs *TagFilter
// //
// This value is used for determining when matching by metric name must be perfromed instead of matching // This value is used for determining when matching by metric name must be perfromed instead of matching
// by the remaining tag filters. // by the remaining tag filters.
const metricNameMatchesPerSecond = 10000 const metricNameMatchesPerSecond = 50000
func (is *indexSearch) storeDateMetricID(date, metricID uint64) error { func (is *indexSearch) storeDateMetricID(date, metricID uint64) error {
ii := getIndexItems() ii := getIndexItems()
@ -3061,38 +3068,63 @@ func (is *indexSearch) hasDateMetricID(date, metricID uint64) (bool, error) {
return true, nil return true, nil
} }
func (is *indexSearch) getMetricIDsForDateTagFilter(tf *tagFilter, date uint64, commonPrefix []byte, maxMetrics int) (*uint64set.Set, error) { func (is *indexSearch) getMetricIDsForDateTagFilter(tf *tagFilter, lastQueryTimestamp, date uint64, commonPrefix []byte, maxMetrics int) (*uint64set.Set, error) {
// Augument tag filter prefix for per-date search instead of global search. // Augument tag filter prefix for per-date search instead of global search.
if !bytes.HasPrefix(tf.prefix, commonPrefix) { if !bytes.HasPrefix(tf.prefix, commonPrefix) {
logger.Panicf("BUG: unexpected tf.prefix %q; must start with commonPrefix %q", tf.prefix, commonPrefix) logger.Panicf("BUG: unexpected tf.prefix %q; must start with commonPrefix %q", tf.prefix, commonPrefix)
} }
kb := kbPool.Get() kb := kbPool.Get()
defer kbPool.Put(kb)
kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixDateTagToMetricIDs) kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixDateTagToMetricIDs)
kb.B = encoding.MarshalUint64(kb.B, date) kb.B = encoding.MarshalUint64(kb.B, date)
kb.B = append(kb.B, tf.prefix[len(commonPrefix):]...) kb.B = append(kb.B, tf.prefix[len(commonPrefix):]...)
tfNew := *tf tfNew := *tf
tfNew.isNegative = false // isNegative for the original tf is handled by the caller. tfNew.isNegative = false // isNegative for the original tf is handled by the caller.
tfNew.prefix = kb.B tfNew.prefix = kb.B
startTime := time.Now() startTime := time.Now()
metricIDs, err := is.getMetricIDsForTagFilter(&tfNew, maxMetrics) metricIDs, err := is.getMetricIDsForTagFilter(&tfNew, maxMetrics)
duration := time.Since(startTime) kbPool.Put(kb)
currentTimestamp := fasttime.UnixTimestamp()
if currentTimestamp > lastQueryTimestamp+5 {
// The cache already contains quite fresh entry for the current (date, tf).
// Do not update it too frequently.
return metricIDs, err
}
// Store the duration for tag filter execution in the cache in order to sort tag filters // Store the duration for tag filter execution in the cache in order to sort tag filters
// in ascending durations on the next search. // in ascending durations on the next search.
is.kb.B = appendDateTagFilterCacheKey(is.kb.B[:0], date, tf) durationSeconds := time.Since(startTime).Seconds()
if err != nil { if metricIDs.Len() >= maxMetrics {
// Set duration to big value, so the given tag filter will be moved to the end // Increase the duration for tag filter matching too many metrics,
// of tag filters on the next search. // So next time it will be applied after filters matching lower number of metrics.
duration = time.Hour durationSeconds *= 2
} }
seconds := duration.Seconds() is.storeDurationAndTimestampForDateFilter(date, tf, durationSeconds, currentTimestamp)
n := math.Float64bits(seconds)
kb.B = encoding.MarshalUint64(kb.B[:0], n)
is.db.durationsPerDateTagFilterCache.Set(is.kb.B, kb.B)
return metricIDs, err return metricIDs, err
} }
func (is *indexSearch) getDurationAndTimestampForDateFilter(date uint64, tf *tagFilter) (float64, uint64) {
is.kb.B = appendDateTagFilterCacheKey(is.kb.B[:0], date, tf)
kb := kbPool.Get()
defer kbPool.Put(kb)
kb.B = is.db.durationsPerDateTagFilterCache.Get(kb.B[:0], is.kb.B)
if len(kb.B) != 16 {
return 0, 0
}
n := encoding.UnmarshalUint64(kb.B)
durationSeconds := math.Float64frombits(n)
timestamp := encoding.UnmarshalUint64(kb.B[8:])
return durationSeconds, timestamp
}
func (is *indexSearch) storeDurationAndTimestampForDateFilter(date uint64, tf *tagFilter, durationSeconds float64, timestamp uint64) {
is.kb.B = appendDateTagFilterCacheKey(is.kb.B[:0], date, tf)
n := math.Float64bits(durationSeconds)
kb := kbPool.Get()
kb.B = encoding.MarshalUint64(kb.B[:0], n)
kb.B = encoding.MarshalUint64(kb.B, timestamp)
is.db.durationsPerDateTagFilterCache.Set(is.kb.B, kb.B)
kbPool.Put(kb)
}
func appendDateTagFilterCacheKey(dst []byte, date uint64, tf *tagFilter) []byte { func appendDateTagFilterCacheKey(dst []byte, date uint64, tf *tagFilter) []byte {
dst = encoding.MarshalUint64(dst, date) dst = encoding.MarshalUint64(dst, date)
dst = tf.Marshal(dst) dst = tf.Marshal(dst)
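
The durations cache consulted above stores a fixed 16-byte value per (date, tag filter) key: the IEEE-754 bits of the measured duration in seconds, followed by the unix timestamp of the last update. That is what the `len(kb.B) != 16` check and the `lastQueryTimestamp+5` freshness test rely on. A minimal standalone sketch of this layout (illustrative helper names, assuming the big-endian encoding used by lib/encoding):

package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

// packDurationAndTimestamp mimics the 16-byte cache value: float64 bits
// of the duration in seconds, then the unix timestamp of the update.
func packDurationAndTimestamp(durationSeconds float64, timestamp uint64) []byte {
	var b [16]byte
	binary.BigEndian.PutUint64(b[:8], math.Float64bits(durationSeconds))
	binary.BigEndian.PutUint64(b[8:], timestamp)
	return b[:]
}

// unpackDurationAndTimestamp returns (0, 0, false) on a cache miss or on an
// entry with a different layout, mirroring the len != 16 check above.
func unpackDurationAndTimestamp(b []byte) (float64, uint64, bool) {
	if len(b) != 16 {
		return 0, 0, false
	}
	d := math.Float64frombits(binary.BigEndian.Uint64(b[:8]))
	ts := binary.BigEndian.Uint64(b[8:])
	return d, ts, true
}

func main() {
	b := packDurationAndTimestamp(0.25, 1613500000)
	fmt.Println(unpackDurationAndTimestamp(b)) // 0.25 1613500000 true
}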

View file

@@ -225,8 +225,8 @@ func (ibc *indexBlockCache) cleanByTimeout() {
	currentTime := fasttime.UnixTimestamp()
	ibc.mu.Lock()
	for k, ibe := range ibc.m {
-		// Delete items accessed more than two minutes ago.
-		if currentTime-atomic.LoadUint64(&ibe.lastAccessTime) > 2*60 {
+		// Delete items accessed more than 90 seconds ago.
+		if currentTime-atomic.LoadUint64(&ibe.lastAccessTime) > 90 {
			delete(ibc.m, k)
		}
	}

View file

@@ -122,6 +122,9 @@ func OpenStorage(path string, retentionMsecs int64) (*Storage, error) {
	if retentionMsecs <= 0 {
		retentionMsecs = maxRetentionMsecs
	}
+	if retentionMsecs > maxRetentionMsecs {
+		retentionMsecs = maxRetentionMsecs
+	}
	s := &Storage{
		path:      path,
		cachePath: path + "/cache",

@@ -490,9 +493,8 @@ func (s *Storage) startRetentionWatcher() {
}

func (s *Storage) retentionWatcher() {
-	retentionMonths := int((s.retentionMsecs + (msecsPerMonth - 1)) / msecsPerMonth)
	for {
-		d := nextRetentionDuration(retentionMonths)
+		d := nextRetentionDuration(s.retentionMsecs)
		select {
		case <-s.stop:
			return

@@ -840,17 +842,16 @@ func (s *Storage) mustSaveAndStopCache(c *workingsetcache.Cache, info, name stri
		info, path, time.Since(startTime).Seconds(), cs.EntriesCount, cs.BytesSize)
}

-func nextRetentionDuration(retentionMonths int) time.Duration {
-	t := time.Now().UTC()
-	n := t.Year()*12 + int(t.Month()) - 1 + retentionMonths
-	n -= n % retentionMonths
-	y := n / 12
-	m := time.Month((n % 12) + 1)
+func nextRetentionDuration(retentionMsecs int64) time.Duration {
+	// Round retentionMsecs to days. This guarantees that the per-day inverted index works as expected.
+	retentionMsecs = ((retentionMsecs + msecPerDay - 1) / msecPerDay) * msecPerDay
+	t := time.Now().UnixNano() / 1e6
+	deadline := ((t + retentionMsecs - 1) / retentionMsecs) * retentionMsecs
	// Schedule the deadline to +4 hours from the next retention period start.
	// This should prevent from possible double deletion of indexdb
	// due to time drift - see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/248 .
-	deadline := time.Date(y, m, 1, 4, 0, 0, 0, time.UTC)
-	return deadline.Sub(t)
+	deadline += 4 * 3600 * 1000
+	return time.Duration(deadline-t) * time.Millisecond
}

// SearchMetricNames returns metric names matching the given tfss on the given tr.
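
To make the integer arithmetic in nextRetentionDuration concrete, here is a small self-contained sketch (a toy reimplementation for illustration, not the library API) computing the time until the next indexdb rotation for a 30-day retention:

package main

import (
	"fmt"
	"time"
)

const msecPerDay = 24 * 3600 * 1000

// nextRetentionDeadline mirrors the arithmetic above: round the retention up
// to whole days, align the deadline to the next multiple of the retention
// period, then shift it by +4 hours to tolerate small time drift.
func nextRetentionDeadline(nowMsecs, retentionMsecs int64) time.Duration {
	retentionMsecs = ((retentionMsecs + msecPerDay - 1) / msecPerDay) * msecPerDay
	deadline := ((nowMsecs + retentionMsecs - 1) / retentionMsecs) * retentionMsecs
	deadline += 4 * 3600 * 1000
	return time.Duration(deadline-nowMsecs) * time.Millisecond
}

func main() {
	now := time.Now().UnixNano() / 1e6
	fmt.Printf("next rotation in %v\n", nextRetentionDeadline(now, 30*msecPerDay))
}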

View file

@@ -335,12 +335,12 @@ func TestMetricRowMarshalUnmarshal(t *testing.T) {
}

func TestNextRetentionDuration(t *testing.T) {
-	for retentionMonths := 1; retentionMonths < 360; retentionMonths++ {
-		d := nextRetentionDuration(retentionMonths)
+	for retentionMonths := float64(0.1); retentionMonths < 120; retentionMonths += 0.3 {
+		d := nextRetentionDuration(int64(retentionMonths * msecsPerMonth))
		if d <= 0 {
			currTime := time.Now().UTC()
			nextTime := time.Now().UTC().Add(d)
-			t.Fatalf("unexpected retention duration for retentionMonths=%d; got %s; must be %s + %d months", retentionMonths, nextTime, currTime, retentionMonths)
+			t.Fatalf("unexpected retention duration for retentionMonths=%f; got %s; must be %s + %f months", retentionMonths, nextTime, currTime, retentionMonths)
		}
	}
}

View file

@@ -240,12 +240,15 @@ type tagFilter struct {
func (tf *tagFilter) Less(other *tagFilter) bool {
	// Move regexp and negative filters to the end, since they require scanning
	// all the entries for the given label.
-	if tf.isRegexp != other.isRegexp {
-		return !tf.isRegexp
+	if tf.matchCost != other.matchCost {
+		return tf.matchCost < other.matchCost
	}
	if tf.isNegative != other.isNegative {
		return !tf.isNegative
	}
+	if tf.isRegexp != other.isRegexp {
+		return !tf.isRegexp
+	}
	if len(tf.orSuffixes) != len(other.orSuffixes) {
		return len(tf.orSuffixes) < len(other.orSuffixes)
	}

@@ -312,6 +315,9 @@ func (tf *tagFilter) InitFromGraphiteQuery(commonPrefix, query []byte, paths []s
	tf.prefix = marshalTagValueNoTrailingTagSeparator(tf.prefix, []byte(prefix))
	tf.orSuffixes = append(tf.orSuffixes[:0], orSuffixes...)
	tf.reSuffixMatch, tf.matchCost = newMatchFuncForOrSuffixes(orSuffixes)
+	if isNegative {
+		tf.matchCost *= negativeMatchCostMultiplier
+	}
}

func getCommonPrefix(ss []string) (string, []string) {

@@ -379,6 +385,9 @@ func (tf *tagFilter) Init(commonPrefix, key, value []byte, isNegative, isRegexp
		tf.orSuffixes = append(tf.orSuffixes[:0], "")
		tf.isEmptyMatch = len(prefix) == 0
		tf.matchCost = fullMatchCost
+		if isNegative {
+			tf.matchCost *= negativeMatchCostMultiplier
+		}
		return nil
	}
	rcv, err := getRegexpFromCache(expr)

@@ -388,6 +397,9 @@ func (tf *tagFilter) Init(commonPrefix, key, value []byte, isNegative, isRegexp
	tf.orSuffixes = append(tf.orSuffixes[:0], rcv.orValues...)
	tf.reSuffixMatch = rcv.reMatch
	tf.matchCost = rcv.reCost
+	if isNegative {
+		tf.matchCost *= negativeMatchCostMultiplier
+	}
	tf.isEmptyMatch = len(prefix) == 0 && tf.reSuffixMatch(nil)
	if !tf.isNegative && len(key) == 0 && strings.IndexByte(rcv.literalSuffix, '.') >= 0 {
		// Reverse suffix is needed only for non-negative regexp filters on __name__ that contains dots.

@@ -559,6 +571,8 @@ const (
	reMatchCost = 100
)

+const negativeMatchCostMultiplier = 1000
+
func getOptimizedReMatchFuncExt(reMatch func(b []byte) bool, sre *syntax.Regexp) (func(b []byte) bool, string, uint64) {
	if isDotStar(sre) {
		// '.*'
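
A hedged illustration of what the cost-based ordering buys: positive exact matches sort first, regexps later, and negative filters last. reMatchCost = 100 and the 1000x negativeMatchCostMultiplier come from the diff above; the exact-match cost of 1 is an assumption for the demo:

package main

import (
	"fmt"
	"sort"
)

const negativeMatchCostMultiplier = 1000

type filter struct {
	expr      string
	matchCost uint64
}

func main() {
	filters := []filter{
		{`env!~"dev|staging"`, 100 * negativeMatchCostMultiplier}, // negative regexp
		{`job="api"`, 1},                                          // exact match (assumed cost 1)
		{`instance!="host-1"`, 1 * negativeMatchCostMultiplier},   // negative exact match
		{`path=~"/api/.*"`, 100},                                  // regexp (reMatchCost)
	}
	// Cheapest filters are applied first, like tagFilter.Less above.
	sort.Slice(filters, func(i, j int) bool { return filters[i].matchCost < filters[j].matchCost })
	for _, f := range filters {
		fmt.Println(f.expr)
	}
}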

View file

@@ -743,9 +743,11 @@ const (
type bucket16 struct {
	bits         *[wordsPerBucket]uint64
	smallPoolLen int
-	smallPool    [56]uint16
+	smallPool    [smallPoolSize]uint16
}

+const smallPoolSize = 56
+
func (b *bucket16) isZero() bool {
	return b.bits == nil && b.smallPoolLen == 0
}

@@ -927,22 +929,20 @@ func (b *bucket16) delFromSmallPool(x uint16) bool {
func (b *bucket16) appendTo(dst []uint64, hi uint32, hi16 uint16) []uint64 {
	hi64 := uint64(hi)<<32 | uint64(hi16)<<16
	if b.bits == nil {
+		// Use smallPoolSorter instead of sort.Slice here in order to reduce memory allocations.
+		sps := smallPoolSorterPool.Get().(*smallPoolSorter)
		// Sort a copy of b.smallPool, since b must be readonly in order to prevent from data races
		// when b.appendTo is called from concurrent goroutines.
-		smallPool := b.smallPool
-		// Use uint16Sorter instead of sort.Slice here in order to reduce memory allocations.
-		a := uint16SorterPool.Get().(*uint16Sorter)
-		*a = uint16Sorter(smallPool[:b.smallPoolLen])
-		if len(*a) > 1 && !sort.IsSorted(a) {
-			sort.Sort(a)
+		sps.smallPool = b.smallPool
+		sps.a = sps.smallPool[:b.smallPoolLen]
+		if len(sps.a) > 1 && !sort.IsSorted(sps) {
+			sort.Sort(sps)
		}
-		for _, v := range *a {
+		for _, v := range sps.a {
			x := hi64 | uint64(v)
			dst = append(dst, x)
		}
-		*a = nil
-		uint16SorterPool.Put(a)
+		smallPoolSorterPool.Put(sps)
		return dst
	}
	var wordNum uint64

@@ -966,20 +966,25 @@ func (b *bucket16) appendTo(dst []uint64, hi uint32, hi16 uint16) []uint64 {
	return dst
}

-var uint16SorterPool = &sync.Pool{
+var smallPoolSorterPool = &sync.Pool{
	New: func() interface{} {
-		return &uint16Sorter{}
+		return &smallPoolSorter{}
	},
}

-type uint16Sorter []uint16
+type smallPoolSorter struct {
+	smallPool [smallPoolSize]uint16
+	a         []uint16
+}

-func (s uint16Sorter) Len() int { return len(s) }
-func (s uint16Sorter) Less(i, j int) bool {
-	return s[i] < s[j]
-}
-func (s uint16Sorter) Swap(i, j int) {
-	s[i], s[j] = s[j], s[i]
-}
+func (sps *smallPoolSorter) Len() int { return len(sps.a) }
+func (sps *smallPoolSorter) Less(i, j int) bool {
+	a := sps.a
+	return a[i] < a[j]
+}
+func (sps *smallPoolSorter) Swap(i, j int) {
+	a := sps.a
+	a[i], a[j] = a[j], a[i]
+}

func getWordNumBitMask(x uint16) (uint16, uint64) {
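
The smallPoolSorter above is an instance of a general pattern: implement sort.Interface on a pooled struct so hot paths avoid the closure and interface allocations of sort.Slice on every call. A minimal sketch of the pattern with illustrative names:

package main

import (
	"fmt"
	"sort"
	"sync"
)

// intSorter implements sort.Interface over a reusable buffer.
type intSorter struct{ a []int }

func (s *intSorter) Len() int           { return len(s.a) }
func (s *intSorter) Less(i, j int) bool { return s.a[i] < s.a[j] }
func (s *intSorter) Swap(i, j int)      { s.a[i], s.a[j] = s.a[j], s.a[i] }

var sorterPool = sync.Pool{New: func() interface{} { return &intSorter{} }}

func sortInts(a []int) {
	s := sorterPool.Get().(*intSorter)
	s.a = a
	if !sort.IsSorted(s) {
		sort.Sort(s)
	}
	s.a = nil // do not retain the caller's slice inside the pool
	sorterPool.Put(s)
}

func main() {
	a := []int{3, 1, 2}
	sortInts(a)
	fmt.Println(a) // [1 2 3]
}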

View file

@@ -1,6 +1,34 @@
# Changes
## [0.77.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.76.0...v0.77.0) (2021-02-16)
### Features
* **channel:** Add Pub/Sub endpoints for Cloud Channel API. ([1aea7c8](https://www.github.com/googleapis/google-cloud-go/commit/1aea7c87d39eed87620b488ba0dd60b88ff26c04))
* **dialogflow/cx:** supports SentimentAnalysisResult in webhook request docs: minor updates in wording ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
* **errorreporting:** Make resolution status field available for error groups. Now callers can set the status of an error group by passing this to UpdateGroup. When not specified, it's treated like OPEN. feat: Make source location available for error groups created from GAE. ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
* **errorreporting:** Make resolution status field available for error groups. Now callers can set the status of an error group by passing this to UpdateGroup. When not specified, it's treated like OPEN. feat: Make source location available for error groups created from GAE. ([f66114b](https://www.github.com/googleapis/google-cloud-go/commit/f66114bc7233ad06e18f38dd39497a74d85fdbd8))
* **gkehub:** start generating apiv1beta1 ([#3698](https://www.github.com/googleapis/google-cloud-go/issues/3698)) ([8aed3bd](https://www.github.com/googleapis/google-cloud-go/commit/8aed3bd1bbbe983e4891c813e4c5dc9b3aa1b9b2))
* **internal/docfx:** full cross reference linking ([#3656](https://www.github.com/googleapis/google-cloud-go/issues/3656)) ([fcb7318](https://www.github.com/googleapis/google-cloud-go/commit/fcb7318eb338bf3828ac831ed06ca630e1876418))
* **memcache:** added ApplySoftwareUpdate API docs: various clarifications, new documentation for ApplySoftwareUpdate chore: update proto annotations ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
* **networkconnectivity:** Add state field in resources docs: Minor changes ([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a))
* **networkconnectivity:** Add state field in resources docs: Minor changes ([b4b5898](https://www.github.com/googleapis/google-cloud-go/commit/b4b58987368f80494bbc7f651f50e9123200fb3f))
* **recommendationengine:** start generating apiv1beta1 ([#3686](https://www.github.com/googleapis/google-cloud-go/issues/3686)) ([8f4e130](https://www.github.com/googleapis/google-cloud-go/commit/8f4e13009444d88a5a56144129f055623a2205ac))
### Bug Fixes
* **errorreporting:** Remove dependency on AppEngine's proto definitions. This also removes the source_references field. ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
* **errorreporting:** Update bazel builds for ER client libraries. ([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a))
* **internal/godocfx:** use exact list of top-level decls ([#3665](https://www.github.com/googleapis/google-cloud-go/issues/3665)) ([3cd2961](https://www.github.com/googleapis/google-cloud-go/commit/3cd2961bd7b9c29d82a21ba8850eff00c7c332fd))
* **kms:** do not retry on 13 INTERNAL ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
* **orgpolicy:** Fix constraint resource pattern annotation ([f66114b](https://www.github.com/googleapis/google-cloud-go/commit/f66114bc7233ad06e18f38dd39497a74d85fdbd8))
* **orgpolicy:** Fix constraint resource pattern annotation ([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a))
* **profiler:** make sure retries use the most up-to-date copy of the trailer ([#3660](https://www.github.com/googleapis/google-cloud-go/issues/3660)) ([3ba9ebc](https://www.github.com/googleapis/google-cloud-go/commit/3ba9ebcee2b8b43cdf2c8f8a3d810516a604b363))
* **vision:** sync vision v1 protos to get extra FaceAnnotation Landmark Types ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
## [0.76.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.75.0...v0.76.0) (2021-02-02)

vendor/cloud.google.com/go/go.mod (4 changes, generated, vendored)
View file

@@ -19,7 +19,7 @@ require (
	golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c // indirect
	golang.org/x/text v0.3.5
	golang.org/x/tools v0.1.0
-	google.golang.org/api v0.38.0
-	google.golang.org/genproto v0.0.0-20210202153253-cf70463f6119
+	google.golang.org/api v0.40.0
+	google.golang.org/genproto v0.0.0-20210212180131-e7f2df4ecc2d
	google.golang.org/grpc v1.35.0
)

vendor/cloud.google.com/go/go.sum (8 changes, generated, vendored)
View file

@@ -390,8 +390,8 @@ google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSr
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
-google.golang.org/api v0.38.0 h1:vDyWk6eup8eQAidaZ31sNWIn8tZEL8qpbtGkBD4ytQo=
-google.golang.org/api v0.38.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.40.0 h1:uWrpz12dpVPn7cojP82mk02XDgTJLDPc2KbVTxrWb4A=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=

@@ -434,8 +434,8 @@ google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210202153253-cf70463f6119 h1:m9+RjTMas6brUP8DBxSAa/WIPFy7FIhKpvk+9Ppce8E=
-google.golang.org/genproto v0.0.0-20210202153253-cf70463f6119/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210212180131-e7f2df4ecc2d h1:Edhcm0CKDPLQIecHCp5Iz57Lo7MfT6zUFBAlocmOjcY=
+google.golang.org/genproto v0.0.0-20210212180131-e7f2df4ecc2d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=

View file

@@ -393,7 +393,7 @@
	},
	"cloud.google.com/go/errorreporting/apiv1beta1": {
		"distribution_name": "cloud.google.com/go/errorreporting/apiv1beta1",
-		"description": "Cloud Error Reporting API",
+		"description": "Error Reporting API",
		"language": "Go",
		"client_library_type": "generated",
		"docs_url": "https://pkg.go.dev/cloud.google.com/go/errorreporting/apiv1beta1",

@@ -447,6 +447,14 @@
		"docs_url": "https://pkg.go.dev/cloud.google.com/go/gaming/apiv1beta",
		"release_level": "beta"
	},
+	"cloud.google.com/go/gkehub/apiv1beta1": {
+		"distribution_name": "cloud.google.com/go/gkehub/apiv1beta1",
+		"description": "GKE Hub",
+		"language": "Go",
+		"client_library_type": "generated",
+		"docs_url": "https://pkg.go.dev/cloud.google.com/go/gkehub/apiv1beta1",
+		"release_level": "beta"
+	},
	"cloud.google.com/go/iam": {
		"distribution_name": "cloud.google.com/go/iam",
		"description": "Cloud IAM",

@@ -703,6 +711,14 @@
		"docs_url": "https://pkg.go.dev/cloud.google.com/go/recaptchaenterprise/apiv1beta1",
		"release_level": "beta"
	},
+	"cloud.google.com/go/recommendationengine/apiv1beta1": {
+		"distribution_name": "cloud.google.com/go/recommendationengine/apiv1beta1",
+		"description": "Recommendations AI",
+		"language": "Go",
+		"client_library_type": "generated",
+		"docs_url": "https://pkg.go.dev/cloud.google.com/go/recommendationengine/apiv1beta1",
+		"release_level": "beta"
+	},
	"cloud.google.com/go/recommender/apiv1": {
		"distribution_name": "cloud.google.com/go/recommender/apiv1",
		"description": "Recommender API",

View file

@@ -11,12 +11,13 @@ import (
const (
	e10Min = -9
	e10Max = 18
-	decimalMultiplier = 2
-	bucketSize        = 9 * decimalMultiplier
-	bucketsCount      = e10Max - e10Min
-	decimalPrecision  = 1e-12
+	bucketsPerDecimal   = 18
+	decimalBucketsCount = e10Max - e10Min
+	bucketsCount        = decimalBucketsCount * bucketsPerDecimal
)

+var bucketMultiplier = math.Pow(10, 1.0/bucketsPerDecimal)
+
// Histogram is a histogram for non-negative values with automatically created buckets.
//
// See https://medium.com/@valyala/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350

@@ -48,9 +49,8 @@ type Histogram struct {
	// Mu guarantees synchronous update for all the counters and sum.
	mu sync.Mutex

-	buckets [bucketsCount]*histogramBucket
-	zeros   uint64
+	decimalBuckets [decimalBucketsCount]*[bucketsPerDecimal]uint64

	lower uint64
	upper uint64

@@ -65,15 +65,14 @@ func (h *Histogram) Reset() {
}

func (h *Histogram) resetLocked() {
-	for _, hb := range h.buckets[:] {
-		if hb == nil {
+	for _, db := range h.decimalBuckets[:] {
+		if db == nil {
			continue
		}
-		for offset := range hb.counts[:] {
-			hb.counts[offset] = 0
+		for i := range db[:] {
+			db[i] = 0
		}
	}
-	h.zeros = 0
	h.lower = 0
	h.upper = 0
}

@@ -86,31 +85,31 @@ func (h *Histogram) Update(v float64) {
		// Skip NaNs and negative values.
		return
	}
-	bucketIdx, offset := getBucketIdxAndOffset(v)
+	bucketIdx := (math.Log10(v) - e10Min) * bucketsPerDecimal
+	idx := uint(bucketIdx)
+	if bucketIdx == float64(idx) {
+		// Edge case for 10^n values, which must go to the lower bucket
+		// according to Prometheus logic for `le`-based histograms.
+		idx--
+	}
+	decimalBucketIdx := idx / bucketsPerDecimal
+	offset := idx % bucketsPerDecimal
	h.mu.Lock()
-	h.updateLocked(v, bucketIdx, offset)
-	h.mu.Unlock()
-}
-
-func (h *Histogram) updateLocked(v float64, bucketIdx int, offset uint) {
	h.sum += v
	if bucketIdx < 0 {
-		// Special cases for zero, too small or too big value
-		if offset == 0 {
-			h.zeros++
-		} else if offset == 1 {
-			h.lower++
-		} else {
-			h.upper++
-		}
-		return
-	}
-	hb := h.buckets[bucketIdx]
-	if hb == nil {
-		hb = &histogramBucket{}
-		h.buckets[bucketIdx] = hb
-	}
-	hb.counts[offset]++
+		h.lower++
+	} else if bucketIdx >= bucketsCount {
+		h.upper++
+	} else {
+		db := h.decimalBuckets[decimalBucketIdx]
+		if db == nil {
+			var b [bucketsPerDecimal]uint64
+			db = &b
+			h.decimalBuckets[decimalBucketIdx] = db
+		}
+		db[offset]++
+	}
+	h.mu.Unlock()
}

// VisitNonZeroBuckets calls f for all buckets with non-zero counters.

@@ -121,38 +120,25 @@ func (h *Histogram) updateLocked(v float64, bucketIdx int, offset uint) {
// with `le` (less or equal) labels.
func (h *Histogram) VisitNonZeroBuckets(f func(vmrange string, count uint64)) {
	h.mu.Lock()
-	h.visitNonZeroBucketsLocked(f)
-	h.mu.Unlock()
-}
-
-func (h *Histogram) visitNonZeroBucketsLocked(f func(vmrange string, count uint64)) {
-	if h.zeros > 0 {
-		vmrange := getVMRange(-1, 0)
-		f(vmrange, h.zeros)
-	}
	if h.lower > 0 {
-		vmrange := getVMRange(-1, 1)
-		f(vmrange, h.lower)
+		f(lowerBucketRange, h.lower)
	}
-	for bucketIdx, hb := range h.buckets[:] {
-		if hb == nil {
+	for decimalBucketIdx, db := range h.decimalBuckets[:] {
+		if db == nil {
			continue
		}
-		for offset, count := range hb.counts[:] {
+		for offset, count := range db[:] {
			if count > 0 {
-				vmrange := getVMRange(bucketIdx, uint(offset))
+				bucketIdx := decimalBucketIdx*bucketsPerDecimal + offset
+				vmrange := getVMRange(bucketIdx)
				f(vmrange, count)
			}
		}
	}
	if h.upper > 0 {
-		vmrange := getVMRange(-1, 2)
-		f(vmrange, h.upper)
+		f(upperBucketRange, h.upper)
	}
-}
-
-type histogramBucket struct {
-	counts [bucketSize]uint64
+	h.mu.Unlock()
}

// NewHistogram creates and returns new histogram with the given name.

@@ -193,43 +179,27 @@ func (h *Histogram) UpdateDuration(startTime time.Time) {
	h.Update(d)
}

-func getVMRange(bucketIdx int, offset uint) string {
+func getVMRange(bucketIdx int) string {
	bucketRangesOnce.Do(initBucketRanges)
-	if bucketIdx < 0 {
-		if offset > 2 {
-			panic(fmt.Errorf("BUG: offset must be in range [0...2] for negative bucketIdx; got %d", offset))
-		}
-		return bucketRanges[offset]
-	}
-	idx := 3 + uint(bucketIdx)*bucketSize + offset
-	return bucketRanges[idx]
+	return bucketRanges[bucketIdx]
}

func initBucketRanges() {
-	bucketRanges[0] = "0...0"
-	bucketRanges[1] = fmt.Sprintf("0...%.1fe%d", 1.0, e10Min)
-	bucketRanges[2] = fmt.Sprintf("%.1fe%d...+Inf", 1.0, e10Max)
-	idx := 3
-	start := fmt.Sprintf("%.1fe%d", 1.0, e10Min)
-	for bucketIdx := 0; bucketIdx < bucketsCount; bucketIdx++ {
-		for offset := 0; offset < bucketSize; offset++ {
-			e10 := e10Min + bucketIdx
-			m := 1 + float64(offset+1)/decimalMultiplier
-			if math.Abs(m-10) < decimalPrecision {
-				m = 1
-				e10++
-			}
-			end := fmt.Sprintf("%.1fe%d", m, e10)
-			bucketRanges[idx] = start + "..." + end
-			idx++
-			start = end
-		}
-	}
+	v := math.Pow10(e10Min)
+	start := fmt.Sprintf("%.3e", v)
+	for i := 0; i < bucketsCount; i++ {
+		v *= bucketMultiplier
+		end := fmt.Sprintf("%.3e", v)
+		bucketRanges[i] = start + "..." + end
+		start = end
+	}
}

var (
-	// 3 additional buckets for zero, lower and upper.
-	bucketRanges [3 + bucketsCount*bucketSize]string
+	lowerBucketRange = fmt.Sprintf("0...%.3e", math.Pow10(e10Min))
+	upperBucketRange = fmt.Sprintf("%.3e...+Inf", math.Pow10(e10Max))
+
+	bucketRanges     [bucketsCount]string
	bucketRangesOnce sync.Once
)

@@ -238,21 +208,21 @@ func (h *Histogram) marshalTo(prefix string, w io.Writer) {
	h.VisitNonZeroBuckets(func(vmrange string, count uint64) {
		tag := fmt.Sprintf("vmrange=%q", vmrange)
		metricName := addTag(prefix, tag)
-		name, filters := splitMetricName(metricName)
-		fmt.Fprintf(w, "%s_bucket%s %d\n", name, filters, count)
+		name, labels := splitMetricName(metricName)
+		fmt.Fprintf(w, "%s_bucket%s %d\n", name, labels, count)
		countTotal += count
	})
	if countTotal == 0 {
		return
	}
-	name, filters := splitMetricName(prefix)
+	name, labels := splitMetricName(prefix)
	sum := h.getSum()
	if float64(int64(sum)) == sum {
-		fmt.Fprintf(w, "%s_sum%s %d\n", name, filters, int64(sum))
+		fmt.Fprintf(w, "%s_sum%s %d\n", name, labels, int64(sum))
	} else {
-		fmt.Fprintf(w, "%s_sum%s %g\n", name, filters, sum)
+		fmt.Fprintf(w, "%s_sum%s %g\n", name, labels, sum)
	}
-	fmt.Fprintf(w, "%s_count%s %d\n", name, filters, countTotal)
+	fmt.Fprintf(w, "%s_count%s %d\n", name, labels, countTotal)
}

@@ -261,46 +231,3 @@ func (h *Histogram) getSum() float64 {
	h.mu.Unlock()
	return sum
}
-
-func getBucketIdxAndOffset(v float64) (int, uint) {
-	if v < 0 {
-		panic(fmt.Errorf("BUG: v must be positive; got %g", v))
-	}
-	if v == 0 {
-		return -1, 0
-	}
-	if math.IsInf(v, 1) {
-		return -1, 2
-	}
-	e10 := int(math.Floor(math.Log10(v)))
-	bucketIdx := e10 - e10Min
-	if bucketIdx < 0 {
-		return -1, 1
-	}
-	if bucketIdx >= bucketsCount {
-		if bucketIdx == bucketsCount && math.Abs(math.Pow10(e10)-v) < decimalPrecision {
-			// Adjust m to be on par with Prometheus 'le' buckets (aka 'less or equal')
-			return bucketsCount - 1, bucketSize - 1
-		}
-		return -1, 2
-	}
-	m := ((v / math.Pow10(e10)) - 1) * decimalMultiplier
-	offset := int(m)
-	if offset < 0 {
-		offset = 0
-	} else if offset >= bucketSize {
-		offset = bucketSize - 1
-	}
-	if math.Abs(float64(offset)-m) < decimalPrecision {
-		// Adjust offset to be on par with Prometheus 'le' buckets (aka 'less or equal')
-		offset--
-		if offset < 0 {
-			bucketIdx--
-			offset = bucketSize - 1
-			if bucketIdx < 0 {
-				return -1, 1
-			}
-		}
-	}
-	return bucketIdx, uint(offset)
-}
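
The rewritten histogram covers each decade [10^n, 10^(n+1)) with 18 log-spaced buckets, so adjacent bucket bounds differ by bucketMultiplier = 10^(1/18) ≈ 1.136. A standalone sketch (not the library code) reproducing the index math for one sample value:

package main

import (
	"fmt"
	"math"
)

const (
	e10Min            = -9
	bucketsPerDecimal = 18
)

func main() {
	v := 3.5
	bucketIdx := (math.Log10(v) - e10Min) * bucketsPerDecimal
	idx := uint(bucketIdx)
	if bucketIdx == float64(idx) {
		// Exact powers of 10 go to the lower bucket, matching `le` semantics.
		idx--
	}
	// For v=3.5: idx=171, i.e. decimal bucket 9 (the [1e0, 1e1) decade), offset 9.
	fmt.Println(idx/bucketsPerDecimal, idx%bucketsPerDecimal)
}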

View file

@@ -2,6 +2,7 @@ package metricsql
import (
	"fmt"
+	"math"
	"strconv"
	"strings"
	"unicode"

@@ -444,7 +445,7 @@ func DurationValue(s string, step int64) (int64, error) {
	if len(s) == 0 {
		return 0, fmt.Errorf("duration cannot be empty")
	}
-	var d int64
+	var d float64
	isMinus := false
	for len(s) > 0 {
		n := scanSingleDuration(s, true)

@@ -465,10 +466,13 @@ func DurationValue(s string, step int64) (int64, error) {
			isMinus = true
		}
	}
-	return d, nil
+	if math.Abs(d) > 1<<63-1 {
+		return 0, fmt.Errorf("too big duration %.0fms", d)
+	}
+	return int64(d), nil
}

-func parseSingleDuration(s string, step int64) (int64, error) {
+func parseSingleDuration(s string, step int64) (float64, error) {
	numPart := s[:len(s)-1]
	if strings.HasSuffix(numPart, "m") {
		// Duration in ms

@@ -499,7 +503,7 @@ func parseSingleDuration(s string, step int64) (int64, error) {
	default:
		return 0, fmt.Errorf("invalid duration suffix in %q", s)
	}
-	return int64(mp * f * 1e3), nil
+	return mp * f * 1e3, nil
}

// scanDuration scans duration, which must start with positive num.
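
Accumulating in float64 lets fractional components such as `1.5h` or `0.5d` add up without truncation at each step; only the final sum is converted to integer milliseconds, with an explicit overflow check. A toy sketch of the idea (not the metricsql API):

package main

import (
	"fmt"
	"math"
)

// sumMs adds duration components in float64 and converts once at the end,
// rejecting totals that would overflow int64 milliseconds.
func sumMs(components ...float64) (int64, error) {
	var d float64
	for _, c := range components {
		d += c
	}
	if math.Abs(d) > 1<<63-1 {
		return 0, fmt.Errorf("too big duration %.0fms", d)
	}
	return int64(d), nil
}

func main() {
	ms, err := sumMs(1.5*3600*1e3, 30.5*60*1e3) // 1.5h + 30.5m
	fmt.Println(ms, err)                        // 7230000 <nil>
}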

View file

@@ -88,10 +88,6 @@ func (c *Client) NewRequest(operation *request.Operation, params interface{}, da
// AddDebugHandlers injects debug logging handlers into the service to log request
// debug information.
func (c *Client) AddDebugHandlers() {
-	if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
-		return
-	}
	c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler)
	c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler)
}

View file

@@ -53,6 +53,10 @@ var LogHTTPRequestHandler = request.NamedHandler{
}

func logRequest(r *request.Request) {
+	if !r.Config.LogLevel.AtLeast(aws.LogDebug) {
+		return
+	}
	logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
	bodySeekable := aws.IsReaderSeekable(r.Body)

@@ -120,6 +124,10 @@ var LogHTTPResponseHandler = request.NamedHandler{
}

func logResponse(r *request.Request) {
+	if !r.Config.LogLevel.AtLeast(aws.LogDebug) {
+		return
+	}
	lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}
	if r.HTTPResponse == nil {
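
Moving the LogDebug check from handler registration into logRequest/logResponse means the level is consulted per request, so a log level enabled after the client is built still takes effect. The gating pattern in isolation (illustrative types, not the SDK's):

package main

import "fmt"

type config struct{ debug bool }

func main() {
	cfg := &config{}
	// Register the handler unconditionally; gate at call time, not registration time.
	handlers := []func(*config){
		func(cfg *config) {
			if !cfg.debug {
				return
			}
			fmt.Println("debug: request sent")
		},
	}
	cfg.debug = true // enabling debug after registration still works
	for _, h := range handlers {
		h(cfg)
	}
}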

View file

@@ -358,6 +358,22 @@ var awsPartition = partition{
			"us-west-2": endpoint{},
		},
	},
+	"amplifybackend": service{
+		Endpoints: endpoints{
+			"ap-northeast-2": endpoint{},
+			"ap-south-1":     endpoint{},
+			"ap-southeast-1": endpoint{},
+			"ap-southeast-2": endpoint{},
+			"eu-central-1":   endpoint{},
+			"eu-south-1":     endpoint{},
+			"eu-west-1":      endpoint{},
+			"eu-west-2":      endpoint{},
+			"us-east-1":      endpoint{},
+			"us-east-2":      endpoint{},
+			"us-west-2":      endpoint{},
+		},
+	},
	"api.detective": service{
		Defaults: endpoint{
			Protocols: []string{"https"},

@@ -8986,6 +9002,22 @@ var awsusgovPartition = partition{
			"us-gov-west-1": endpoint{},
		},
	},
+	"models.lex": service{
+		Defaults: endpoint{
+			CredentialScope: credentialScope{
+				Service: "lex",
+			},
+		},
+		Endpoints: endpoints{
+			"us-gov-west-1": endpoint{},
+			"us-gov-west-1-fips": endpoint{
+				Hostname: "models-fips.lex.us-gov-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+			},
+		},
+	},
	"monitoring": service{
		Endpoints: endpoints{

@@ -9191,6 +9223,22 @@ var awsusgovPartition = partition{
			"us-gov-west-1": endpoint{},
		},
	},
+	"runtime.lex": service{
+		Defaults: endpoint{
+			CredentialScope: credentialScope{
+				Service: "lex",
+			},
+		},
+		Endpoints: endpoints{
+			"us-gov-west-1": endpoint{},
+			"us-gov-west-1-fips": endpoint{
+				Hostname: "runtime-fips.lex.us-gov-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+			},
+		},
+	},
	"runtime.sagemaker": service{
		Endpoints: endpoints{

View file

@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
-const SDKVersion = "1.37.7"
+const SDKVersion = "1.37.12"

View file

@@ -128,10 +128,13 @@ type bar struct {
func (p *bar) write(state *State, eln, width int) int {
	repeat := width / p.cc[eln]
+	remainder := width % p.cc[eln]
	for i := 0; i < repeat; i++ {
		p.buf.Write(p.eb[eln])
	}
-	StripStringToBuffer(string(p.eb[eln]), width%p.cc[eln], p.buf)
+	if remainder > 0 {
+		StripStringToBuffer(string(p.eb[eln]), remainder, p.buf)
+	}
	return width
}

View file

@@ -12,6 +12,8 @@ import (
	"text/template"
	"time"

+	"github.com/fatih/color"
+
	"github.com/mattn/go-colorable"
	"github.com/mattn/go-isatty"

@@ -19,7 +21,7 @@ import (
)

// Version of ProgressBar library
-const Version = "3.0.5"
+const Version = "3.0.6"

type key int

@@ -43,6 +45,9 @@ const (
	// Color by default is true when output is tty, but you can set to false for disabling colors
	Color
+
+	// Hide the progress bar when finished, rather than leaving it up. By default it's false.
+	CleanOnFinish
)

const (

@@ -140,6 +145,9 @@ func (pb *ProgressBar) configure() {
	if pb.refreshRate == 0 {
		pb.refreshRate = defaultRefreshRate
	}
+	if pb.vars[CleanOnFinish] == nil {
+		pb.vars[CleanOnFinish] = false
+	}
	if f, ok := pb.output.(*os.File); ok {
		pb.coutput = colorable.NewColorable(f)
	} else {

@@ -204,9 +212,14 @@ func (pb *ProgressBar) write(finish bool) {
	if ret, ok := pb.Get(ReturnSymbol).(string); ok {
		result = ret + result
		if finish && ret == "\r" {
+			if pb.GetBool(CleanOnFinish) {
+				// "Wipe out" progress bar by overwriting one line with blanks
+				result = "\r" + color.New(color.Reset).Sprintf(strings.Repeat(" ", width)) + "\r"
+			} else {
				result += "\n"
+			}
		}
	}
	if pb.GetBool(Color) {
		pb.coutput.Write([]byte(result))
	} else {

View file

@@ -68,7 +68,8 @@ func fillTemplateFuncs(t *template.Template) {
	emf := make(template.FuncMap)
	elementsM.Lock()
	for k, v := range elements {
-		emf[k] = v
+		element := v
+		emf[k] = func(state *State, args ...string) string { return element.ProgressElement(state, args...) }
	}
	elementsM.Unlock()
	t.Funcs(emf)
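
The `element := v` copy guards against the classic Go pitfall of closures capturing the loop variable itself (pre-Go 1.22 semantics): without the copy, every registered template func would end up calling the last element seen. An isolated illustration, unrelated to this library's types:

package main

import "fmt"

func main() {
	funcs := map[string]func() int{}
	for k, v := range map[string]int{"a": 1, "b": 2} {
		v := v // without this copy, both closures would share one loop variable
		funcs[k] = func() int { return v }
	}
	fmt.Println(funcs["a"](), funcs["b"]()) // 1 2
}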

View file

@@ -21,7 +21,7 @@ const (
	_TB = 1e12
)

-var ctrlFinder = regexp.MustCompile("\x1b\x5b[0-9]+\x6d")
+var ctrlFinder = regexp.MustCompile("\x1b\x5b[0-9;]+\x6d")

func CellCount(s string) int {
	n := runewidth.StringWidth(s)

@@ -43,6 +43,7 @@ func StripString(s string, w int) string {
func StripStringToBuffer(s string, w int, buf *bytes.Buffer) {
	var seqs = ctrlFinder.FindAllStringIndex(s, -1)
+	var maxWidthReached bool
mainloop:
	for i, r := range s {
		for _, seq := range seqs {

@@ -51,11 +52,11 @@ mainloop:
				continue mainloop
			}
		}
-		if rw := CellCount(string(r)); rw <= w {
+		if rw := CellCount(string(r)); rw <= w && !maxWidthReached {
			w -= rw
			buf.WriteRune(r)
		} else {
-			break
+			maxWidthReached = true
		}
	}
	for w > 0 {
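
The widened character class in ctrlFinder matters for multi-parameter ANSI sequences such as `\x1b[1;32m` (bold green), which the old pattern did not recognize as a control sequence, throwing off the width accounting. A quick check:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	oldRe := regexp.MustCompile("\x1b\x5b[0-9]+\x6d")  // ESC [ digits m
	newRe := regexp.MustCompile("\x1b\x5b[0-9;]+\x6d") // also allows ';' separators
	seq := "\x1b[1;32m"
	fmt.Println(oldRe.MatchString(seq), newRe.MatchString(seq)) // false true
}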

View file

@@ -36,7 +36,7 @@ type ARPEntry struct {
func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
	data, err := ioutil.ReadFile(fs.proc.Path("net/arp"))
	if err != nil {
-		return nil, fmt.Errorf("error reading arp %s: %s", fs.proc.Path("net/arp"), err)
+		return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err)
	}

	return parseARPEntries(data)

@@ -59,7 +59,7 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) {
		} else if width == expectedDataWidth {
			entry, err := parseARPEntry(columns)
			if err != nil {
-				return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %s", err)
+				return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err)
			}
			entries = append(entries, entry)
		} else {
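
These procfs updates consistently switch error wrapping from `%s` to `%w`, which keeps the underlying error available to `errors.Is`/`errors.As` up the call stack. A quick illustration:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.ReadFile("/proc/net/arp-missing")
	wrapped := fmt.Errorf("error reading arp %q: %w", "/proc/net/arp-missing", err)
	// %w preserves the chain, so callers can still detect the root cause:
	fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // true
}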

View file

@@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
		for i := 0; i < arraySize; i++ {
			sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
			if err != nil {
-				return nil, fmt.Errorf("invalid value in buddyinfo: %s", err)
+				return nil, fmt.Errorf("invalid value in buddyinfo: %w", err)
			}
		}

View file

@@ -19,6 +19,7 @@ import (
	"bufio"
	"bytes"
	"errors"
+	"fmt"
	"regexp"
	"strconv"
	"strings"

@@ -77,7 +78,7 @@ func parseCPUInfoX86(info []byte) ([]CPUInfo, error) {
	// find the first "processor" line
	firstLine := firstNonEmptyLine(scanner)
	if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
-		return nil, errors.New("invalid cpuinfo file: " + firstLine)
+		return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
	}
	field := strings.SplitN(firstLine, ": ", 2)
	v, err := strconv.ParseUint(field[1], 0, 32)

@@ -192,7 +193,7 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
	firstLine := firstNonEmptyLine(scanner)
	match, _ := regexp.MatchString("^[Pp]rocessor", firstLine)
	if !match || !strings.Contains(firstLine, ":") {
-		return nil, errors.New("invalid cpuinfo file: " + firstLine)
+		return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
	}
	field := strings.SplitN(firstLine, ": ", 2)
	cpuinfo := []CPUInfo{}

@@ -256,7 +257,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
	firstLine := firstNonEmptyLine(scanner)
	if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") {
-		return nil, errors.New("invalid cpuinfo file: " + firstLine)
+		return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
	}
	field := strings.SplitN(firstLine, ": ", 2)
	cpuinfo := []CPUInfo{}

@@ -281,7 +282,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
		if strings.HasPrefix(line, "processor") {
			match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line)
			if len(match) < 2 {
-				return nil, errors.New("Invalid line found in cpuinfo: " + line)
+				return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
			}
			cpu := commonCPUInfo
			v, err := strconv.ParseUint(match[1], 0, 32)

@@ -341,7 +342,7 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
	// find the first "processor" line
	firstLine := firstNonEmptyLine(scanner)
	if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
-		return nil, errors.New("invalid cpuinfo file: " + firstLine)
+		return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
	}
	field := strings.SplitN(firstLine, ": ", 2)
	cpuinfo := []CPUInfo{}

@@ -383,7 +384,7 @@ func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
	firstLine := firstNonEmptyLine(scanner)
	if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
-		return nil, errors.New("invalid cpuinfo file: " + firstLine)
+		return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
	}
	field := strings.SplitN(firstLine, ": ", 2)
	v, err := strconv.ParseUint(field[1], 0, 32)

@@ -428,7 +429,7 @@ func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) {
	firstLine := firstNonEmptyLine(scanner)
	if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
-		return nil, errors.New("invalid cpuinfo file: " + firstLine)
+		return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
	}
	field := strings.SplitN(firstLine, ": ", 2)
	v, err := strconv.ParseUint(field[1], 0, 32)

View file

@@ -55,12 +55,12 @@ func (fs FS) Crypto() ([]Crypto, error) {
	path := fs.proc.Path("crypto")
	b, err := util.ReadFileNoStat(path)
	if err != nil {
-		return nil, fmt.Errorf("error reading crypto %s: %s", path, err)
+		return nil, fmt.Errorf("error reading crypto %q: %w", path, err)
	}

	crypto, err := parseCrypto(bytes.NewReader(b))
	if err != nil {
-		return nil, fmt.Errorf("error parsing crypto %s: %s", path, err)
+		return nil, fmt.Errorf("error parsing crypto %q: %w", path, err)
	}

	return crypto, nil

View file

@@ -1080,7 +1080,6 @@ internal : yes
type : skcipher
async : yes
blocksize : 1
-min keysize : 16
max keysize : 32
ivsize : 16
chunksize : 16

@@ -1839,6 +1838,7 @@ min keysize : 16
max keysize : 32
Mode: 444
+Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/diskstats
Lines: 52
@@ -2325,6 +2325,312 @@ Mode: 644
Path: fixtures/proc/self
SymlinkTo: 26231
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/slabinfo
Lines: 302
slabinfo - version: 2.1
# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab> : tunables <limit> <batchcount> <sharedfactor> : slabdata <active_slabs> <num_slabs> <sharedavail>
pid_3 375 532 576 28 4 : tunables 0 0 0 : slabdata 19 19 0
pid_2 3 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
nvidia_p2p_page_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
nvidia_pte_cache 9022 9152 368 22 2 : tunables 0 0 0 : slabdata 416 416 0
nvidia_stack_cache 321 326 12624 2 8 : tunables 0 0 0 : slabdata 163 163 0
kvm_async_pf 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0
kvm_vcpu 0 0 15552 2 8 : tunables 0 0 0 : slabdata 0 0 0
kvm_mmu_page_header 0 0 504 32 4 : tunables 0 0 0 : slabdata 0 0 0
pte_list_desc 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
x86_emulator 0 0 3024 10 8 : tunables 0 0 0 : slabdata 0 0 0
x86_fpu 0 0 4608 7 8 : tunables 0 0 0 : slabdata 0 0 0
iwl_cmd_pool:0000:04:00.0 0 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0
ext4_groupinfo_4k 3719 3740 480 34 4 : tunables 0 0 0 : slabdata 110 110 0
bio-6 32 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0
bio-5 16 48 1344 24 8 : tunables 0 0 0 : slabdata 2 2 0
bio-4 17 92 1408 23 8 : tunables 0 0 0 : slabdata 4 4 0
fat_inode_cache 0 0 1056 31 8 : tunables 0 0 0 : slabdata 0 0 0
fat_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
ovl_aio_req 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
ovl_inode 0 0 1000 32 8 : tunables 0 0 0 : slabdata 0 0 0
squashfs_inode_cache 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0
fuse_request 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0
fuse_inode 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0
xfs_dqtrx 0 0 864 37 8 : tunables 0 0 0 : slabdata 0 0 0
xfs_dquot 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0
xfs_buf 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_bui_item 0 0 544 30 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_bud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_cui_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_cud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_rui_item 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
xfs_rud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_icr 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_ili 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_inode 0 0 1344 24 8 : tunables 0 0 0 : slabdata 0 0 0
xfs_efi_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_efd_item 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_buf_item 0 0 608 26 4 : tunables 0 0 0 : slabdata 0 0 0
xf_trans 0 0 568 28 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_ifork 0 0 376 21 2 : tunables 0 0 0 : slabdata 0 0 0
xfs_da_state 0 0 816 20 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_btree_cur 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_bmap_free_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
xfs_log_ticket 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0
nfs_direct_cache 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0
nfs_commit_data 4 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0
nfs_write_data 32 50 1280 25 8 : tunables 0 0 0 : slabdata 2 2 0
nfs_read_data 0 0 1280 25 8 : tunables 0 0 0 : slabdata 0 0 0
nfs_inode_cache 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0
nfs_page 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
rpc_inode_cache 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
rpc_buffers 8 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0
rpc_tasks 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
fscache_cookie_jar 1 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0
jfs_mp 32 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0
jfs_ip 0 0 1592 20 8 : tunables 0 0 0 : slabdata 0 0 0
reiser_inode_cache 0 0 1096 29 8 : tunables 0 0 0 : slabdata 0 0 0
btrfs_end_io_wq 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_prelim_ref 0 0 424 38 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_delayed_extent_op 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
btrfs_delayed_data_ref 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_delayed_tree_ref 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_delayed_ref_head 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_inode_defrag 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
btrfs_delayed_node 0 0 648 25 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_ordered_extent 0 0 752 21 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_extent_map 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_extent_state 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
bio-3 35 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0
btrfs_extent_buffer 0 0 600 27 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_free_space_bitmap 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0
btrfs_free_space 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_path 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_trans_handle 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_inode 0 0 1496 21 8 : tunables 0 0 0 : slabdata 0 0 0
ext4_inode_cache 84136 84755 1400 23 8 : tunables 0 0 0 : slabdata 3685 3685 0
ext4_free_data 22 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0
ext4_allocation_context 0 70 464 35 4 : tunables 0 0 0 : slabdata 2 2 0
ext4_prealloc_space 24 74 440 37 4 : tunables 0 0 0 : slabdata 2 2 0
ext4_system_zone 267 273 376 21 2 : tunables 0 0 0 : slabdata 13 13 0
ext4_io_end_vec 0 88 368 22 2 : tunables 0 0 0 : slabdata 4 4 0
ext4_io_end 0 80 400 20 2 : tunables 0 0 0 : slabdata 4 4 0
ext4_bio_post_read_ctx 128 147 384 21 2 : tunables 0 0 0 : slabdata 7 7 0
ext4_pending_reservation 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
ext4_extent_status 79351 79422 376 21 2 : tunables 0 0 0 : slabdata 3782 3782 0
jbd2_transaction_s 44 100 640 25 4 : tunables 0 0 0 : slabdata 4 4 0
jbd2_inode 6785 6840 400 20 2 : tunables 0 0 0 : slabdata 342 342 0
jbd2_journal_handle 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0
jbd2_journal_head 824 1944 448 36 4 : tunables 0 0 0 : slabdata 54 54 0
jbd2_revoke_table_s 4 23 352 23 2 : tunables 0 0 0 : slabdata 1 1 0
jbd2_revoke_record_s 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0
ext2_inode_cache 0 0 1144 28 8 : tunables 0 0 0 : slabdata 0 0 0
mbcache 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
dm_thin_new_mapping 0 152 424 38 4 : tunables 0 0 0 : slabdata 4 4 0
dm_snap_pending_exception 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0
dm_exception 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
dm_dirty_log_flush_entry 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
dm_bio_prison_cell_v2 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0
dm_bio_prison_cell 0 148 432 37 4 : tunables 0 0 0 : slabdata 4 4 0
kcopyd_job 0 8 3648 8 8 : tunables 0 0 0 : slabdata 1 1 0
io 0 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0
dm_uevent 0 0 3224 10 8 : tunables 0 0 0 : slabdata 0 0 0
dax_cache 1 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0
aic94xx_ascb 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
aic94xx_dma_token 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
asd_sas_event 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
sas_task 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0
qla2xxx_srbs 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0
sd_ext_cdb 2 22 368 22 2 : tunables 0 0 0 : slabdata 1 1 0
scsi_sense_cache 258 288 512 32 4 : tunables 0 0 0 : slabdata 9 9 0
virtio_scsi_cmd 64 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0
L2TP/IPv6 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
L2TP/IP 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0
ip6-frags 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0
fib6_nodes 5 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0
ip6_dst_cache 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
ip6_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
PINGv6 0 0 1600 20 8 : tunables 0 0 0 : slabdata 0 0 0
RAWv6 25 40 1600 20 8 : tunables 0 0 0 : slabdata 2 2 0
UDPLITEv6 0 0 1728 18 8 : tunables 0 0 0 : slabdata 0 0 0
UDPv6 3 54 1728 18 8 : tunables 0 0 0 : slabdata 3 3 0
tw_sock_TCPv6 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
request_sock_TCPv6 0 0 632 25 4 : tunables 0 0 0 : slabdata 0 0 0
TCPv6 0 33 2752 11 8 : tunables 0 0 0 : slabdata 3 3 0
uhci_urb_priv 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
sgpool-128 2 14 4544 7 8 : tunables 0 0 0 : slabdata 2 2 0
sgpool-64 2 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0
sgpool-32 2 44 1472 22 8 : tunables 0 0 0 : slabdata 2 2 0
sgpool-16 2 68 960 34 8 : tunables 0 0 0 : slabdata 2 2 0
sgpool-8 2 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0
btree_node 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
bfq_io_cq 0 0 488 33 4 : tunables 0 0 0 : slabdata 0 0 0
bfq_queue 0 0 848 38 8 : tunables 0 0 0 : slabdata 0 0 0
mqueue_inode_cache 1 24 1344 24 8 : tunables 0 0 0 : slabdata 1 1 0
isofs_inode_cache 0 0 968 33 8 : tunables 0 0 0 : slabdata 0 0 0
io_kiocb 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0
kioctx 0 30 1088 30 8 : tunables 0 0 0 : slabdata 1 1 0
aio_kiocb 0 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
userfaultfd_ctx_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
fanotify_path_event 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
fanotify_fid_event 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
fsnotify_mark 0 0 408 20 2 : tunables 0 0 0 : slabdata 0 0 0
dnotify_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
dnotify_struct 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
dio 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0
bio-2 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
fasync_cache 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
audit_tree_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
pid_namespace 30 34 480 34 4 : tunables 0 0 0 : slabdata 1 1 0
posix_timers_cache 0 27 592 27 4 : tunables 0 0 0 : slabdata 1 1 0
iommu_devinfo 24 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0
iommu_domain 10 10 3264 10 8 : tunables 0 0 0 : slabdata 1 1 0
iommu_iova 8682 8748 448 36 4 : tunables 0 0 0 : slabdata 243 243 0
UNIX 529 814 1472 22 8 : tunables 0 0 0 : slabdata 37 37 0
ip4-frags 0 0 536 30 4 : tunables 0 0 0 : slabdata 0 0 0
ip_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
UDP-Lite 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
tcp_bind_bucket 7 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0
inet_peer_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
xfrm_dst_cache 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0
xfrm_state 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0
ip_fib_trie 7 21 384 21 2 : tunables 0 0 0 : slabdata 1 1 0
ip_fib_alias 9 20 392 20 2 : tunables 0 0 0 : slabdata 1 1 0
ip_dst_cache 27 84 576 28 4 : tunables 0 0 0 : slabdata 3 3 0
PING 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0
RAW 32 46 1408 23 8 : tunables 0 0 0 : slabdata 2 2 0
UDP 11 168 1536 21 8 : tunables 0 0 0 : slabdata 8 8 0
tw_sock_TCP 1 56 576 28 4 : tunables 0 0 0 : slabdata 2 2 0
request_sock_TCP 0 25 632 25 4 : tunables 0 0 0 : slabdata 1 1 0
TCP 10 60 2624 12 8 : tunables 0 0 0 : slabdata 5 5 0
hugetlbfs_inode_cache 2 35 928 35 8 : tunables 0 0 0 : slabdata 1 1 0
dquot 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0
bio-1 32 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0
eventpoll_pwq 409 600 408 20 2 : tunables 0 0 0 : slabdata 30 30 0
eventpoll_epi 408 672 576 28 4 : tunables 0 0 0 : slabdata 24 24 0
inotify_inode_mark 58 195 416 39 4 : tunables 0 0 0 : slabdata 5 5 0
scsi_data_buffer 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0
bio_crypt_ctx 128 147 376 21 2 : tunables 0 0 0 : slabdata 7 7 0
request_queue 29 39 2408 13 8 : tunables 0 0 0 : slabdata 3 3 0
blkdev_ioc 81 148 440 37 4 : tunables 0 0 0 : slabdata 4 4 0
bio-0 125 200 640 25 4 : tunables 0 0 0 : slabdata 8 8 0
biovec-max 166 196 4544 7 8 : tunables 0 0 0 : slabdata 28 28 0
biovec-128 0 52 2496 13 8 : tunables 0 0 0 : slabdata 4 4 0
biovec-64 0 88 1472 22 8 : tunables 0 0 0 : slabdata 4 4 0
biovec-16 0 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0
bio_integrity_payload 4 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
khugepaged_mm_slot 59 180 448 36 4 : tunables 0 0 0 : slabdata 5 5 0
ksm_mm_slot 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
ksm_stable_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
ksm_rmap_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
user_namespace 2 37 864 37 8 : tunables 0 0 0 : slabdata 1 1 0
uid_cache 5 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
dmaengine-unmap-256 1 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0
dmaengine-unmap-128 1 22 1472 22 8 : tunables 0 0 0 : slabdata 1 1 0
dmaengine-unmap-16 1 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
dmaengine-unmap-2 1 36 448 36 4 : tunables 0 0 0 : slabdata 1 1 0
audit_buffer 0 22 360 22 2 : tunables 0 0 0 : slabdata 1 1 0
sock_inode_cache 663 1170 1216 26 8 : tunables 0 0 0 : slabdata 45 45 0
skbuff_ext_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
skbuff_fclone_cache 1 72 896 36 8 : tunables 0 0 0 : slabdata 2 2 0
skbuff_head_cache 3 650 640 25 4 : tunables 0 0 0 : slabdata 26 26 0
configfs_dir_cache 7 38 424 38 4 : tunables 0 0 0 : slabdata 1 1 0
file_lock_cache 27 116 552 29 4 : tunables 0 0 0 : slabdata 4 4 0
file_lock_ctx 106 120 392 20 2 : tunables 0 0 0 : slabdata 6 6 0
fsnotify_mark_connector 52 66 368 22 2 : tunables 0 0 0 : slabdata 3 3 0
net_namespace 1 6 5312 6 8 : tunables 0 0 0 : slabdata 1 1 0
task_delay_info 784 1560 416 39 4 : tunables 0 0 0 : slabdata 40 40 0
taskstats 45 92 688 23 4 : tunables 0 0 0 : slabdata 4 4 0
proc_dir_entry 678 682 528 31 4 : tunables 0 0 0 : slabdata 22 22 0
pde_opener 0 189 376 21 2 : tunables 0 0 0 : slabdata 9 9 0
proc_inode_cache 7150 8250 992 33 8 : tunables 0 0 0 : slabdata 250 250 0
seq_file 60 735 456 35 4 : tunables 0 0 0 : slabdata 21 21 0
sigqueue 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0
bdev_cache 36 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0
shmem_inode_cache 1599 2208 1016 32 8 : tunables 0 0 0 : slabdata 69 69 0
kernfs_iattrs_cache 1251 1254 424 38 4 : tunables 0 0 0 : slabdata 33 33 0
kernfs_node_cache 52898 52920 464 35 4 : tunables 0 0 0 : slabdata 1512 1512 0
mnt_cache 42 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0
filp 4314 6371 704 23 4 : tunables 0 0 0 : slabdata 277 277 0
inode_cache 28695 29505 920 35 8 : tunables 0 0 0 : slabdata 843 843 0
dentry 166069 169074 528 31 4 : tunables 0 0 0 : slabdata 5454 5454 0
names_cache 0 35 4544 7 8 : tunables 0 0 0 : slabdata 5 5 0
hashtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0
ebitmap_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
avtab_extended_perms 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
avtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0
avc_xperms_data 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
avc_xperms_decision_node 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
avc_xperms_node 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
avc_node 37 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0
iint_cache 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0
lsm_inode_cache 122284 122340 392 20 2 : tunables 0 0 0 : slabdata 6117 6117 0
lsm_file_cache 4266 4485 352 23 2 : tunables 0 0 0 : slabdata 195 195 0
key_jar 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
buffer_head 255622 257076 440 37 4 : tunables 0 0 0 : slabdata 6948 6948 0
uts_namespace 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0
nsproxy 31 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0
vm_area_struct 39115 43214 528 31 4 : tunables 0 0 0 : slabdata 1394 1394 0
mm_struct 96 529 1408 23 8 : tunables 0 0 0 : slabdata 23 23 0
fs_cache 102 756 448 36 4 : tunables 0 0 0 : slabdata 21 21 0
files_cache 102 588 1152 28 8 : tunables 0 0 0 : slabdata 21 21 0
signal_cache 266 672 1536 21 8 : tunables 0 0 0 : slabdata 32 32 0
sighand_cache 266 507 2496 13 8 : tunables 0 0 0 : slabdata 39 39 0
task_struct 783 963 10240 3 8 : tunables 0 0 0 : slabdata 321 321 0
cred_jar 364 952 576 28 4 : tunables 0 0 0 : slabdata 34 34 0
anon_vma_chain 63907 67821 416 39 4 : tunables 0 0 0 : slabdata 1739 1739 0
anon_vma 25891 28899 416 39 4 : tunables 0 0 0 : slabdata 741 741 0
pid 408 992 512 32 4 : tunables 0 0 0 : slabdata 31 31 0
Acpi-Operand 6682 6740 408 20 2 : tunables 0 0 0 : slabdata 337 337 0
Acpi-ParseExt 0 39 416 39 4 : tunables 0 0 0 : slabdata 1 1 0
Acpi-Parse 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0
Acpi-State 0 78 416 39 4 : tunables 0 0 0 : slabdata 2 2 0
Acpi-Namespace 3911 3948 384 21 2 : tunables 0 0 0 : slabdata 188 188 0
trace_event_file 2638 2660 424 38 4 : tunables 0 0 0 : slabdata 70 70 0
ftrace_event_field 6592 6594 384 21 2 : tunables 0 0 0 : slabdata 314 314 0
pool_workqueue 41 64 1024 32 8 : tunables 0 0 0 : slabdata 2 2 0
radix_tree_node 21638 24045 912 35 8 : tunables 0 0 0 : slabdata 687 687 0
task_group 48 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0
vmap_area 4411 4680 400 20 2 : tunables 0 0 0 : slabdata 234 234 0
dma-kmalloc-8k 0 0 24576 1 8 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-128 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-64 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-96 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-8k 0 0 24576 1 8 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-128 31 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0
kmalloc-rcl-96 3371 3626 432 37 4 : tunables 0 0 0 : slabdata 98 98 0
kmalloc-rcl-64 2080 2272 512 32 4 : tunables 0 0 0 : slabdata 71 71 0
kmalloc-rcl-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-8k 133 140 24576 1 8 : tunables 0 0 0 : slabdata 140 140 0
kmalloc-4k 403 444 12288 2 8 : tunables 0 0 0 : slabdata 222 222 0
kmalloc-2k 2391 2585 6144 5 8 : tunables 0 0 0 : slabdata 517 517 0
kmalloc-1k 2163 2420 3072 10 8 : tunables 0 0 0 : slabdata 242 242 0
kmalloc-512 2972 3633 1536 21 8 : tunables 0 0 0 : slabdata 173 173 0
kmalloc-256 1841 1856 1024 32 8 : tunables 0 0 0 : slabdata 58 58 0
kmalloc-192 2165 2914 528 31 4 : tunables 0 0 0 : slabdata 94 94 0
kmalloc-128 1137 1175 640 25 4 : tunables 0 0 0 : slabdata 47 47 0
kmalloc-96 1925 2590 432 37 4 : tunables 0 0 0 : slabdata 70 70 0
kmalloc-64 9433 10688 512 32 4 : tunables 0 0 0 : slabdata 334 334 0
kmalloc-32 9098 10062 416 39 4 : tunables 0 0 0 : slabdata 258 258 0
kmalloc-16 10914 10956 368 22 2 : tunables 0 0 0 : slabdata 498 498 0
kmalloc-8 7576 7705 344 23 2 : tunables 0 0 0 : slabdata 335 335 0
kmem_cache_node 904 928 512 32 4 : tunables 0 0 0 : slabdata 29 29 0
kmem_cache 904 936 832 39 8 : tunables 0 0 0 : slabdata 24 24 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/stat
Lines: 16
cpu 301854 612 111922 8979004 3552 2 3944 0 0 0

View file

@@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
m, err := parseFscacheinfo(bytes.NewReader(b))
if err != nil {
- return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %v", err)
+ return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err)
}
return *m, nil
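The `%v` to `%w` change in this and the following procfs hunks adopts Go 1.13 error wrapping (matching the `go 1.13` bump in go.mod below): a `%w`-wrapped error keeps the original error reachable via errors.Is/errors.As instead of flattening it into text. A minimal sketch of the difference, using os.ErrNotExist as the wrapped sentinel:

package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	_, err := os.Open("/proc/no-such-file") // fails with a *PathError wrapping ENOENT
	wrapped := fmt.Errorf("failed to parse Fscacheinfo: %w", err)
	flattened := fmt.Errorf("failed to parse Fscacheinfo: %v", err)
	fmt.Println(errors.Is(wrapped, os.ErrNotExist))   // true: the error chain is preserved
	fmt.Println(errors.Is(flattened, os.ErrNotExist)) // false: only the message survives
}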

View file

@@ -1,9 +1,9 @@
module github.com/prometheus/procfs
- go 1.12
+ go 1.13
require (
- github.com/google/go-cmp v0.3.1
+ github.com/google/go-cmp v0.5.4
- golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
+ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
- golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e
+ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
)

View file

@@ -1,6 +1,8 @@
- github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
+ github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
- github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
- golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
+ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
- golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
- golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e h1:LwyF2AFISC9nVbS6MgzsaQNSUsRXI49GS+YQ5KX/QH0=
+ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
- golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View file

@@ -39,10 +39,10 @@ type FS string
func NewFS(mountPoint string) (FS, error) {
info, err := os.Stat(mountPoint)
if err != nil {
- return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+ return "", fmt.Errorf("could not read %q: %w", mountPoint, err)
}
if !info.IsDir() {
- return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+ return "", fmt.Errorf("mount point %q is not a directory", mountPoint)
}
return FS(mountPoint), nil

View file

@@ -44,14 +44,14 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
loads := make([]float64, 3)
parts := strings.Fields(string(loadavgBytes))
if len(parts) < 3 {
- return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %s", string(loadavgBytes))
+ return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes))
}
var err error
for i, load := range parts[0:3] {
loads[i], err = strconv.ParseFloat(load, 64)
if err != nil {
- return nil, fmt.Errorf("could not parse load '%s': %s", load, err)
+ return nil, fmt.Errorf("could not parse load %q: %w", load, err)
}
}
return &LoadAvg{

View file

@@ -59,7 +59,7 @@ func (fs FS) MDStat() ([]MDStat, error) {
}
mdstat, err := parseMDStat(data)
if err != nil {
- return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
+ return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err)
}
return mdstat, nil
}
@@ -85,10 +85,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
state := deviceFields[2] // active or inactive
if len(lines) <= i+3 {
- return nil, fmt.Errorf(
- "error parsing %s: too few lines for md device",
- mdName,
- )
+ return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName)
}
// Failed disks have the suffix (F) & Spare disks have the suffix (S).
@@ -97,7 +94,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
active, total, size, err := evalStatusLine(lines[i], lines[i+1])
if err != nil {
- return nil, fmt.Errorf("error parsing md device lines: %s", err)
+ return nil, fmt.Errorf("error parsing md device lines: %w", err)
}
syncLineIdx := i + 2
@@ -129,7 +126,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
} else {
syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx])
if err != nil {
- return nil, fmt.Errorf("error parsing sync line in md device %s: %s", mdName, err)
+ return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err)
}
}
}
@@ -155,7 +152,7 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, e
sizeStr := strings.Fields(statusLine)[0]
size, err = strconv.ParseInt(sizeStr, 10, 64)
if err != nil {
- return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
+ return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
}
if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
@@ -175,12 +172,12 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, e
total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil {
- return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
+ return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
}
active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil {
- return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
+ return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
}
return active, total, size, nil
@@ -194,7 +191,7 @@ func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) {
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil {
- return 0, fmt.Errorf("%s in recoveryLine: %s", err, recoveryLine)
+ return 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err)
}
return syncedBlocks, nil

View file

@@ -152,7 +152,7 @@ func (fs FS) Meminfo() (Meminfo, error) {
m, err := parseMemInfo(bytes.NewReader(b))
if err != nil {
- return Meminfo{}, fmt.Errorf("failed to parse meminfo: %v", err)
+ return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err)
}
return *m, nil

View file

@@ -55,7 +55,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
stat, err := parseConntrackStat(bytes.NewReader(b))
if err != nil {
- return nil, fmt.Errorf("failed to read conntrack stats from %q: %v", path, err)
+ return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err)
}
return stat, nil
@@ -147,7 +147,7 @@ func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
func parseConntrackStatField(field string) (uint64, error) {
val, err := strconv.ParseUint(field, 16, 64)
if err != nil {
- return 0, fmt.Errorf("couldn't parse \"%s\" field: %s", field, err)
+ return 0, fmt.Errorf("couldn't parse %q field: %w", field, err)
}
return val, err
}

View file

@@ -129,8 +129,7 @@ func parseIP(hexIP string) (net.IP, error) {
var byteIP []byte
byteIP, err := hex.DecodeString(hexIP)
if err != nil {
- return nil, fmt.Errorf(
- "cannot parse address field in socket line: %s", hexIP)
+ return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP)
}
switch len(byteIP) {
case 4:
@@ -153,7 +152,7 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
line := &netIPSocketLine{}
if len(fields) < 8 {
return nil, fmt.Errorf(
- "cannot parse net socket line as it has less then 8 columns: %s",
+ "cannot parse net socket line as it has less then 8 columns %q",
strings.Join(fields, " "),
)
}
@@ -162,66 +161,59 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
// sl
s := strings.Split(fields[0], ":")
if len(s) != 2 {
- return nil, fmt.Errorf(
- "cannot parse sl field in socket line: %s", fields[0])
+ return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0])
}
if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
- return nil, fmt.Errorf("cannot parse sl value in socket line: %s", err)
+ return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err)
}
// local_address
l := strings.Split(fields[1], ":")
if len(l) != 2 {
- return nil, fmt.Errorf(
- "cannot parse local_address field in socket line: %s", fields[1])
+ return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1])
}
if line.LocalAddr, err = parseIP(l[0]); err != nil {
return nil, err
}
if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
- return nil, fmt.Errorf(
- "cannot parse local_address port value in socket line: %s", err)
+ return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err)
}
// remote_address
r := strings.Split(fields[2], ":")
if len(r) != 2 {
- return nil, fmt.Errorf(
- "cannot parse rem_address field in socket line: %s", fields[1])
+ return nil, fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1])
}
if line.RemAddr, err = parseIP(r[0]); err != nil {
return nil, err
}
if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
- return nil, fmt.Errorf(
- "cannot parse rem_address port value in socket line: %s", err)
+ return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err)
}
// st
if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
- return nil, fmt.Errorf(
- "cannot parse st value in socket line: %s", err)
+ return nil, fmt.Errorf("cannot parse st value in socket line: %w", err)
}
// tx_queue and rx_queue
q := strings.Split(fields[4], ":")
if len(q) != 2 {
return nil, fmt.Errorf(
- "cannot parse tx/rx queues in socket line as it has a missing colon: %s",
+ "cannot parse tx/rx queues in socket line as it has a missing colon %q",
fields[4],
)
}
if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
- return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %s", err)
+ return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err)
}
if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
- return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %s", err)
+ return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err)
}
// uid
if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
- return nil, fmt.Errorf(
- "cannot parse uid value in socket line: %s", err)
+ return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err)
}
return line, nil

View file

@@ -70,7 +70,7 @@ func readSockstat(name string) (*NetSockstat, error) {
stat, err := parseSockstat(bytes.NewReader(b))
if err != nil {
- return nil, fmt.Errorf("failed to read sockstats from %q: %v", name, err)
+ return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err)
}
return stat, nil
@@ -90,7 +90,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) {
// The remaining fields are key/value pairs.
kvs, err := parseSockstatKVs(fields[1:])
if err != nil {
- return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %v", s.Text(), err)
+ return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err)
}
// The first field is the protocol. We must trim its colon suffix.

View file

@@ -51,7 +51,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
entries, err := parseSoftnet(bytes.NewReader(b))
if err != nil {
- return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %v", err)
+ return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err)
}
return entries, nil

View file

@@ -108,14 +108,14 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
line := s.Text()
item, err := nu.parseLine(line, hasInode, minFields)
if err != nil {
- return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %v", line, err)
+ return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err)
}
nu.Rows = append(nu.Rows, item)
}
if err := s.Err(); err != nil {
- return nil, fmt.Errorf("failed to scan /proc/net/unix data: %v", err)
+ return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err)
}
return &nu, nil
@@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
users, err := u.parseUsers(fields[1])
if err != nil {
- return nil, fmt.Errorf("failed to parse ref count(%s): %v", fields[1], err)
+ return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err)
}
flags, err := u.parseFlags(fields[3])
if err != nil {
- return nil, fmt.Errorf("failed to parse flags(%s): %v", fields[3], err)
+ return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err)
}
typ, err := u.parseType(fields[4])
if err != nil {
- return nil, fmt.Errorf("failed to parse type(%s): %v", fields[4], err)
+ return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err)
}
state, err := u.parseState(fields[5])
if err != nil {
- return nil, fmt.Errorf("failed to parse state(%s): %v", fields[5], err)
+ return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err)
}
var inode uint64
if hasInode {
inode, err = u.parseInode(fields[6])
if err != nil {
- return nil, fmt.Errorf("failed to parse inode(%s): %v", fields[6], err)
+ return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err)
}
}

View file

@@ -105,7 +105,7 @@ func (fs FS) AllProcs() (Procs, error) {
names, err := d.Readdirnames(-1)
if err != nil {
- return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err)
}
p := Procs{}
@@ -206,7 +206,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) {
for i, n := range names {
fd, err := strconv.ParseInt(n, 10, 32)
if err != nil {
- return nil, fmt.Errorf("could not parse fd %s: %s", n, err)
+ return nil, fmt.Errorf("could not parse fd %q: %w", n, err)
}
fds[i] = uintptr(fd)
}
@@ -278,7 +278,7 @@ func (p Proc) fileDescriptors() ([]string, error) {
names, err := d.Readdirnames(-1)
if err != nil {
- return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ return nil, fmt.Errorf("could not read %q: %w", d.Name(), err)
}
return names, nil

View file

@@ -16,7 +16,7 @@ package procfs
import (
"bufio"
"bytes"
- "errors"
+ "fmt"
"regexp"
"github.com/prometheus/procfs/internal/util"
@@ -112,7 +112,7 @@ func parseInotifyInfo(line string) (*InotifyInfo, error) {
}
return i, nil
}
- return nil, errors.New("invalid inode entry: " + line)
+ return nil, fmt.Errorf("invalid inode entry: %q", line)
}
// ProcFDInfos represents a list of ProcFDInfo structs.

View file

@@ -103,8 +103,7 @@ func (p Proc) Limits() (ProcLimits, error) {
//fields := limitsMatch.Split(s.Text(), limitsFields)
fields := limitsMatch.FindStringSubmatch(s.Text())
if len(fields) != limitsFields {
- return ProcLimits{}, fmt.Errorf(
- "couldn't parse %s line %s", f.Name(), s.Text())
+ return ProcLimits{}, fmt.Errorf("couldn't parse %q line %q", f.Name(), s.Text())
}
switch fields[1] {
@@ -155,7 +154,7 @@ func parseUint(s string) (uint64, error) {
}
i, err := strconv.ParseUint(s, 10, 64)
if err != nil {
- return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
+ return 0, fmt.Errorf("couldn't parse value %q: %w", s, err)
}
return i, nil
}

View file

@@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) {
names, err := d.Readdirnames(-1)
if err != nil {
- return nil, fmt.Errorf("failed to read contents of ns dir: %v", err)
+ return nil, fmt.Errorf("failed to read contents of ns dir: %w", err)
}
ns := make(Namespaces, len(names))
@@ -52,13 +52,13 @@ func (p Proc) Namespaces() (Namespaces, error) {
fields := strings.SplitN(target, ":", 2)
if len(fields) != 2 {
- return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target)
+ return nil, fmt.Errorf("failed to parse namespace type and inode from %q", target)
}
typ := fields[0]
inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
if err != nil {
- return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err)
+ return nil, fmt.Errorf("failed to parse inode from %q: %w", fields[1], err)
}
ns[name] = Namespace{typ, uint32(inode)}

View file

@@ -59,7 +59,7 @@ type PSIStats struct {
func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
if err != nil {
- return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %s", resource)
+ return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %q: %w", resource, err)
}
return parsePSIStats(resource, bytes.NewReader(data))

View file

@@ -127,10 +127,7 @@ func (p Proc) Stat() (ProcStat, error) {
)
if l < 0 || r < 0 {
- return ProcStat{}, fmt.Errorf(
- "unexpected format, couldn't extract comm: %s",
- data,
- )
+ return ProcStat{}, fmt.Errorf("unexpected format, couldn't extract comm %q", data)
}
s.Comm = string(data[l+1 : r])

View file

@@ -95,24 +95,27 @@ func (fs FS) Schedstat() (*Schedstat, error) {
return stats, nil
}
- func parseProcSchedstat(contents string) (stats ProcSchedstat, err error) {
+ func parseProcSchedstat(contents string) (ProcSchedstat, error) {
+ var (
+ stats ProcSchedstat
+ err error
+ )
match := procLineRE.FindStringSubmatch(contents)
if match != nil {
stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64)
if err != nil {
- return
+ return stats, err
}
stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64)
if err != nil {
- return
+ return stats, err
}
stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64)
- return
+ return stats, err
}
- err = errors.New("could not parse schedstat")
- return
+ return stats, errors.New("could not parse schedstat")
}

vendor/github.com/prometheus/procfs/slab.go generated vendored Normal file
View file

@ -0,0 +1,151 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"regexp"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
var (
slabSpace = regexp.MustCompile(`\s+`)
slabVer = regexp.MustCompile(`slabinfo -`)
slabHeader = regexp.MustCompile(`# name`)
)
// Slab represents a slab pool in the kernel.
type Slab struct {
Name string
ObjActive int64
ObjNum int64
ObjSize int64
ObjPerSlab int64
PagesPerSlab int64
// tunables
Limit int64
Batch int64
SharedFactor int64
SlabActive int64
SlabNum int64
SharedAvail int64
}
// SlabInfo represents info for all slabs.
type SlabInfo struct {
Slabs []*Slab
}
func shouldParseSlab(line string) bool {
if slabVer.MatchString(line) {
return false
}
if slabHeader.MatchString(line) {
return false
}
return true
}
// parseV21SlabEntry is used to parse a line from /proc/slabinfo version 2.1.
func parseV21SlabEntry(line string) (*Slab, error) {
// First cleanup whitespace.
l := slabSpace.ReplaceAllString(line, " ")
s := strings.Split(l, " ")
if len(s) != 16 {
return nil, fmt.Errorf("unable to parse: %q", line)
}
var err error
i := &Slab{Name: s[0]}
i.ObjActive, err = strconv.ParseInt(s[1], 10, 64)
if err != nil {
return nil, err
}
i.ObjNum, err = strconv.ParseInt(s[2], 10, 64)
if err != nil {
return nil, err
}
i.ObjSize, err = strconv.ParseInt(s[3], 10, 64)
if err != nil {
return nil, err
}
i.ObjPerSlab, err = strconv.ParseInt(s[4], 10, 64)
if err != nil {
return nil, err
}
i.PagesPerSlab, err = strconv.ParseInt(s[5], 10, 64)
if err != nil {
return nil, err
}
i.Limit, err = strconv.ParseInt(s[8], 10, 64)
if err != nil {
return nil, err
}
i.Batch, err = strconv.ParseInt(s[9], 10, 64)
if err != nil {
return nil, err
}
i.SharedFactor, err = strconv.ParseInt(s[10], 10, 64)
if err != nil {
return nil, err
}
i.SlabActive, err = strconv.ParseInt(s[13], 10, 64)
if err != nil {
return nil, err
}
i.SlabNum, err = strconv.ParseInt(s[14], 10, 64)
if err != nil {
return nil, err
}
i.SharedAvail, err = strconv.ParseInt(s[15], 10, 64)
if err != nil {
return nil, err
}
return i, nil
}
// parseSlabInfo21 is used to parse a slabinfo 2.1 file.
func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) {
scanner := bufio.NewScanner(r)
s := SlabInfo{Slabs: []*Slab{}}
for scanner.Scan() {
line := scanner.Text()
if !shouldParseSlab(line) {
continue
}
slab, err := parseV21SlabEntry(line)
if err != nil {
return s, err
}
s.Slabs = append(s.Slabs, slab)
}
return s, nil
}
// SlabInfo reads data from /proc/slabinfo
func (fs FS) SlabInfo() (SlabInfo, error) {
// TODO: Consider passing options to allow for parsing different
// slabinfo versions. However, slabinfo 2.1 has been stable since
// kernel 2.6.10 and later.
data, err := util.ReadFileNoStat(fs.proc.Path("slabinfo"))
if err != nil {
return SlabInfo{}, err
}
return parseSlabInfo21(bytes.NewReader(data))
}
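A minimal sketch of how the new SlabInfo API reads end to end, using only the procfs identifiers defined in this file; note that /proc/slabinfo is typically mode 0400, so this usually needs root:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	info, err := fs.SlabInfo() // reads and parses /proc/slabinfo (slabinfo version 2.1)
	if err != nil {
		log.Fatal(err) // likely a permission error when not running as root
	}
	for _, s := range info.Slabs {
		fmt.Printf("%s: active=%d num=%d objsize=%d\n", s.Name, s.ObjActive, s.ObjNum, s.ObjSize)
	}
}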

View file

@@ -93,10 +93,10 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
&cpuStat.Guest, &cpuStat.GuestNice)
if err != nil && err != io.EOF {
- return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err)
+ return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): %w", line, err)
}
if count == 0 {
- return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line)
+ return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): 0 elements parsed", line)
}
cpuStat.User /= userHZ
@@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
if err != nil {
- return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err)
+ return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu/cpuid): %w", line, err)
}
return cpuStat, cpuID, nil
@@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
&softIRQStat.Hrtimer, &softIRQStat.Rcu)
if err != nil {
- return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err)
+ return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %q (softirq): %w", line, err)
}
return softIRQStat, total, nil
@@ -184,34 +184,34 @@ func (fs FS) Stat() (Stat, error) {
switch {
case parts[0] == "btime":
if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err)
+ return Stat{}, fmt.Errorf("couldn't parse %q (btime): %w", parts[1], err)
}
case parts[0] == "intr":
if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err)
+ return Stat{}, fmt.Errorf("couldn't parse %q (intr): %w", parts[1], err)
}
numberedIRQs := parts[2:]
stat.IRQ = make([]uint64, len(numberedIRQs))
for i, count := range numberedIRQs {
if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err)
+ return Stat{}, fmt.Errorf("couldn't parse %q (intr%d): %w", count, i, err)
}
}
case parts[0] == "ctxt":
if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err)
+ return Stat{}, fmt.Errorf("couldn't parse %q (ctxt): %w", parts[1], err)
}
case parts[0] == "processes":
if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err)
+ return Stat{}, fmt.Errorf("couldn't parse %q (processes): %w", parts[1], err)
}
case parts[0] == "procs_running":
if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err)
+ return Stat{}, fmt.Errorf("couldn't parse %q (procs_running): %w", parts[1], err)
}
case parts[0] == "procs_blocked":
if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err)
+ return Stat{}, fmt.Errorf("couldn't parse %q (procs_blocked): %w", parts[1], err)
}
case parts[0] == "softirq":
softIRQStats, total, err := parseSoftIRQStat(line)
@@ -237,7 +237,7 @@ func (fs FS) Stat() (Stat, error) {
}
if err := scanner.Err(); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %s: %s", fileName, err)
+ return Stat{}, fmt.Errorf("couldn't parse %q: %w", fileName, err)
}
return stat, nil

View file

@@ -112,8 +112,7 @@ func (fs FS) NewXfrmStat() (XfrmStat, error) {
fields := strings.Fields(s.Text())
if len(fields) != 2 {
- return XfrmStat{}, fmt.Errorf(
- "couldn't parse %s line %s", file.Name(), s.Text())
+ return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text())
}
name := fields[0]

View file

@@ -74,11 +74,11 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo"))
if err != nil {
- return nil, fmt.Errorf("error reading zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err)
+ return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err)
}
zoneinfo, err := parseZoneinfo(data)
if err != nil {
- return nil, fmt.Errorf("error parsing zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err)
+ return nil, fmt.Errorf("error parsing zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err)
}
return zoneinfo, nil
}

View file

@@ -16,6 +16,7 @@ import (
"io"
"io/ioutil"
"net/http"
+ "net/url"
"os"
"path"
"sort"
@@ -334,7 +335,7 @@ func (cs awsCredentialSource) subjectToken() (string, error) {
if err != nil {
return "", err
}
- return string(result), nil
+ return url.QueryEscape(string(result)), nil
}
func (cs *awsCredentialSource) getRegion() (string, error) {
func (cs *awsCredentialSource) getRegion() (string, error) { func (cs *awsCredentialSource) getRegion() (string, error) {

View file

@@ -96,7 +96,7 @@ func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) {
} else if c.CredentialSource.File != "" {
return fileCredentialSource{File: c.CredentialSource.File, Format: c.CredentialSource.Format}, nil
} else if c.CredentialSource.URL != "" {
- return urlCredentialSource{URL: c.CredentialSource.URL, Format: c.CredentialSource.Format, ctx: ctx}, nil
+ return urlCredentialSource{URL: c.CredentialSource.URL, Headers: c.CredentialSource.Headers, Format: c.CredentialSource.Format, ctx: ctx}, nil
}
return nil, fmt.Errorf("oauth2/google: unable to parse credential source")
}

View file

@@ -9,6 +9,7 @@ import (
"encoding/json"
"fmt"
"io"
+ "io/ioutil"
"net/http"
"net/url"
"strconv"
@@ -32,11 +33,13 @@ func ExchangeToken(ctx context.Context, endpoint string, request *STSTokenExchan
data.Set("subject_token_type", request.SubjectTokenType)
data.Set("subject_token", request.SubjectToken)
data.Set("scope", strings.Join(request.Scope, " "))
+ if options != nil {
opts, err := json.Marshal(options)
if err != nil {
return nil, fmt.Errorf("oauth2/google: failed to marshal additional options: %v", err)
}
data.Set("options", string(opts))
+ }
authentication.InjectAuthentication(data, headers)
encodedData := data.Encode()
@@ -61,9 +64,12 @@ func ExchangeToken(ctx context.Context, endpoint string, request *STSTokenExchan
}
defer resp.Body.Close()
- bodyJson := json.NewDecoder(io.LimitReader(resp.Body, 1<<20))
+ body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))
+ if c := resp.StatusCode; c < 200 || c > 299 {
+ return nil, fmt.Errorf("oauth2/google: status code %d: %s", c, body)
+ }
var stsResp STSTokenExchangeResponse
- err = bodyJson.Decode(&stsResp)
+ err = json.Unmarshal(body, &stsResp)
if err != nil {
return nil, fmt.Errorf("oauth2/google: failed to unmarshal response body from Secure Token Server: %v", err)
View file

@@ -39,15 +39,18 @@ func (cs urlCredentialSource) subjectToken() (string, error) {
}
defer resp.Body.Close()
- tokenBytes, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))
+ respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))
if err != nil {
return "", fmt.Errorf("oauth2/google: invalid body in subject token URL query: %v", err)
}
+ if c := resp.StatusCode; c < 200 || c > 299 {
+ return "", fmt.Errorf("oauth2/google: status code %d: %s", c, respBody)
+ }
switch cs.Format.Type {
case "json":
jsonData := make(map[string]interface{})
- err = json.Unmarshal(tokenBytes, &jsonData)
+ err = json.Unmarshal(respBody, &jsonData)
if err != nil {
return "", fmt.Errorf("oauth2/google: failed to unmarshal subject token file: %v", err)
}
@@ -61,9 +64,9 @@ func (cs urlCredentialSource) subjectToken() (string, error) {
}
return token, nil
case "text":
- return string(tokenBytes), nil
+ return string(respBody), nil
case "":
- return string(tokenBytes), nil
+ return string(respBody), nil
default:
return "", errors.New("oauth2/google: invalid credential_source file format type")
}
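Both oauth2 hunks above converge on the same pattern: read a size-capped response body first, reject non-2xx statuses while quoting the body in the error, and only then decode. A standalone sketch of that pattern under those assumptions; decodeJSONResponse is an illustrative helper name, not part of the library:

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

// decodeJSONResponse mirrors the pattern introduced above: cap the read so a
// misbehaving server cannot exhaust memory, check the status only after
// reading so the body can be included in the error, then unmarshal.
func decodeJSONResponse(resp *http.Response, v interface{}) error {
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))
	if err != nil {
		return fmt.Errorf("invalid body: %v", err)
	}
	if c := resp.StatusCode; c < 200 || c > 299 {
		return fmt.Errorf("status code %d: %s", c, body)
	}
	return json.Unmarshal(body, v)
}

func main() {
	resp, err := http.Get("https://example.com/token") // placeholder URL
	if err != nil {
		fmt.Println(err)
		return
	}
	var out map[string]interface{}
	if err := decodeJSONResponse(resp, &out); err != nil {
		fmt.Println(err)
	}
}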

View file

@@ -32,7 +32,7 @@ const (
AF_LAT = 0xe
AF_LINK = 0x12
AF_LOCAL = 0x1
- AF_MAX = 0x28
+ AF_MAX = 0x29
AF_NATM = 0x1f
AF_NDRV = 0x1b
AF_NETBIOS = 0x21
@@ -49,6 +49,7 @@ const (
AF_UNIX = 0x1
AF_UNSPEC = 0x0
AF_UTUN = 0x26
+ AF_VSOCK = 0x28
ALTWERASE = 0x200
ATTR_BIT_MAP_COUNT = 0x5
ATTR_CMN_ACCESSMASK = 0x20000
@@ -83,7 +84,7 @@ const (
ATTR_CMN_PAROBJID = 0x80
ATTR_CMN_RETURNED_ATTRS = 0x80000000
ATTR_CMN_SCRIPT = 0x100
- ATTR_CMN_SETMASK = 0x41c7ff00
+ ATTR_CMN_SETMASK = 0x51c7ff00
ATTR_CMN_USERACCESS = 0x200000
ATTR_CMN_UUID = 0x800000
ATTR_CMN_VALIDMASK = 0xffffffff
@@ -357,7 +358,7 @@ const (
DLT_LINUX_SLL = 0x71
DLT_LOOP = 0x6c
DLT_LTALK = 0x72
- DLT_MATCHING_MAX = 0xf5
+ DLT_MATCHING_MAX = 0x10a
DLT_MATCHING_MIN = 0x68
DLT_MFR = 0xb6
DLT_MOST = 0xd3
@@ -398,6 +399,7 @@ const (
DLT_SYMANTEC_FIREWALL = 0x63
DLT_TZSP = 0x80
DLT_USB = 0xba
+ DLT_USB_DARWIN = 0x10a
DLT_USB_LINUX = 0xbd
DLT_USB_LINUX_MMAPPED = 0xdc
DLT_USER0 = 0x93
@@ -442,8 +444,8 @@ const (
EVFILT_PROC = -0x5
EVFILT_READ = -0x1
EVFILT_SIGNAL = -0x6
- EVFILT_SYSCOUNT = 0xf
+ EVFILT_SYSCOUNT = 0x11
- EVFILT_THREADMARKER = 0xf
+ EVFILT_THREADMARKER = 0x11
EVFILT_TIMER = -0x7
EVFILT_USER = -0xa
EVFILT_VM = -0xc
@@ -481,9 +483,12 @@ const (
FSOPT_NOINMEMUPDATE = 0x2
FSOPT_PACK_INVAL_ATTRS = 0x8
FSOPT_REPORT_FULLSIZE = 0x4
+ FSOPT_RETURN_REALDEV = 0x200
F_ADDFILESIGS = 0x3d
F_ADDFILESIGS_FOR_DYLD_SIM = 0x53
+ F_ADDFILESIGS_INFO = 0x67
F_ADDFILESIGS_RETURN = 0x61
+ F_ADDFILESUPPL = 0x68
F_ADDSIGS = 0x3b
F_ALLOCATEALL = 0x4
F_ALLOCATECONTIG = 0x2
@@ -505,8 +510,10 @@ const (
F_GETOWN = 0x5
F_GETPATH = 0x32
F_GETPATH_MTMINFO = 0x47
+ F_GETPATH_NOFIRMLINK = 0x66
F_GETPROTECTIONCLASS = 0x3f
F_GETPROTECTIONLEVEL = 0x4d
+ F_GETSIGSINFO = 0x69
F_GLOBAL_NOCACHE = 0x37
F_LOG2PHYS = 0x31
F_LOG2PHYS_EXT = 0x41
@@ -531,6 +538,7 @@ const (
F_SETPROTECTIONCLASS = 0x40
F_SETSIZE = 0x2b
F_SINGLE_WRITER = 0x4c
+ F_SPECULATIVE_READ = 0x65
F_THAW_FS = 0x36
F_TRANSCODEKEY = 0x4b
F_TRIM_ACTIVE_FILE = 0x64
@@ -562,6 +570,7 @@ const (
IFF_UP = 0x1
IFNAMSIZ = 0x10
IFT_1822 = 0x2
+ IFT_6LOWPAN = 0x40
IFT_AAL5 = 0x31
IFT_ARCNET = 0x23
IFT_ARCNETPLUS = 0x24
@@ -766,6 +775,9 @@ const (
IPV6_2292PKTINFO = 0x13
IPV6_2292PKTOPTIONS = 0x19
IPV6_2292RTHDR = 0x18
+ IPV6_ADDR_MC_FLAGS_PREFIX = 0x20
+ IPV6_ADDR_MC_FLAGS_TRANSIENT = 0x10
+ IPV6_ADDR_MC_FLAGS_UNICAST_BASED = 0x30
IPV6_BINDV6ONLY = 0x1b
IPV6_BOUND_IF = 0x7d
IPV6_CHECKSUM = 0x1a
@@ -775,7 +787,7 @@ const (
IPV6_FAITH = 0x1d
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
- IPV6_FLOW_ECN_MASK = 0x300
+ IPV6_FLOW_ECN_MASK = 0x3000
IPV6_FRAGTTL = 0x3c
IPV6_FW_ADD = 0x1e
IPV6_FW_DEL = 0x1f
@@ -818,6 +830,7 @@ const (
IP_DEFAULT_MULTICAST_LOOP = 0x1
IP_DEFAULT_MULTICAST_TTL = 0x1
IP_DF = 0x4000
+ IP_DONTFRAG = 0x1c
IP_DROP_MEMBERSHIP = 0xd
IP_DROP_SOURCE_MEMBERSHIP = 0x47
IP_DUMMYNET_CONFIGURE = 0x3c
@@ -904,6 +917,7 @@ const (
MADV_SEQUENTIAL = 0x2
MADV_WILLNEED = 0x3
MADV_ZERO_WIRED_PAGES = 0x6
+ MAP_32BIT = 0x8000
MAP_ANON = 0x1000
MAP_ANONYMOUS = 0x1000
MAP_COPY = 0x2
@@ -920,6 +934,17 @@ const (
MAP_RESILIENT_CODESIGN = 0x2000
MAP_RESILIENT_MEDIA = 0x4000
MAP_SHARED = 0x1
+ MAP_TRANSLATED_ALLOW_EXECUTE = 0x20000
+ MAP_UNIX03 = 0x40000
+ MCAST_BLOCK_SOURCE = 0x54
+ MCAST_EXCLUDE = 0x2
+ MCAST_INCLUDE = 0x1
+ MCAST_JOIN_GROUP = 0x50
+ MCAST_JOIN_SOURCE_GROUP = 0x52
+ MCAST_LEAVE_GROUP = 0x51
+ MCAST_LEAVE_SOURCE_GROUP = 0x53
+ MCAST_UNBLOCK_SOURCE = 0x55
+ MCAST_UNDEFINED = 0x0
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MNT_ASYNC = 0x40
@@ -931,6 +956,7 @@ const (
MNT_DOVOLFS = 0x8000
MNT_DWAIT = 0x4
MNT_EXPORTED = 0x100
+ MNT_EXT_ROOT_DATA_VOL = 0x1
MNT_FORCE = 0x80000
MNT_IGNORE_OWNERSHIP = 0x200000
MNT_JOURNALED = 0x800000
@@ -947,12 +973,15 @@ const (
MNT_QUOTA = 0x2000
MNT_RDONLY = 0x1
MNT_RELOAD = 0x40000
+ MNT_REMOVABLE = 0x200
MNT_ROOTFS = 0x4000
+ MNT_SNAPSHOT = 0x40000000
+ MNT_STRICTATIME = 0x80000000
MNT_SYNCHRONOUS = 0x2
MNT_UNION = 0x20
MNT_UNKNOWNPERMISSIONS = 0x200000
MNT_UPDATE = 0x10000
- MNT_VISFLAGMASK = 0x17f0f5ff
+ MNT_VISFLAGMASK = 0xd7f0f7ff
MNT_WAIT = 0x1
MSG_CTRUNC = 0x20
MSG_DONTROUTE = 0x4
@@ -963,6 +992,7 @@ const (
MSG_HAVEMORE = 0x2000
MSG_HOLD = 0x800
MSG_NEEDSA = 0x10000
+ MSG_NOSIGNAL = 0x80000
MSG_OOB = 0x1
MSG_PEEK = 0x2
MSG_RCVMORE = 0x4000
@@ -979,9 +1009,10 @@ const (
NET_RT_DUMP = 0x1
NET_RT_DUMP2 = 0x7
NET_RT_FLAGS = 0x2
+ NET_RT_FLAGS_PRIV = 0xa
NET_RT_IFLIST = 0x3
NET_RT_IFLIST2 = 0x6
- NET_RT_MAXID = 0xa
+ NET_RT_MAXID = 0xb
NET_RT_STAT = 0x4
NET_RT_TRASH = 0x5
NFDBITS = 0x20
@@ -1019,6 +1050,7 @@ const (
NOTE_LEEWAY = 0x10
NOTE_LINK = 0x10
NOTE_LOWAT = 0x1
+ NOTE_MACHTIME = 0x100
NOTE_MACH_CONTINUOUS_TIME = 0x80
NOTE_NONE = 0x80
NOTE_NSECONDS = 0x4
@@ -1065,6 +1097,7 @@ const (
O_NDELAY = 0x4
O_NOCTTY = 0x20000
O_NOFOLLOW = 0x100
+ O_NOFOLLOW_ANY = 0x20000000
O_NONBLOCK = 0x4
O_POPUP = 0x80000000
O_RDONLY = 0x0
@@ -1136,6 +1169,7 @@ const (
RTF_BROADCAST = 0x400000
RTF_CLONING = 0x100
RTF_CONDEMNED = 0x2000000
+ RTF_DEAD = 0x20000000
RTF_DELCLONE = 0x80
RTF_DONE = 0x40
RTF_DYNAMIC = 0x10
@@ -1143,6 +1177,7 @@ const (
RTF_HOST = 0x4
RTF_IFREF = 0x4000000
RTF_IFSCOPE = 0x1000000
+ RTF_LLDATA = 0x400
RTF_LLINFO = 0x400
RTF_LOCAL = 0x200000
RTF_MODIFIED = 0x20
@@ -1210,6 +1245,7 @@ const (
SIOCGDRVSPEC = 0xc028697b
SIOCGETVLAN = 0xc020697f
SIOCGHIWAT = 0x40047301
+ SIOCGIF6LOWPAN = 0xc02069c5
SIOCGIFADDR = 0xc0206921
SIOCGIFALTMTU = 0xc0206948
SIOCGIFASYNCMAP = 0xc020697c
@@ -1220,6 +1256,7 @@ const (
SIOCGIFDEVMTU = 0xc0206944
SIOCGIFDSTADDR = 0xc0206922
SIOCGIFFLAGS = 0xc0206911
+ SIOCGIFFUNCTIONALTYPE = 0xc02069ad
SIOCGIFGENERIC = 0xc020693a
SIOCGIFKPI = 0xc0206987
SIOCGIFMAC = 0xc0206982
@@ -1233,6 +1270,7 @@ const (
SIOCGIFSTATUS = 0xc331693d
SIOCGIFVLAN = 0xc020697f
SIOCGIFWAKEFLAGS = 0xc0206988
+ SIOCGIFXMEDIA = 0xc02c6948
SIOCGLOWAT = 0x40047303
SIOCGPGRP = 0x40047309
SIOCIFCREATE = 0xc0206978
@@ -1243,6 +1281,7 @@ const (
SIOCSDRVSPEC = 0x8028697b
SIOCSETVLAN = 0x8020697e
SIOCSHIWAT = 0x80047300
+ SIOCSIF6LOWPAN = 0x802069c4
SIOCSIFADDR = 0x8020690c
SIOCSIFALTMTU = 0x80206945
SIOCSIFASYNCMAP = 0x8020697d

View file

@@ -32,7 +32,7 @@ const (
 	AF_LAT = 0xe
 	AF_LINK = 0x12
 	AF_LOCAL = 0x1
-	AF_MAX = 0x28
+	AF_MAX = 0x29
 	AF_NATM = 0x1f
 	AF_NDRV = 0x1b
 	AF_NETBIOS = 0x21
@@ -49,6 +49,7 @@ const (
 	AF_UNIX = 0x1
 	AF_UNSPEC = 0x0
 	AF_UTUN = 0x26
+	AF_VSOCK = 0x28
 	ALTWERASE = 0x200
 	ATTR_BIT_MAP_COUNT = 0x5
 	ATTR_CMN_ACCESSMASK = 0x20000
@@ -83,7 +84,7 @@ const (
 	ATTR_CMN_PAROBJID = 0x80
 	ATTR_CMN_RETURNED_ATTRS = 0x80000000
 	ATTR_CMN_SCRIPT = 0x100
-	ATTR_CMN_SETMASK = 0x41c7ff00
+	ATTR_CMN_SETMASK = 0x51c7ff00
 	ATTR_CMN_USERACCESS = 0x200000
 	ATTR_CMN_UUID = 0x800000
 	ATTR_CMN_VALIDMASK = 0xffffffff
@@ -357,7 +358,7 @@ const (
 	DLT_LINUX_SLL = 0x71
 	DLT_LOOP = 0x6c
 	DLT_LTALK = 0x72
-	DLT_MATCHING_MAX = 0xf5
+	DLT_MATCHING_MAX = 0x10a
 	DLT_MATCHING_MIN = 0x68
 	DLT_MFR = 0xb6
 	DLT_MOST = 0xd3
@@ -398,6 +399,7 @@ const (
 	DLT_SYMANTEC_FIREWALL = 0x63
 	DLT_TZSP = 0x80
 	DLT_USB = 0xba
+	DLT_USB_DARWIN = 0x10a
 	DLT_USB_LINUX = 0xbd
 	DLT_USB_LINUX_MMAPPED = 0xdc
 	DLT_USER0 = 0x93
@@ -442,8 +444,8 @@ const (
 	EVFILT_PROC = -0x5
 	EVFILT_READ = -0x1
 	EVFILT_SIGNAL = -0x6
-	EVFILT_SYSCOUNT = 0xf
-	EVFILT_THREADMARKER = 0xf
+	EVFILT_SYSCOUNT = 0x11
+	EVFILT_THREADMARKER = 0x11
 	EVFILT_TIMER = -0x7
 	EVFILT_USER = -0xa
 	EVFILT_VM = -0xc
@@ -481,9 +483,12 @@ const (
 	FSOPT_NOINMEMUPDATE = 0x2
 	FSOPT_PACK_INVAL_ATTRS = 0x8
 	FSOPT_REPORT_FULLSIZE = 0x4
+	FSOPT_RETURN_REALDEV = 0x200
 	F_ADDFILESIGS = 0x3d
 	F_ADDFILESIGS_FOR_DYLD_SIM = 0x53
+	F_ADDFILESIGS_INFO = 0x67
 	F_ADDFILESIGS_RETURN = 0x61
+	F_ADDFILESUPPL = 0x68
 	F_ADDSIGS = 0x3b
 	F_ALLOCATEALL = 0x4
 	F_ALLOCATECONTIG = 0x2
@@ -505,8 +510,10 @@ const (
 	F_GETOWN = 0x5
 	F_GETPATH = 0x32
 	F_GETPATH_MTMINFO = 0x47
+	F_GETPATH_NOFIRMLINK = 0x66
 	F_GETPROTECTIONCLASS = 0x3f
 	F_GETPROTECTIONLEVEL = 0x4d
+	F_GETSIGSINFO = 0x69
 	F_GLOBAL_NOCACHE = 0x37
 	F_LOG2PHYS = 0x31
 	F_LOG2PHYS_EXT = 0x41
@@ -531,6 +538,7 @@ const (
 	F_SETPROTECTIONCLASS = 0x40
 	F_SETSIZE = 0x2b
 	F_SINGLE_WRITER = 0x4c
+	F_SPECULATIVE_READ = 0x65
 	F_THAW_FS = 0x36
 	F_TRANSCODEKEY = 0x4b
 	F_TRIM_ACTIVE_FILE = 0x64
@@ -562,6 +570,7 @@ const (
 	IFF_UP = 0x1
 	IFNAMSIZ = 0x10
 	IFT_1822 = 0x2
+	IFT_6LOWPAN = 0x40
 	IFT_AAL5 = 0x31
 	IFT_ARCNET = 0x23
 	IFT_ARCNETPLUS = 0x24
@@ -766,6 +775,9 @@ const (
 	IPV6_2292PKTINFO = 0x13
 	IPV6_2292PKTOPTIONS = 0x19
 	IPV6_2292RTHDR = 0x18
+	IPV6_ADDR_MC_FLAGS_PREFIX = 0x20
+	IPV6_ADDR_MC_FLAGS_TRANSIENT = 0x10
+	IPV6_ADDR_MC_FLAGS_UNICAST_BASED = 0x30
 	IPV6_BINDV6ONLY = 0x1b
 	IPV6_BOUND_IF = 0x7d
 	IPV6_CHECKSUM = 0x1a
@@ -775,7 +787,7 @@ const (
 	IPV6_FAITH = 0x1d
 	IPV6_FLOWINFO_MASK = 0xffffff0f
 	IPV6_FLOWLABEL_MASK = 0xffff0f00
-	IPV6_FLOW_ECN_MASK = 0x300
+	IPV6_FLOW_ECN_MASK = 0x3000
 	IPV6_FRAGTTL = 0x3c
 	IPV6_FW_ADD = 0x1e
 	IPV6_FW_DEL = 0x1f
@@ -818,6 +830,7 @@ const (
 	IP_DEFAULT_MULTICAST_LOOP = 0x1
 	IP_DEFAULT_MULTICAST_TTL = 0x1
 	IP_DF = 0x4000
+	IP_DONTFRAG = 0x1c
 	IP_DROP_MEMBERSHIP = 0xd
 	IP_DROP_SOURCE_MEMBERSHIP = 0x47
 	IP_DUMMYNET_CONFIGURE = 0x3c
@@ -904,6 +917,7 @@ const (
 	MADV_SEQUENTIAL = 0x2
 	MADV_WILLNEED = 0x3
 	MADV_ZERO_WIRED_PAGES = 0x6
+	MAP_32BIT = 0x8000
 	MAP_ANON = 0x1000
 	MAP_ANONYMOUS = 0x1000
 	MAP_COPY = 0x2
@@ -920,6 +934,17 @@ const (
 	MAP_RESILIENT_CODESIGN = 0x2000
 	MAP_RESILIENT_MEDIA = 0x4000
 	MAP_SHARED = 0x1
+	MAP_TRANSLATED_ALLOW_EXECUTE = 0x20000
+	MAP_UNIX03 = 0x40000
+	MCAST_BLOCK_SOURCE = 0x54
+	MCAST_EXCLUDE = 0x2
+	MCAST_INCLUDE = 0x1
+	MCAST_JOIN_GROUP = 0x50
+	MCAST_JOIN_SOURCE_GROUP = 0x52
+	MCAST_LEAVE_GROUP = 0x51
+	MCAST_LEAVE_SOURCE_GROUP = 0x53
+	MCAST_UNBLOCK_SOURCE = 0x55
+	MCAST_UNDEFINED = 0x0
 	MCL_CURRENT = 0x1
 	MCL_FUTURE = 0x2
 	MNT_ASYNC = 0x40
@@ -931,6 +956,7 @@ const (
 	MNT_DOVOLFS = 0x8000
 	MNT_DWAIT = 0x4
 	MNT_EXPORTED = 0x100
+	MNT_EXT_ROOT_DATA_VOL = 0x1
 	MNT_FORCE = 0x80000
 	MNT_IGNORE_OWNERSHIP = 0x200000
 	MNT_JOURNALED = 0x800000
@@ -947,12 +973,15 @@ const (
 	MNT_QUOTA = 0x2000
 	MNT_RDONLY = 0x1
 	MNT_RELOAD = 0x40000
+	MNT_REMOVABLE = 0x200
 	MNT_ROOTFS = 0x4000
+	MNT_SNAPSHOT = 0x40000000
+	MNT_STRICTATIME = 0x80000000
 	MNT_SYNCHRONOUS = 0x2
 	MNT_UNION = 0x20
 	MNT_UNKNOWNPERMISSIONS = 0x200000
 	MNT_UPDATE = 0x10000
-	MNT_VISFLAGMASK = 0x17f0f5ff
+	MNT_VISFLAGMASK = 0xd7f0f7ff
 	MNT_WAIT = 0x1
 	MSG_CTRUNC = 0x20
 	MSG_DONTROUTE = 0x4
@@ -963,6 +992,7 @@ const (
 	MSG_HAVEMORE = 0x2000
 	MSG_HOLD = 0x800
 	MSG_NEEDSA = 0x10000
+	MSG_NOSIGNAL = 0x80000
 	MSG_OOB = 0x1
 	MSG_PEEK = 0x2
 	MSG_RCVMORE = 0x4000
@@ -979,9 +1009,10 @@ const (
 	NET_RT_DUMP = 0x1
 	NET_RT_DUMP2 = 0x7
 	NET_RT_FLAGS = 0x2
+	NET_RT_FLAGS_PRIV = 0xa
 	NET_RT_IFLIST = 0x3
 	NET_RT_IFLIST2 = 0x6
-	NET_RT_MAXID = 0xa
+	NET_RT_MAXID = 0xb
 	NET_RT_STAT = 0x4
 	NET_RT_TRASH = 0x5
 	NFDBITS = 0x20
@@ -1019,6 +1050,7 @@ const (
 	NOTE_LEEWAY = 0x10
 	NOTE_LINK = 0x10
 	NOTE_LOWAT = 0x1
+	NOTE_MACHTIME = 0x100
 	NOTE_MACH_CONTINUOUS_TIME = 0x80
 	NOTE_NONE = 0x80
 	NOTE_NSECONDS = 0x4
@@ -1065,6 +1097,7 @@ const (
 	O_NDELAY = 0x4
 	O_NOCTTY = 0x20000
 	O_NOFOLLOW = 0x100
+	O_NOFOLLOW_ANY = 0x20000000
 	O_NONBLOCK = 0x4
 	O_POPUP = 0x80000000
 	O_RDONLY = 0x0
@@ -1136,6 +1169,7 @@ const (
 	RTF_BROADCAST = 0x400000
 	RTF_CLONING = 0x100
 	RTF_CONDEMNED = 0x2000000
+	RTF_DEAD = 0x20000000
 	RTF_DELCLONE = 0x80
 	RTF_DONE = 0x40
 	RTF_DYNAMIC = 0x10
@@ -1143,6 +1177,7 @@ const (
 	RTF_HOST = 0x4
 	RTF_IFREF = 0x4000000
 	RTF_IFSCOPE = 0x1000000
+	RTF_LLDATA = 0x400
 	RTF_LLINFO = 0x400
 	RTF_LOCAL = 0x200000
 	RTF_MODIFIED = 0x20
@@ -1210,6 +1245,7 @@ const (
 	SIOCGDRVSPEC = 0xc028697b
 	SIOCGETVLAN = 0xc020697f
 	SIOCGHIWAT = 0x40047301
+	SIOCGIF6LOWPAN = 0xc02069c5
 	SIOCGIFADDR = 0xc0206921
 	SIOCGIFALTMTU = 0xc0206948
 	SIOCGIFASYNCMAP = 0xc020697c
@@ -1220,6 +1256,7 @@ const (
 	SIOCGIFDEVMTU = 0xc0206944
 	SIOCGIFDSTADDR = 0xc0206922
 	SIOCGIFFLAGS = 0xc0206911
+	SIOCGIFFUNCTIONALTYPE = 0xc02069ad
 	SIOCGIFGENERIC = 0xc020693a
 	SIOCGIFKPI = 0xc0206987
 	SIOCGIFMAC = 0xc0206982
@@ -1233,6 +1270,7 @@ const (
 	SIOCGIFSTATUS = 0xc331693d
 	SIOCGIFVLAN = 0xc020697f
 	SIOCGIFWAKEFLAGS = 0xc0206988
+	SIOCGIFXMEDIA = 0xc02c6948
 	SIOCGLOWAT = 0x40047303
 	SIOCGPGRP = 0x40047309
 	SIOCIFCREATE = 0xc0206978
@@ -1243,6 +1281,7 @@ const (
 	SIOCSDRVSPEC = 0x8028697b
 	SIOCSETVLAN = 0x8020697e
 	SIOCSHIWAT = 0x80047300
+	SIOCSIF6LOWPAN = 0x802069c4
 	SIOCSIFADDR = 0x8020690c
 	SIOCSIFALTMTU = 0x80206945
 	SIOCSIFASYNCMAP = 0x8020697d

View file

@@ -2449,7 +2449,7 @@ func (c *BucketAccessControlsDeleteCall) Header() http.Header {
 func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -2597,7 +2597,7 @@ func (c *BucketAccessControlsGetCall) Header() http.Header {
 func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -2764,7 +2764,7 @@ func (c *BucketAccessControlsInsertCall) Header() http.Header {
 func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -2937,7 +2937,7 @@ func (c *BucketAccessControlsListCall) Header() http.Header {
 func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -3098,7 +3098,7 @@ func (c *BucketAccessControlsPatchCall) Header() http.Header {
 func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -3272,7 +3272,7 @@ func (c *BucketAccessControlsUpdateCall) Header() http.Header {
 func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -3458,7 +3458,7 @@ func (c *BucketsDeleteCall) Header() http.Header {
 func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -3637,7 +3637,7 @@ func (c *BucketsGetCall) Header() http.Header {
 func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -3843,7 +3843,7 @@ func (c *BucketsGetIamPolicyCall) Header() http.Header {
 func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -4060,7 +4060,7 @@ func (c *BucketsInsertCall) Header() http.Header {
 func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -4317,7 +4317,7 @@ func (c *BucketsListCall) Header() http.Header {
 func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -4527,7 +4527,7 @@ func (c *BucketsLockRetentionPolicyCall) Header() http.Header {
 func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -4762,7 +4762,7 @@ func (c *BucketsPatchCall) Header() http.Header {
 func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -4991,7 +4991,7 @@ func (c *BucketsSetIamPolicyCall) Header() http.Header {
 func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -5166,7 +5166,7 @@ func (c *BucketsTestIamPermissionsCall) Header() http.Header {
 func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -5406,7 +5406,7 @@ func (c *BucketsUpdateCall) Header() http.Header {
 func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -5618,7 +5618,7 @@ func (c *ChannelsStopCall) Header() http.Header {
 func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -5735,7 +5735,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header {
 func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -5883,7 +5883,7 @@ func (c *DefaultObjectAccessControlsGetCall) Header() http.Header {
 func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -6051,7 +6051,7 @@ func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header {
 func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -6241,7 +6241,7 @@ func (c *DefaultObjectAccessControlsListCall) Header() http.Header {
 func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -6414,7 +6414,7 @@ func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header {
 func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -6588,7 +6588,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header {
 func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -6760,7 +6760,7 @@ func (c *NotificationsDeleteCall) Header() http.Header {
 func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -6908,7 +6908,7 @@ func (c *NotificationsGetCall) Header() http.Header {
 func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -7078,7 +7078,7 @@ func (c *NotificationsInsertCall) Header() http.Header {
 func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -7253,7 +7253,7 @@ func (c *NotificationsListCall) Header() http.Header {
 func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -7426,7 +7426,7 @@ func (c *ObjectAccessControlsDeleteCall) Header() http.Header {
 func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -7598,7 +7598,7 @@ func (c *ObjectAccessControlsGetCall) Header() http.Header {
 func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -7789,7 +7789,7 @@ func (c *ObjectAccessControlsInsertCall) Header() http.Header {
 func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -7986,7 +7986,7 @@ func (c *ObjectAccessControlsListCall) Header() http.Header {
 func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -8171,7 +8171,7 @@ func (c *ObjectAccessControlsPatchCall) Header() http.Header {
 func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -8369,7 +8369,7 @@ func (c *ObjectAccessControlsUpdateCall) Header() http.Header {
 func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -8606,7 +8606,7 @@ func (c *ObjectsComposeCall) Header() http.Header {
 func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -8949,7 +8949,7 @@ func (c *ObjectsCopyCall) Header() http.Header {
 func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -9277,7 +9277,7 @@ func (c *ObjectsDeleteCall) Header() http.Header {
 func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -9510,7 +9510,7 @@ func (c *ObjectsGetCall) Header() http.Header {
 func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -9760,7 +9760,7 @@ func (c *ObjectsGetIamPolicyCall) Header() http.Header {
 func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -10077,7 +10077,7 @@ func (c *ObjectsInsertCall) Header() http.Header {
 func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -10450,7 +10450,7 @@ func (c *ObjectsListCall) Header() http.Header {
 func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -10767,7 +10767,7 @@ func (c *ObjectsPatchCall) Header() http.Header {
 func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -11159,7 +11159,7 @@ func (c *ObjectsRewriteCall) Header() http.Header {
 func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -11462,7 +11462,7 @@ func (c *ObjectsSetIamPolicyCall) Header() http.Header {
 func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -11662,7 +11662,7 @@ func (c *ObjectsTestIamPermissionsCall) Header() http.Header {
 func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -11923,7 +11923,7 @@ func (c *ObjectsUpdateCall) Header() http.Header {
 func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -12241,7 +12241,7 @@ func (c *ObjectsWatchAllCall) Header() http.Header {
 func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -12457,7 +12457,7 @@ func (c *ProjectsHmacKeysCreateCall) Header() http.Header {
 func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -12607,7 +12607,7 @@ func (c *ProjectsHmacKeysDeleteCall) Header() http.Header {
 func (c *ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -12742,7 +12742,7 @@ func (c *ProjectsHmacKeysGetCall) Header() http.Header {
 func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -12942,7 +12942,7 @@ func (c *ProjectsHmacKeysListCall) Header() http.Header {
 func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -13137,7 +13137,7 @@ func (c *ProjectsHmacKeysUpdateCall) Header() http.Header {
 func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -13314,7 +13314,7 @@ func (c *ProjectsServiceAccountGetCall) Header() http.Header {
 func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210203")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210211")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}

vendor/modules.txt
View file

@@ -1,4 +1,4 @@
-# cloud.google.com/go v0.76.0
+# cloud.google.com/go v0.77.0
 cloud.google.com/go
 cloud.google.com/go/compute/metadata
 cloud.google.com/go/iam
@@ -14,14 +14,14 @@ github.com/VictoriaMetrics/fastcache
 github.com/VictoriaMetrics/fasthttp
 github.com/VictoriaMetrics/fasthttp/fasthttputil
 github.com/VictoriaMetrics/fasthttp/stackless
-# github.com/VictoriaMetrics/metrics v1.13.1
+# github.com/VictoriaMetrics/metrics v1.14.0
 github.com/VictoriaMetrics/metrics
-# github.com/VictoriaMetrics/metricsql v0.10.0
+# github.com/VictoriaMetrics/metricsql v0.10.1
 github.com/VictoriaMetrics/metricsql
 github.com/VictoriaMetrics/metricsql/binaryop
 # github.com/VividCortex/ewma v1.1.1
 github.com/VividCortex/ewma
-# github.com/aws/aws-sdk-go v1.37.7
+# github.com/aws/aws-sdk-go v1.37.12
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/arn
 github.com/aws/aws-sdk-go/aws/awserr
@@ -77,7 +77,7 @@ github.com/aws/aws-sdk-go/service/sts/stsiface
 github.com/beorn7/perks/quantile
 # github.com/cespare/xxhash/v2 v2.1.1
 github.com/cespare/xxhash/v2
-# github.com/cheggaaa/pb/v3 v3.0.5
+# github.com/cheggaaa/pb/v3 v3.0.6
 github.com/cheggaaa/pb/v3
 github.com/cheggaaa/pb/v3/termutil
 # github.com/cpuguy83/go-md2man/v2 v2.0.0
@@ -143,7 +143,7 @@ github.com/prometheus/client_model/go
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
 github.com/prometheus/common/model
-# github.com/prometheus/procfs v0.4.1
+# github.com/prometheus/procfs v0.6.0
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util
@@ -218,7 +218,7 @@ golang.org/x/net/http2/hpack
 golang.org/x/net/idna
 golang.org/x/net/internal/timeseries
 golang.org/x/net/trace
-# golang.org/x/oauth2 v0.0.0-20210201163806-010130855d6c
+# golang.org/x/oauth2 v0.0.0-20210216194517-16ff1888fd2e
 golang.org/x/oauth2
 golang.org/x/oauth2/google
 golang.org/x/oauth2/google/internal/externalaccount
@@ -227,7 +227,7 @@ golang.org/x/oauth2/jws
 golang.org/x/oauth2/jwt
 # golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
 golang.org/x/sync/errgroup
-# golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
+# golang.org/x/sys v0.0.0-20210216163648-f7da38b97c65
 golang.org/x/sys/execabs
 golang.org/x/sys/internal/unsafeheader
 golang.org/x/sys/unix
@@ -253,7 +253,7 @@ golang.org/x/tools/internal/imports
 # golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
 golang.org/x/xerrors
 golang.org/x/xerrors/internal
-# google.golang.org/api v0.39.0
+# google.golang.org/api v0.40.0
 google.golang.org/api/googleapi
 google.golang.org/api/googleapi/transport
 google.golang.org/api/internal
@@ -279,7 +279,7 @@ google.golang.org/appengine/internal/modules
 google.golang.org/appengine/internal/remote_api
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20210207032614-bba0dbe2a9ea
+# google.golang.org/genproto v0.0.0-20210212180131-e7f2df4ecc2d
 google.golang.org/genproto/googleapis/api/annotations
 google.golang.org/genproto/googleapis/iam/v1
 google.golang.org/genproto/googleapis/rpc/code