all: follow-up after 05cf8a6ecc

parent 4822406b64
commit c7ce4979ec

112 changed files with 30891 additions and 1133 deletions
@@ -9,6 +9,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/prometheus"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
 	"github.com/prometheus/prometheus/tsdb"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
 )
 
 type prometheusProcessor struct {
@@ -123,7 +124,15 @@ func (pp *prometheusProcessor) do(b tsdb.BlockReader) error {
 	var timestamps []int64
 	var values []float64
 	it := series.Iterator()
-	for it.Next() {
+	for {
+		typ := it.Next()
+		if typ == chunkenc.ValNone {
+			break
+		}
+		if typ != chunkenc.ValFloat {
+			// Skip unsupported values
+			continue
+		}
 		t, v := it.At()
 		timestamps = append(timestamps, t)
 		values = append(values, v)
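The loop rewrite above tracks an API change that arrives with the Prometheus v0.40 bump further down in go.mod: `chunkenc.Iterator.Next()` now returns a `chunkenc.ValueType` instead of a bool, so callers stop on `chunkenc.ValNone` and filter for the sample types they can handle. A minimal runnable sketch of the new pattern, assuming the `github.com/prometheus/prometheus` module at v0.40.x:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

func main() {
	// Build a small XOR chunk holding three float samples.
	chunk := chunkenc.NewXORChunk()
	app, err := chunk.Appender()
	if err != nil {
		panic(err)
	}
	for i := int64(0); i < 3; i++ {
		app.Append(i*1000, float64(i)) // (timestamp in ms, value)
	}

	// New-style iteration: Next() reports the type of the next value.
	it := chunk.Iterator(nil)
	for {
		typ := it.Next()
		if typ == chunkenc.ValNone {
			break // no more values (or an error; see it.Err() below)
		}
		if typ != chunkenc.ValFloat {
			continue // skip native-histogram samples, as the code above does
		}
		t, v := it.At()
		fmt.Println(t, v)
	}
	if err := it.Err(); err != nil {
		panic(err)
	}
}
```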
@@ -276,7 +276,15 @@ func parseSamples(chunk []byte) ([]prompb.Sample, error) {
 
 	var samples []prompb.Sample
 	it := c.Iterator(nil)
-	for it.Next() {
+	for {
+		typ := it.Next()
+		if typ == chunkenc.ValNone {
+			break
+		}
+		if typ != chunkenc.ValFloat {
+			// Skip unsupported values
+			continue
+		}
 		if it.Err() != nil {
 			return nil, fmt.Errorf("error iterate over chunks: %w", it.Err())
 		}
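A related note on the hunk above: `Next()` returning `chunkenc.ValNone` covers both normal exhaustion and an iteration error, which is why an `it.Err()` check belongs near the loop. A self-contained sketch of the common post-loop variant (the `Sample` type and function name are illustrative, not vmctl code):

```go
package chunkread

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// Sample is an illustrative (timestamp, value) pair.
type Sample struct {
	T int64
	V float64
}

// collectFloats drains a chunkenc.Iterator, keeping only float samples,
// and surfaces any iteration error once, after the loop finishes.
func collectFloats(it chunkenc.Iterator) ([]Sample, error) {
	var samples []Sample
	for vt := it.Next(); vt != chunkenc.ValNone; vt = it.Next() {
		if vt != chunkenc.ValFloat {
			continue // skip unsupported value types
		}
		t, v := it.At()
		samples = append(samples, Sample{T: t, V: v})
	}
	if err := it.Err(); err != nil {
		return nil, fmt.Errorf("iterate over chunk: %w", err)
	}
	return samples, nil
}
```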
@@ -17,6 +17,7 @@ The following tip changes can be tested by building VictoriaMetrics components f
 
 * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add `exported_` prefix to metric names exported by scrape targets if these metric names clash with [automatically generated metrics](https://docs.victoriametrics.com/vmagent.html#automatically-generated-metrics) such as `up`, `scrape_samples_scraped`, etc. This prevents from corruption of automatically generated metrics. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3406).
 * FEATURE: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): improve error message when the requested path cannot be properly parsed, so users could identify the issue and properly fix the path. Now the error message links to [url format docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3402).
+* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): add ability to copy data from sources via Prometheus `remote_read` protocol. See [these docs](https://docs.victoriametrics.com/vmctl.html#migrating-data-by-remote-read-protocol). The related issues: [one](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3132) and [two](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1101).
 
 
 ## [v1.84.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.84.0)
go.mod (22 lines changed)
@@ -12,18 +12,19 @@ require (
 	// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
 	github.com/VictoriaMetrics/fasthttp v1.1.0
 	github.com/VictoriaMetrics/metrics v1.23.0
-	github.com/VictoriaMetrics/metricsql v0.49.0
+	github.com/VictoriaMetrics/metricsql v0.49.1
 	github.com/aws/aws-sdk-go-v2 v1.17.1
 	github.com/aws/aws-sdk-go-v2/config v1.18.3
 	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.42
 	github.com/aws/aws-sdk-go-v2/service/s3 v1.29.4
 	github.com/cespare/xxhash/v2 v2.1.2
 	github.com/cheggaaa/pb/v3 v3.1.0
+	github.com/gogo/protobuf v1.3.2
 	github.com/golang/snappy v0.0.4
 	github.com/googleapis/gax-go/v2 v2.7.0
 	github.com/influxdata/influxdb v1.10.0
 	github.com/klauspost/compress v1.15.12
-	github.com/prometheus/prometheus v0.39.1
+	github.com/prometheus/prometheus v0.40.4
 	github.com/urfave/cli/v2 v2.23.5
 	github.com/valyala/fastjson v1.6.3
 	github.com/valyala/fastrand v1.1.0
@@ -37,8 +38,6 @@ require (
 	gopkg.in/yaml.v2 v2.4.0
 )
 
-require github.com/gogo/protobuf v1.3.2
-
 require (
 	cloud.google.com/go v0.107.0 // indirect
 	cloud.google.com/go/compute v1.12.1 // indirect
@@ -47,7 +46,7 @@ require (
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
-	github.com/aws/aws-sdk-go v1.44.102 // indirect
+	github.com/aws/aws-sdk-go v1.44.149 // indirect
 	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.9 // indirect
 	github.com/aws/aws-sdk-go-v2/credentials v1.13.3 // indirect
 	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.19 // indirect
@@ -78,7 +77,7 @@ require (
 	github.com/google/go-cmp v0.5.9 // indirect
 	github.com/google/uuid v1.3.0 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect
-	github.com/grafana/regexp v0.0.0-20221005092906-f072a00f63e9 // indirect
+	github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
 	github.com/hashicorp/go-hclog v0.16.2 // indirect
 	github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
@@ -103,15 +102,16 @@ require (
 	github.com/valyala/histogram v1.2.0 // indirect
 	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.3 // indirect
-	go.opentelemetry.io/otel v1.11.0 // indirect
-	go.opentelemetry.io/otel/metric v0.32.3 // indirect
-	go.opentelemetry.io/otel/trace v1.11.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.4 // indirect
+	go.opentelemetry.io/otel v1.11.1 // indirect
+	go.opentelemetry.io/otel/metric v0.33.0 // indirect
+	go.opentelemetry.io/otel/trace v1.11.1 // indirect
 	go.uber.org/atomic v1.10.0 // indirect
 	go.uber.org/goleak v1.2.0 // indirect
 	golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9 // indirect
 	golang.org/x/sync v0.1.0 // indirect
 	golang.org/x/text v0.4.0 // indirect
-	golang.org/x/time v0.1.0 // indirect
+	golang.org/x/time v0.2.0 // indirect
 	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect
go.sum (86 lines changed)
@@ -71,8 +71,8 @@ github.com/VictoriaMetrics/fasthttp v1.1.0/go.mod h1:/7DMcogqd+aaD3G3Hg5kFgoFwlR
 github.com/VictoriaMetrics/metrics v1.18.1/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
 github.com/VictoriaMetrics/metrics v1.23.0 h1:WzfqyzCaxUZip+OBbg1+lV33WChDSu4ssYII3nxtpeA=
 github.com/VictoriaMetrics/metrics v1.23.0/go.mod h1:rAr/llLpEnAdTehiNlUxKgnjcOuROSzpw0GvjpEbvFc=
-github.com/VictoriaMetrics/metricsql v0.49.0 h1:7R04eab3gU0PKu8Ksak7SJnORXm0K+hSGt2+t3XGyKg=
-github.com/VictoriaMetrics/metricsql v0.49.0/go.mod h1:6pP1ZeLVJHqJrHlF6Ij3gmpQIznSsgktEcZgsAWYel0=
+github.com/VictoriaMetrics/metricsql v0.49.1 h1:9JAbpiZhlQnylclcf5xNtYRaBd5dr2CTPQ85RIoruuk=
+github.com/VictoriaMetrics/metricsql v0.49.1/go.mod h1:6pP1ZeLVJHqJrHlF6Ij3gmpQIznSsgktEcZgsAWYel0=
 github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
 github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
 github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
@@ -89,8 +89,8 @@ github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu
 github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
 github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo=
 github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.44.102 h1:6tUCTGL2UDbFZae1TLGk8vTgeXuzkb8KbAe2FiAeKHc=
-github.com/aws/aws-sdk-go v1.44.102/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go v1.44.149 h1:zTWaUTbSjgMHvwhaQ91s/6ER8wMb3mA8M1GCZFO9QIo=
+github.com/aws/aws-sdk-go v1.44.149/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/aws/aws-sdk-go-v2 v1.17.1 h1:02c72fDJr87N8RAC2s3Qu0YuvMRZKNZJ9F+lAehCazk=
 github.com/aws/aws-sdk-go-v2 v1.17.1/go.mod h1:JLnGeGONAyi2lWXI1p0PCIOIy333JMVK1U7Hf0aRFLw=
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.9 h1:RKci2D7tMwpvGpDNZnGQw9wk6v7o/xSwFcUAuNPoB8k=
@@ -152,12 +152,12 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
 github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
-github.com/digitalocean/godo v1.84.1 h1:VgPsuxhrO9pUygvij6qOhqXfAkxAsDZYRpmjSDMEaHo=
+github.com/digitalocean/godo v1.88.0 h1:SAEdw63xOMmzlwCeCWjLH1GcyDPUjbSAR1Bh7VELxzc=
 github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c=
 github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
-github.com/docker/docker v20.10.18+incompatible h1:SN84VYXTBNGn92T/QwIRPlum9zfemfitN7pbsp26WSc=
+github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
-github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
 github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -165,14 +165,14 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
 github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/envoyproxy/protoc-gen-validate v0.6.8 h1:B2cR/FAaiMtYDHv5BQpaqtkjGuWQIgr2KQZtHQ7f6i8=
+github.com/envoyproxy/protoc-gen-validate v0.6.13 h1:TvDcILLkjuZV3ER58VkBmncKsLUBqBDxra/XctCzuMM=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
 github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
 github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
 github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -278,9 +278,9 @@ github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1Yu
 github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
 github.com/gophercloud/gophercloud v1.0.0 h1:9nTGx0jizmHxDobe4mck89FyQHVyA3CaXLIUSGJjP9k=
 github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
-github.com/grafana/regexp v0.0.0-20221005092906-f072a00f63e9 h1:/J04vzVQbRAlvBw1NSDlADwPAzkO7wUzgM0P1DPd+UE=
-github.com/grafana/regexp v0.0.0-20221005092906-f072a00f63e9/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
-github.com/hashicorp/consul/api v1.15.2 h1:3Q/pDqvJ7udgt/60QOOW/p/PeKioQN+ncYzzCdN2av0=
+github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
+github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
+github.com/hashicorp/consul/api v1.15.3 h1:WYONYL2rxTXtlekAqblR2SCdJsizMDIj/uXb5wNy9zU=
 github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c=
 github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
 github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs=
@@ -293,7 +293,7 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
-github.com/hashicorp/nomad/api v0.0.0-20220921012004-ddeeb1040edf h1:l/EZ57iRPNs8vd8c9qH0dB4Q+IiZHJouLAgxJ5j25tU=
+github.com/hashicorp/nomad/api v0.0.0-20221102143410-8a95f1239005 h1:jKwXhVS4F7qk0g8laz+Anz0g/6yaSJ3HqmSAuSNLUcA=
 github.com/hashicorp/serf v0.9.7 h1:hkdgbqizGQHuU5IPqYM1JdSMV8nKfpuOnZYXssk9muY=
 github.com/hetznercloud/hcloud-go v1.35.3 h1:WCmFAhLRooih2QHAsbCbEdpIHnshQQmrPqsr3rHE1Ow=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -323,7 +323,7 @@ github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8
 github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
 github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
-github.com/kolo/xmlrpc v0.0.0-20220919000247-3377102c83bd h1:b1taQnM42dp3NdiiQwfmM1WyyucHayZSKN5R0PRYWL0=
+github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -333,7 +333,7 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
-github.com/linode/linodego v1.9.1 h1:29UpEPpYcGFnbwiJW8mbk/bjBZpgd/pv68io2IKTo34=
+github.com/linode/linodego v1.9.3 h1:+lxNZw4avRxhCqGjwfPgQ2PvMT+vOL0OMsTdzixR7hQ=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
 github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
@@ -370,6 +370,7 @@ github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
+github.com/ovh/go-ovh v1.1.0 h1:bHXZmw8nTgZin4Nv7JuaLs0KG5x54EQR7migYTd1zrk=
 github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -406,16 +407,12 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1
 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
 github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
-github.com/prometheus/prometheus v0.39.1 h1:abZM6A+sKAv2eKTbRIaHq4amM/nT07MuxRm0+QTaTj0=
-github.com/prometheus/prometheus v0.39.1/go.mod h1:GjQjgLhHMc0oo4Ko7qt/yBSJMY4hUoiAZwsYQgjaePA=
+github.com/prometheus/prometheus v0.40.4 h1:6aLtQSvnhmC/uo5Tx910AQm3Fxq1nzaJA6uiYtsA6So=
+github.com/prometheus/prometheus v0.40.4/go.mod h1:bxgdmtoSNLmmIVPGmeTJ3OiP67VmuY4yalE4ZP6L/j8=
 github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw=
 github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -462,6 +459,7 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
 go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
 go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -469,14 +467,14 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.3 h1:SGz6Fnp7blR+sskRZkyuFDb3qI1d8I0ygLh13F+sw6I=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.3/go.mod h1:+OXcluxum2GicWQ9lMXLQkLkOWoaw20OrVbYq6kkPks=
-go.opentelemetry.io/otel v1.11.0 h1:kfToEGMDq6TrVrJ9Vht84Y8y9enykSZzDDZglV0kIEk=
-go.opentelemetry.io/otel v1.11.0/go.mod h1:H2KtuEphyMvlhZ+F7tg9GRhAOe60moNx61Ex+WmiKkk=
-go.opentelemetry.io/otel/metric v0.32.3 h1:dMpnJYk2KULXr0j8ph6N7+IcuiIQXlPXD4kix9t7L9c=
-go.opentelemetry.io/otel/metric v0.32.3/go.mod h1:pgiGmKohxHyTPHGOff+vrtIH39/R9fiO/WoenUQ3kcc=
-go.opentelemetry.io/otel/trace v1.11.0 h1:20U/Vj42SX+mASlXLmSGBg6jpI1jQtv682lZtTAOVFI=
-go.opentelemetry.io/otel/trace v1.11.0/go.mod h1:nyYjis9jy0gytE9LXGU+/m1sHTKbRY0fX0hulNNDP1U=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.4 h1:aUEBEdCa6iamGzg6fuYxDA8ThxvOG240mAvWDU+XLio=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.4/go.mod h1:l2MdsbKTocpPS5nQZscqTR9jd8u96VYZdcpF8Sye7mA=
+go.opentelemetry.io/otel v1.11.1 h1:4WLLAmcfkmDk2ukNXJyq3/kiz/3UzCaYq6PskJsaou4=
+go.opentelemetry.io/otel v1.11.1/go.mod h1:1nNhXBbWSD0nsL38H6btgnFN2k4i0sNLHNNMZMSbUGE=
+go.opentelemetry.io/otel/metric v0.33.0 h1:xQAyl7uGEYvrLAiV/09iTJlp1pZnQ9Wl793qbVvED1E=
+go.opentelemetry.io/otel/metric v0.33.0/go.mod h1:QlTYc+EnYNq/M2mNk1qDDMRLpqCOj2f/r5c7Fd5FYaI=
+go.opentelemetry.io/otel/trace v1.11.1 h1:ofxdnzsNrGBYXbP7t7zpUK281+go5rF7dvdIZXF8gdQ=
+go.opentelemetry.io/otel/trace v1.11.1/go.mod h1:f/Q9G7vzk5u91PhbmKbg1Qn0rzH1LJ4vbPHFGkTPtOk=
 go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
 go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
 go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
@@ -488,7 +486,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
-golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -499,6 +498,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9 h1:yZNXmy+j/JpX19vZkVktWqAo7Gny4PBWYYK3zskGpx4=
+golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -520,7 +521,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
 golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -556,6 +558,8 @@ golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qx
 golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
 golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU=
 golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -577,6 +581,7 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -625,11 +630,15 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
 golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -643,8 +652,8 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA=
-golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.2.0 h1:52I/1L54xyEQAYdtcSuxtiT84KGYTBGXwayxmIpNJhE=
+golang.org/x/time v0.2.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -687,7 +696,8 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc
 golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
 golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -749,10 +759,8 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
 google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c=
 google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
 google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -808,9 +816,9 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
 honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
 honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.25.1 h1:yL7du50yc93k17nH/Xe9jujAYrcDkI/i5DL1jPz4E3M=
-k8s.io/apimachinery v0.25.1 h1:t0XrnmCEHVgJlR2arwO8Awp9ylluDic706WePaYCBTI=
-k8s.io/client-go v0.25.1 h1:uFj4AJKtE1/ckcSKz8IhgAuZTdRXZDKev8g387ndD58=
+k8s.io/api v0.25.3 h1:Q1v5UFfYe87vi5H7NU0p4RXC26PPMT8KOpr1TLQbCMQ=
+k8s.io/apimachinery v0.25.3 h1:7o9ium4uyUOM76t6aunP0nZuex7gDf8VGwkR5RcJnQc=
+k8s.io/client-go v0.25.3 h1:oB4Dyl8d6UbfDHD8Bv8evKylzs3BXzzufLiO27xuPs0=
 k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
 k8s.io/klog/v2 v2.80.0 h1:lyJt0TWMPaGoODa8B8bUuxgHS3W/m/bNr2cca3brA/g=
 k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA=
vendor/github.com/VictoriaMetrics/metricsql/lexer.go (generated, vendored; 14 lines changed)
@@ -560,13 +560,17 @@ func DurationValue(s string, step int64) (int64, error) {
 	if len(s) == 0 {
 		return 0, fmt.Errorf("duration cannot be empty")
 	}
-	// Try parsing floating-point duration
-	d, err := strconv.ParseFloat(s, 64)
-	if err == nil {
-		// Convert the duration to milliseconds.
-		return int64(d * 1000), nil
+	lastChar := s[len(s)-1]
+	if lastChar >= '0' && lastChar <= '9' || lastChar == '.' {
+		// Try parsing floating-point duration
+		d, err := strconv.ParseFloat(s, 64)
+		if err == nil {
+			// Convert the duration to milliseconds.
+			return int64(d * 1000), nil
+		}
 	}
 	isMinus := false
+	d := float64(0)
 	for len(s) > 0 {
 		n := scanSingleDuration(s, true)
 		if n <= 0 {
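The guard above narrows the `ParseFloat` fast path to strings that end in a digit or `'.'`. A consequence visible right in the code: inputs such as "Inf" or "NaN", which `strconv.ParseFloat` happily accepts, no longer get read as numeric durations and instead fall through to the unit scanner. A small usage sketch, assuming `metricsql` at v0.49.1:

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/metricsql"
)

func main() {
	const step = 30_000 // step in milliseconds; only relevant for step-based durations

	for _, s := range []string{"123", "0.5", "1h30m"} {
		ms, err := metricsql.DurationValue(s, step)
		fmt.Println(s, ms, err) // 123 -> 123000, 0.5 -> 500, 1h30m -> 5400000
	}

	// "Inf" parses as a float, but its last character is not a digit or '.',
	// so it now takes the unit-scanner path and is rejected.
	if _, err := metricsql.DurationValue("Inf", step); err != nil {
		fmt.Println("rejected:", err)
	}
}
```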
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go (generated, vendored; 2020 lines changed)
File diff suppressed because it is too large.
vendor/github.com/aws/aws-sdk-go/aws/version.go (generated, vendored; 2 lines changed)
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.44.102"
+const SDKVersion = "1.44.149"
vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go (generated, vendored; 18 lines changed)
@@ -1,9 +1,8 @@
 package shareddefaults
 
 import (
-	"os"
+	"os/user"
 	"path/filepath"
-	"runtime"
 )
 
 // SharedCredentialsFilename returns the SDK's default file path
@@ -31,10 +30,17 @@ func SharedConfigFilename() string {
 // UserHomeDir returns the home directory for the user the process is
 // running under.
 func UserHomeDir() string {
-	if runtime.GOOS == "windows" { // Windows
-		return os.Getenv("USERPROFILE")
+	var home string
+
+	home = userHomeDir()
+	if len(home) > 0 {
+		return home
 	}
 
-	// *nix
-	return os.Getenv("HOME")
+	currUser, _ := user.Current()
+	if currUser != nil {
+		home = currUser.HomeDir
+	}
+
+	return home
 }
vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go (generated, vendored; new file, 18 lines)
@@ -0,0 +1,18 @@
+//go:build !go1.12
+// +build !go1.12
+
+package shareddefaults
+
+import (
+	"os"
+	"runtime"
+)
+
+func userHomeDir() string {
+	if runtime.GOOS == "windows" { // Windows
+		return os.Getenv("USERPROFILE")
+	}
+
+	// *nix
+	return os.Getenv("HOME")
+}
vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go (generated, vendored; new file, 13 lines)
@@ -0,0 +1,13 @@
+//go:build go1.12
+// +build go1.12
+
+package shareddefaults
+
+import (
+	"os"
+)
+
+func userHomeDir() string {
+	home, _ := os.UserHomeDir()
+	return home
+}
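The two new files above carry mutually exclusive `go1.12` build tags, so exactly one `userHomeDir` implementation is compiled: Go 1.12+ defers to `os.UserHomeDir`, while older toolchains keep the hand-rolled GOOS switch. A sketch condensing the resulting lookup chain into a single function (names are illustrative, not SDK code):

```go
package main

import (
	"fmt"
	"os"
	"os/user"
)

// homeDir mirrors the fallback chain of the updated UserHomeDir:
// prefer os.UserHomeDir (env-based, Go 1.12+), then user.Current.
func homeDir() string {
	if home, err := os.UserHomeDir(); err == nil && home != "" {
		return home
	}
	if u, err := user.Current(); err == nil {
		return u.HomeDir
	}
	return ""
}

func main() {
	fmt.Println(homeDir())
}
```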
vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go (generated, vendored; 19 lines changed)
@@ -4,7 +4,6 @@ package jsonutil
 import (
 	"bytes"
 	"encoding/base64"
-	"encoding/json"
 	"fmt"
 	"math"
 	"reflect"
@@ -16,6 +15,12 @@ import (
 	"github.com/aws/aws-sdk-go/private/protocol"
 )
 
+const (
+	floatNaN    = "NaN"
+	floatInf    = "Infinity"
+	floatNegInf = "-Infinity"
+)
+
 var timeType = reflect.ValueOf(time.Time{}).Type()
 var byteSliceType = reflect.ValueOf([]byte{}).Type()
 
@@ -211,10 +216,16 @@ func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) erro
 		buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10))
 	case reflect.Float64:
 		f := value.Float()
-		if math.IsInf(f, 0) || math.IsNaN(f) {
-			return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)}
+		switch {
+		case math.IsNaN(f):
+			writeString(floatNaN, buf)
+		case math.IsInf(f, 1):
+			writeString(floatInf, buf)
+		case math.IsInf(f, -1):
+			writeString(floatNegInf, buf)
+		default:
+			buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64))
 		}
-		buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64))
 	default:
 		switch converted := value.Interface().(type) {
 		case time.Time:
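All the protocol tweaks in this vendor update follow one scheme: NaN and ±Inf serialize as the sentinel strings "NaN", "Infinity" and "-Infinity", and the unmarshal side maps them back case-insensitively. A standalone sketch of that round trip (my illustration, not SDK code):

```go
package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"
)

const (
	floatNaN    = "NaN"
	floatInf    = "Infinity"
	floatNegInf = "-Infinity"
)

// encodeFloat mirrors the build-side switch: special values become
// sentinel strings, everything else the usual shortest decimal form.
func encodeFloat(f float64) string {
	switch {
	case math.IsNaN(f):
		return floatNaN
	case math.IsInf(f, 1):
		return floatInf
	case math.IsInf(f, -1):
		return floatNegInf
	default:
		return strconv.FormatFloat(f, 'f', -1, 64)
	}
}

// decodeFloat mirrors the unmarshal side, matching case-insensitively.
func decodeFloat(s string) (float64, error) {
	switch {
	case strings.EqualFold(s, floatNaN):
		return math.NaN(), nil
	case strings.EqualFold(s, floatInf):
		return math.Inf(1), nil
	case strings.EqualFold(s, floatNegInf):
		return math.Inf(-1), nil
	default:
		return strconv.ParseFloat(s, 64)
	}
}

func main() {
	for _, f := range []float64{1.5, math.NaN(), math.Inf(1), math.Inf(-1)} {
		s := encodeFloat(f)
		back, _ := decodeFloat(s)
		fmt.Printf("%v -> %q -> %v\n", f, s, back)
	}
}
```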
vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go (generated, vendored; 13 lines changed)
@@ -6,6 +6,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"math"
 	"math/big"
 	"reflect"
 	"strings"
@@ -258,6 +259,18 @@ func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag
 			return err
 		}
 		value.Set(reflect.ValueOf(v))
+	case *float64:
+		// These are regular strings when parsed by encoding/json's unmarshaler.
+		switch {
+		case strings.EqualFold(d, floatNaN):
+			value.Set(reflect.ValueOf(aws.Float64(math.NaN())))
+		case strings.EqualFold(d, floatInf):
+			value.Set(reflect.ValueOf(aws.Float64(math.Inf(1))))
+		case strings.EqualFold(d, floatNegInf):
+			value.Set(reflect.ValueOf(aws.Float64(math.Inf(-1))))
+		default:
+			return fmt.Errorf("unknown JSON number value: %s", d)
+		}
 	default:
 		return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
 	}
vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go (generated, vendored; 34 lines changed)
@@ -3,6 +3,7 @@ package queryutil
 import (
 	"encoding/base64"
 	"fmt"
+	"math"
 	"net/url"
 	"reflect"
 	"sort"
@@ -13,6 +14,12 @@ import (
 	"github.com/aws/aws-sdk-go/private/protocol"
 )
 
+const (
+	floatNaN    = "NaN"
+	floatInf    = "Infinity"
+	floatNegInf = "-Infinity"
+)
+
 // Parse parses an object i and fills a url.Values object. The isEC2 flag
 // indicates if this is the EC2 Query sub-protocol.
 func Parse(body url.Values, i interface{}, isEC2 bool) error {
@@ -228,9 +235,32 @@ func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, ta
 	case int:
 		v.Set(name, strconv.Itoa(value))
 	case float64:
-		v.Set(name, strconv.FormatFloat(value, 'f', -1, 64))
+		var str string
+		switch {
+		case math.IsNaN(value):
+			str = floatNaN
+		case math.IsInf(value, 1):
+			str = floatInf
+		case math.IsInf(value, -1):
+			str = floatNegInf
+		default:
+			str = strconv.FormatFloat(value, 'f', -1, 64)
+		}
+		v.Set(name, str)
 	case float32:
-		v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
+		asFloat64 := float64(value)
+		var str string
+		switch {
+		case math.IsNaN(asFloat64):
+			str = floatNaN
+		case math.IsInf(asFloat64, 1):
+			str = floatInf
+		case math.IsInf(asFloat64, -1):
+			str = floatNegInf
+		default:
+			str = strconv.FormatFloat(asFloat64, 'f', -1, 32)
+		}
+		v.Set(name, str)
 	case time.Time:
 		const ISO8601UTC = "2006-01-02T15:04:05Z"
 		format := tag.Get("timestampFormat")
vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go (generated, vendored; 18 lines changed)
@@ -6,6 +6,7 @@ import (
 	"encoding/base64"
 	"fmt"
 	"io"
+	"math"
 	"net/http"
 	"net/url"
 	"path"
@@ -20,6 +21,12 @@ import (
 	"github.com/aws/aws-sdk-go/private/protocol"
 )
 
+const (
+	floatNaN    = "NaN"
+	floatInf    = "Infinity"
+	floatNegInf = "-Infinity"
+)
+
 // Whether the byte value can be sent without escaping in AWS URLs
 var noEscape [256]bool
 
@@ -302,7 +309,16 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error)
 	case int64:
 		str = strconv.FormatInt(value, 10)
 	case float64:
-		str = strconv.FormatFloat(value, 'f', -1, 64)
+		switch {
+		case math.IsNaN(value):
+			str = floatNaN
+		case math.IsInf(value, 1):
+			str = floatInf
+		case math.IsInf(value, -1):
+			str = floatNegInf
+		default:
+			str = strconv.FormatFloat(value, 'f', -1, 64)
+		}
 	case time.Time:
 		format := tag.Get("timestampFormat")
 		if len(format) == 0 {
vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go (generated, vendored; 18 lines changed)
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
+	"math"
 	"net/http"
 	"reflect"
 	"strconv"
@@ -231,9 +232,20 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro
 		}
 		v.Set(reflect.ValueOf(&i))
 	case *float64:
-		f, err := strconv.ParseFloat(header, 64)
-		if err != nil {
-			return err
+		var f float64
+		switch {
+		case strings.EqualFold(header, floatNaN):
+			f = math.NaN()
+		case strings.EqualFold(header, floatInf):
+			f = math.Inf(1)
+		case strings.EqualFold(header, floatNegInf):
+			f = math.Inf(-1)
+		default:
+			var err error
+			f, err = strconv.ParseFloat(header, 64)
+			if err != nil {
+				return err
+			}
 		}
 		v.Set(reflect.ValueOf(&f))
 	case *time.Time:
vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go (generated, vendored; 32 lines changed)
@@ -5,6 +5,7 @@ import (
 	"encoding/base64"
 	"encoding/xml"
 	"fmt"
+	"math"
 	"reflect"
 	"sort"
 	"strconv"
@@ -14,6 +15,12 @@ import (
 	"github.com/aws/aws-sdk-go/private/protocol"
 )
 
+const (
+	floatNaN    = "NaN"
+	floatInf    = "Infinity"
+	floatNegInf = "-Infinity"
+)
+
 // BuildXML will serialize params into an xml.Encoder. Error will be returned
 // if the serialization of any of the params or nested values fails.
 func BuildXML(params interface{}, e *xml.Encoder) error {
@@ -275,6 +282,7 @@ func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect
 // Error will be returned if the value type is unsupported.
 func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
 	var str string
+
 	switch converted := value.Interface().(type) {
 	case string:
 		str = converted
@@ -289,9 +297,29 @@ func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag refl
 	case int:
 		str = strconv.Itoa(converted)
 	case float64:
-		str = strconv.FormatFloat(converted, 'f', -1, 64)
+		switch {
+		case math.IsNaN(converted):
+			str = floatNaN
+		case math.IsInf(converted, 1):
+			str = floatInf
+		case math.IsInf(converted, -1):
+			str = floatNegInf
+		default:
+			str = strconv.FormatFloat(converted, 'f', -1, 64)
+		}
 	case float32:
-		str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
+		// The SDK doesn't render float32 values in types, only float64. This case would never be hit currently.
+		asFloat64 := float64(converted)
+		switch {
+		case math.IsNaN(asFloat64):
+			str = floatNaN
+		case math.IsInf(asFloat64, 1):
+			str = floatInf
+		case math.IsInf(asFloat64, -1):
+			str = floatNegInf
+		default:
+			str = strconv.FormatFloat(asFloat64, 'f', -1, 32)
+		}
 	case time.Time:
 		format := tag.Get("timestampFormat")
 		if len(format) == 0 {
vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go (generated, vendored; 18 lines changed)
@@ -6,6 +6,7 @@ import (
 	"encoding/xml"
 	"fmt"
 	"io"
+	"math"
 	"reflect"
 	"strconv"
 	"strings"
@@ -276,9 +277,20 @@ func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
 		}
 		r.Set(reflect.ValueOf(&v))
 	case *float64:
-		v, err := strconv.ParseFloat(node.Text, 64)
-		if err != nil {
-			return err
+		var v float64
+		switch {
+		case strings.EqualFold(node.Text, floatNaN):
+			v = math.NaN()
+		case strings.EqualFold(node.Text, floatInf):
+			v = math.Inf(1)
+		case strings.EqualFold(node.Text, floatNegInf):
+			v = math.Inf(-1)
+		default:
+			var err error
+			v, err = strconv.ParseFloat(node.Text, 64)
+			if err != nil {
+				return err
+			}
 		}
 		r.Set(reflect.ValueOf(&v))
 	case *time.Time:
vendor/github.com/aws/aws-sdk-go/service/sts/api.go (generated, vendored; 213 lines changed)
@@ -74,16 +74,16 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
 //
 // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
 // to this operation. You can pass a single JSON policy document to use as an
-// inline session policy. You can also specify up to 10 managed policies to
-// use as managed session policies. The plaintext that you use for both inline
-// and managed session policies can't exceed 2,048 characters. Passing policies
-// to this operation returns new temporary credentials. The resulting session's
-// permissions are the intersection of the role's identity-based policy and
-// the session policies. You can use the role's temporary credentials in subsequent
-// Amazon Web Services API calls to access resources in the account that owns
-// the role. You cannot use session policies to grant more permissions than
-// those allowed by the identity-based policy of the role that is being assumed.
-// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// inline session policy. You can also specify up to 10 managed policy Amazon
+// Resource Names (ARNs) to use as managed session policies. The plaintext that
+// you use for both inline and managed session policies can't exceed 2,048 characters.
+// Passing policies to this operation returns new temporary credentials. The
+// resulting session's permissions are the intersection of the role's identity-based
+// policy and the session policies. You can use the role's temporary credentials
+// in subsequent Amazon Web Services API calls to access resources in the account
+// that owns the role. You cannot use session policies to grant more permissions
+// than those allowed by the identity-based policy of the role that is being
+// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
 // in the IAM User Guide.
 //
 // When you create a role, you create two policies: A role trust policy that
@@ -307,16 +307,16 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
 //
 // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
 // to this operation. You can pass a single JSON policy document to use as an
-// inline session policy. You can also specify up to 10 managed policies to
-// use as managed session policies. The plaintext that you use for both inline
-// and managed session policies can't exceed 2,048 characters. Passing policies
-// to this operation returns new temporary credentials. The resulting session's
-// permissions are the intersection of the role's identity-based policy and
-// the session policies. You can use the role's temporary credentials in subsequent
-// Amazon Web Services API calls to access resources in the account that owns
-// the role. You cannot use session policies to grant more permissions than
-// those allowed by the identity-based policy of the role that is being assumed.
-// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// inline session policy. You can also specify up to 10 managed policy Amazon
+// Resource Names (ARNs) to use as managed session policies. The plaintext that
+// you use for both inline and managed session policies can't exceed 2,048 characters.
+// Passing policies to this operation returns new temporary credentials. The
+// resulting session's permissions are the intersection of the role's identity-based
+// policy and the session policies. You can use the role's temporary credentials
+// in subsequent Amazon Web Services API calls to access resources in the account
+// that owns the role. You cannot use session policies to grant more permissions
+// than those allowed by the identity-based policy of the role that is being
+// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
 // in the IAM User Guide.
 //
 // Calling AssumeRoleWithSAML does not require the use of Amazon Web Services
@@ -343,11 +343,12 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
 // and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
 // in the IAM User Guide.
 //
-// An Amazon Web Services conversion compresses the passed session policies
-// and session tags into a packed binary format that has a separate limit. Your
-// request can fail for this limit even if your plaintext meets the other requirements.
-// The PackedPolicySize response element indicates by percentage how close the
-// policies and tags for your request are to the upper size limit.
+// An Amazon Web Services conversion compresses the passed inline session policy,
+// managed policy ARNs, and session tags into a packed binary format that has
+// a separate limit. Your request can fail for this limit even if your plaintext
+// meets the other requirements. The PackedPolicySize response element indicates
+// by percentage how close the policies and tags for your request are to the
+// upper size limit.
 //
 // You can pass a session tag with the same key as a tag that is attached to
 // the role. When you do, session tags override the role's tags with the same
@@ -563,16 +564,16 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
 //
 // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
 // to this operation. You can pass a single JSON policy document to use as an
-// inline session policy. You can also specify up to 10 managed policies to
-// use as managed session policies. The plaintext that you use for both inline
-// and managed session policies can't exceed 2,048 characters. Passing policies
-// to this operation returns new temporary credentials. The resulting session's
-// permissions are the intersection of the role's identity-based policy and
-// the session policies. You can use the role's temporary credentials in subsequent
-// Amazon Web Services API calls to access resources in the account that owns
-// the role. You cannot use session policies to grant more permissions than
-// those allowed by the identity-based policy of the role that is being assumed.
-// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// inline session policy. You can also specify up to 10 managed policy Amazon
+// Resource Names (ARNs) to use as managed session policies. The plaintext that
+// you use for both inline and managed session policies can't exceed 2,048 characters.
+// Passing policies to this operation returns new temporary credentials. The
+// resulting session's permissions are the intersection of the role's identity-based
+// policy and the session policies. You can use the role's temporary credentials
+// in subsequent Amazon Web Services API calls to access resources in the account
+// that owns the role. You cannot use session policies to grant more permissions
+// than those allowed by the identity-based policy of the role that is being
+// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
 // in the IAM User Guide.
 //
 // # Tags
@@ -588,11 +589,12 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
 // and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
 // in the IAM User Guide.
 //
-// An Amazon Web Services conversion compresses the passed session policies
-// and session tags into a packed binary format that has a separate limit. Your
-// request can fail for this limit even if your plaintext meets the other requirements.
-// The PackedPolicySize response element indicates by percentage how close the
-// policies and tags for your request are to the upper size limit.
+// An Amazon Web Services conversion compresses the passed inline session policy,
+// managed policy ARNs, and session tags into a packed binary format that has
+// a separate limit. Your request can fail for this limit even if your plaintext
+// meets the other requirements. The PackedPolicySize response element indicates
+// by percentage how close the policies and tags for your request are to the
+// upper size limit.
 //
 // You can pass a session tag with the same key as a tag that is attached to
 // the role. When you do, the session tag overrides the role tag with the same
@@ -1110,9 +1112,9 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
 //
 // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
 // to this operation. You can pass a single JSON policy document to use as an
-// inline session policy. You can also specify up to 10 managed policies to
-// use as managed session policies. The plaintext that you use for both inline
-// and managed session policies can't exceed 2,048 characters.
+// inline session policy. You can also specify up to 10 managed policy Amazon
+// Resource Names (ARNs) to use as managed session policies. The plaintext that
+// you use for both inline and managed session policies can't exceed 2,048 characters.
 //
 // Though the session policy parameters are optional, if you do not pass a policy,
 // then the resulting federated user session has no permissions. When you pass
@@ -1424,11 +1426,12 @@ type AssumeRoleInput struct {
 	// \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
 	// return (\u000D) characters.
 	//
-	// An Amazon Web Services conversion compresses the passed session policies
-	// and session tags into a packed binary format that has a separate limit. Your
-	// request can fail for this limit even if your plaintext meets the other requirements.
-	// The PackedPolicySize response element indicates by percentage how close the
-	// policies and tags for your request are to the upper size limit.
+	// An Amazon Web Services conversion compresses the passed inline session policy,
+	// managed policy ARNs, and session tags into a packed binary format that has
+	// a separate limit. Your request can fail for this limit even if your plaintext
+	// meets the other requirements. The PackedPolicySize response element indicates
+	// by percentage how close the policies and tags for your request are to the
+	// upper size limit.
 	Policy *string `min:"1" type:"string"`
 
 	// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
@@ -1441,11 +1444,12 @@ type AssumeRoleInput struct {
 	// Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
 	// in the Amazon Web Services General Reference.
 	//
-	// An Amazon Web Services conversion compresses the passed session policies
-	// and session tags into a packed binary format that has a separate limit. Your
-	// request can fail for this limit even if your plaintext meets the other requirements.
-	// The PackedPolicySize response element indicates by percentage how close the
-	// policies and tags for your request are to the upper size limit.
+	// An Amazon Web Services conversion compresses the passed inline session policy,
+	// managed policy ARNs, and session tags into a packed binary format that has
+	// a separate limit. Your request can fail for this limit even if your plaintext
+	// meets the other requirements. The PackedPolicySize response element indicates
+	// by percentage how close the policies and tags for your request are to the
+	// upper size limit.
|
||||
// by percentage how close the policies and tags for your request are to the
|
||||
// upper size limit.
|
||||
//
|
||||
// Passing policies to this operation returns new temporary credentials. The
|
||||
// resulting session's permissions are the intersection of the role's identity-based
|
||||
|
@ -1520,11 +1524,12 @@ type AssumeRoleInput struct {
|
|||
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// An Amazon Web Services conversion compresses the passed session policies
|
||||
// and session tags into a packed binary format that has a separate limit. Your
|
||||
// request can fail for this limit even if your plaintext meets the other requirements.
|
||||
// The PackedPolicySize response element indicates by percentage how close the
|
||||
// policies and tags for your request are to the upper size limit.
|
||||
// An Amazon Web Services conversion compresses the passed inline session policy,
|
||||
// managed policy ARNs, and session tags into a packed binary format that has
|
||||
// a separate limit. Your request can fail for this limit even if your plaintext
|
||||
// meets the other requirements. The PackedPolicySize response element indicates
|
||||
// by percentage how close the policies and tags for your request are to the
|
||||
// upper size limit.
|
||||
//
|
||||
// You can pass a session tag with the same key as a tag that is already attached
|
||||
// to the role. When you do, session tags override a role tag with the same
|
||||
|
@ -1843,11 +1848,12 @@ type AssumeRoleWithSAMLInput struct {
|
|||
// \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
|
||||
// return (\u000D) characters.
|
||||
//
|
||||
// An Amazon Web Services conversion compresses the passed session policies
|
||||
// and session tags into a packed binary format that has a separate limit. Your
|
||||
// request can fail for this limit even if your plaintext meets the other requirements.
|
||||
// The PackedPolicySize response element indicates by percentage how close the
|
||||
// policies and tags for your request are to the upper size limit.
|
||||
// An Amazon Web Services conversion compresses the passed inline session policy,
|
||||
// managed policy ARNs, and session tags into a packed binary format that has
|
||||
// a separate limit. Your request can fail for this limit even if your plaintext
|
||||
// meets the other requirements. The PackedPolicySize response element indicates
|
||||
// by percentage how close the policies and tags for your request are to the
|
||||
// upper size limit.
|
||||
Policy *string `min:"1" type:"string"`
|
||||
|
||||
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
|
||||
|
@ -1860,11 +1866,12 @@ type AssumeRoleWithSAMLInput struct {
|
|||
// Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
|
||||
// in the Amazon Web Services General Reference.
|
||||
//
|
||||
// An Amazon Web Services conversion compresses the passed session policies
|
||||
// and session tags into a packed binary format that has a separate limit. Your
|
||||
// request can fail for this limit even if your plaintext meets the other requirements.
|
||||
// The PackedPolicySize response element indicates by percentage how close the
|
||||
// policies and tags for your request are to the upper size limit.
|
||||
// An Amazon Web Services conversion compresses the passed inline session policy,
|
||||
// managed policy ARNs, and session tags into a packed binary format that has
|
||||
// a separate limit. Your request can fail for this limit even if your plaintext
|
||||
// meets the other requirements. The PackedPolicySize response element indicates
|
||||
// by percentage how close the policies and tags for your request are to the
|
||||
// upper size limit.
|
||||
//
|
||||
// Passing policies to this operation returns new temporary credentials. The
|
||||
// resulting session's permissions are the intersection of the role's identity-based
|
||||
|
@ -2190,11 +2197,12 @@ type AssumeRoleWithWebIdentityInput struct {
|
|||
// \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
|
||||
// return (\u000D) characters.
|
||||
//
|
||||
// An Amazon Web Services conversion compresses the passed session policies
|
||||
// and session tags into a packed binary format that has a separate limit. Your
|
||||
// request can fail for this limit even if your plaintext meets the other requirements.
|
||||
// The PackedPolicySize response element indicates by percentage how close the
|
||||
// policies and tags for your request are to the upper size limit.
|
||||
// An Amazon Web Services conversion compresses the passed inline session policy,
|
||||
// managed policy ARNs, and session tags into a packed binary format that has
|
||||
// a separate limit. Your request can fail for this limit even if your plaintext
|
||||
// meets the other requirements. The PackedPolicySize response element indicates
|
||||
// by percentage how close the policies and tags for your request are to the
|
||||
// upper size limit.
|
||||
Policy *string `min:"1" type:"string"`
|
||||
|
||||
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
|
||||
|
@ -2207,11 +2215,12 @@ type AssumeRoleWithWebIdentityInput struct {
|
|||
// Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
|
||||
// in the Amazon Web Services General Reference.
|
||||
//
|
||||
// An Amazon Web Services conversion compresses the passed session policies
|
||||
// and session tags into a packed binary format that has a separate limit. Your
|
||||
// request can fail for this limit even if your plaintext meets the other requirements.
|
||||
// The PackedPolicySize response element indicates by percentage how close the
|
||||
// policies and tags for your request are to the upper size limit.
|
||||
// An Amazon Web Services conversion compresses the passed inline session policy,
|
||||
// managed policy ARNs, and session tags into a packed binary format that has
|
||||
// a separate limit. Your request can fail for this limit even if your plaintext
|
||||
// meets the other requirements. The PackedPolicySize response element indicates
|
||||
// by percentage how close the policies and tags for your request are to the
|
||||
// upper size limit.
|
||||
//
|
||||
// Passing policies to this operation returns new temporary credentials. The
|
||||
// resulting session's permissions are the intersection of the role's identity-based
|
||||
|
@ -2934,8 +2943,8 @@ type GetFederationTokenInput struct {
|
|||
//
|
||||
// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
|
||||
// to this operation. You can pass a single JSON policy document to use as an
|
||||
// inline session policy. You can also specify up to 10 managed policies to
|
||||
// use as managed session policies.
|
||||
// inline session policy. You can also specify up to 10 managed policy Amazon
|
||||
// Resource Names (ARNs) to use as managed session policies.
|
||||
//
|
||||
// This parameter is optional. However, if you do not pass any session policies,
|
||||
// then the resulting federated user session has no permissions.
|
||||
|
@ -2960,11 +2969,12 @@ type GetFederationTokenInput struct {
|
|||
// \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
|
||||
// return (\u000D) characters.
|
||||
//
|
||||
// An Amazon Web Services conversion compresses the passed session policies
|
||||
// and session tags into a packed binary format that has a separate limit. Your
|
||||
// request can fail for this limit even if your plaintext meets the other requirements.
|
||||
// The PackedPolicySize response element indicates by percentage how close the
|
||||
// policies and tags for your request are to the upper size limit.
|
||||
// An Amazon Web Services conversion compresses the passed inline session policy,
|
||||
// managed policy ARNs, and session tags into a packed binary format that has
|
||||
// a separate limit. Your request can fail for this limit even if your plaintext
|
||||
// meets the other requirements. The PackedPolicySize response element indicates
|
||||
// by percentage how close the policies and tags for your request are to the
|
||||
// upper size limit.
|
||||
Policy *string `min:"1" type:"string"`
|
||||
|
||||
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
|
||||
|
@ -2973,11 +2983,12 @@ type GetFederationTokenInput struct {
|
|||
//
|
||||
// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
|
||||
// to this operation. You can pass a single JSON policy document to use as an
|
||||
// inline session policy. You can also specify up to 10 managed policies to
|
||||
// use as managed session policies. The plaintext that you use for both inline
|
||||
// and managed session policies can't exceed 2,048 characters. You can provide
|
||||
// up to 10 managed policy ARNs. For more information about ARNs, see Amazon
|
||||
// Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
|
||||
// inline session policy. You can also specify up to 10 managed policy Amazon
|
||||
// Resource Names (ARNs) to use as managed session policies. The plaintext that
|
||||
// you use for both inline and managed session policies can't exceed 2,048 characters.
|
||||
// You can provide up to 10 managed policy ARNs. For more information about
|
||||
// ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces
|
||||
// (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
|
||||
// in the Amazon Web Services General Reference.
|
||||
//
|
||||
// This parameter is optional. However, if you do not pass any session policies,
|
||||
|
@ -2997,11 +3008,12 @@ type GetFederationTokenInput struct {
|
|||
// by the policy. These permissions are granted in addition to the permissions
|
||||
// that are granted by the session policies.
|
||||
//
|
||||
// An Amazon Web Services conversion compresses the passed session policies
|
||||
// and session tags into a packed binary format that has a separate limit. Your
|
||||
// request can fail for this limit even if your plaintext meets the other requirements.
|
||||
// The PackedPolicySize response element indicates by percentage how close the
|
||||
// policies and tags for your request are to the upper size limit.
|
||||
// An Amazon Web Services conversion compresses the passed inline session policy,
|
||||
// managed policy ARNs, and session tags into a packed binary format that has
|
||||
// a separate limit. Your request can fail for this limit even if your plaintext
|
||||
// meets the other requirements. The PackedPolicySize response element indicates
|
||||
// by percentage how close the policies and tags for your request are to the
|
||||
// upper size limit.
|
||||
PolicyArns []*PolicyDescriptorType `type:"list"`
|
||||
|
||||
// A list of session tags. Each session tag consists of a key name and an associated
|
||||
|
@ -3015,11 +3027,12 @@ type GetFederationTokenInput struct {
|
|||
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// An Amazon Web Services conversion compresses the passed session policies
|
||||
// and session tags into a packed binary format that has a separate limit. Your
|
||||
// request can fail for this limit even if your plaintext meets the other requirements.
|
||||
// The PackedPolicySize response element indicates by percentage how close the
|
||||
// policies and tags for your request are to the upper size limit.
|
||||
// An Amazon Web Services conversion compresses the passed inline session policy,
|
||||
// managed policy ARNs, and session tags into a packed binary format that has
|
||||
// a separate limit. Your request can fail for this limit even if your plaintext
|
||||
// meets the other requirements. The PackedPolicySize response element indicates
|
||||
// by percentage how close the policies and tags for your request are to the
|
||||
// upper size limit.
|
||||
//
|
||||
// You can pass a session tag with the same key as a tag that is already attached
|
||||
// to the user you are federating. When you do, session tags override a user
|
||||
|
|
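The PackedPolicySize behaviour described in the hunks above is observable from client code. Below is a minimal sketch, not part of this commit, assuming the v1 aws-sdk-go module and using a placeholder role ARN and inline policy; it only shows where the percentage surfaces in the AssumeRole response.

// Sketch only: the role ARN and policy document are hypothetical placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))
	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // placeholder
		RoleSessionName: aws.String("packed-size-demo"),
		Policy:          aws.String(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:ListBucket","Resource":"*"}]}`),
	})
	if err != nil {
		log.Fatal(err)
	}
	// PackedPolicySize reports, as a percentage, how close the compressed
	// session policy and tags are to the upper size limit.
	fmt.Println("packed policy size (%):", aws.Int64Value(out.PackedPolicySize))
}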
101 vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go generated vendored Normal file
@@ -0,0 +1,101 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package sortkeys

import (
	"sort"
)

func Strings(l []string) {
	sort.Strings(l)
}

func Float64s(l []float64) {
	sort.Float64s(l)
}

func Float32s(l []float32) {
	sort.Sort(Float32Slice(l))
}

func Int64s(l []int64) {
	sort.Sort(Int64Slice(l))
}

func Int32s(l []int32) {
	sort.Sort(Int32Slice(l))
}

func Uint64s(l []uint64) {
	sort.Sort(Uint64Slice(l))
}

func Uint32s(l []uint32) {
	sort.Sort(Uint32Slice(l))
}

func Bools(l []bool) {
	sort.Sort(BoolSlice(l))
}

type BoolSlice []bool

func (p BoolSlice) Len() int           { return len(p) }
func (p BoolSlice) Less(i, j int) bool { return p[j] }
func (p BoolSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

type Int64Slice []int64

func (p Int64Slice) Len() int           { return len(p) }
func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p Int64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

type Int32Slice []int32

func (p Int32Slice) Len() int           { return len(p) }
func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p Int32Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

type Uint64Slice []uint64

func (p Uint64Slice) Len() int           { return len(p) }
func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p Uint64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

type Uint32Slice []uint32

func (p Uint32Slice) Len() int           { return len(p) }
func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p Uint32Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

type Float32Slice []float32

func (p Float32Slice) Len() int           { return len(p) }
func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p Float32Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
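For orientation, a small usage sketch (not from this diff) of the sortkeys helpers added above: each typed slice implements sort.Interface, so the helpers can defer to sort.Sort without reflection.

package main

import (
	"fmt"

	"github.com/gogo/protobuf/sortkeys"
)

func main() {
	ids := []uint64{42, 7, 19}
	sortkeys.Uint64s(ids) // wraps sort.Sort(Uint64Slice(ids))
	fmt.Println(ids)      // [7 19 42]

	flags := []bool{true, false, true}
	sortkeys.Bools(flags) // BoolSlice.Less returns p[j], so false sorts before true
	fmt.Println(flags)    // [false true true]
}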
140 vendor/github.com/gogo/protobuf/types/any.go generated vendored Normal file
@@ -0,0 +1,140 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package types

// This file implements functions to marshal proto.Message to/from
// google.protobuf.Any message.

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/gogo/protobuf/proto"
)

const googleApis = "type.googleapis.com/"

// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
//
// Note that regular type assertions should be done using the Is
// function. AnyMessageName is provided for less common use cases like filtering a
// sequence of Any messages based on a set of allowed message type names.
func AnyMessageName(any *Any) (string, error) {
	if any == nil {
		return "", fmt.Errorf("message is nil")
	}
	slash := strings.LastIndex(any.TypeUrl, "/")
	if slash < 0 {
		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
	}
	return any.TypeUrl[slash+1:], nil
}

// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
func MarshalAny(pb proto.Message) (*Any, error) {
	value, err := proto.Marshal(pb)
	if err != nil {
		return nil, err
	}
	return &Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
}

// DynamicAny is a value that can be passed to UnmarshalAny to automatically
// allocate a proto.Message for the type specified in a google.protobuf.Any
// message. The allocated message is stored in the embedded proto.Message.
//
// Example:
//
// var x ptypes.DynamicAny
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
// fmt.Printf("unmarshaled message: %v", x.Message)
type DynamicAny struct {
	proto.Message
}

// Empty returns a new proto.Message of the type specified in a
// google.protobuf.Any message. It returns an error if corresponding message
// type isn't linked in.
func EmptyAny(any *Any) (proto.Message, error) {
	aname, err := AnyMessageName(any)
	if err != nil {
		return nil, err
	}

	t := proto.MessageType(aname)
	if t == nil {
		return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
	}
	return reflect.New(t.Elem()).Interface().(proto.Message), nil
}

// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
// message and places the decoded result in pb. It returns an error if type of
// contents of Any message does not match type of pb message.
//
// pb can be a proto.Message, or a *DynamicAny.
func UnmarshalAny(any *Any, pb proto.Message) error {
	if d, ok := pb.(*DynamicAny); ok {
		if d.Message == nil {
			var err error
			d.Message, err = EmptyAny(any)
			if err != nil {
				return err
			}
		}
		return UnmarshalAny(any, d.Message)
	}

	aname, err := AnyMessageName(any)
	if err != nil {
		return err
	}

	mname := proto.MessageName(pb)
	if aname != mname {
		return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
	}
	return proto.Unmarshal(any.Value, pb)
}

// Is returns true if any value contains a given message type.
func Is(any *Any, pb proto.Message) bool {
	// The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
	// but it avoids scanning TypeUrl for the slash.
	if any == nil {
		return false
	}
	name := proto.MessageName(pb)
	prefix := len(any.TypeUrl) - len(name)
	return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
}
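A hedged round-trip sketch (not part of the vendored file) showing how MarshalAny, UnmarshalAny, and DynamicAny from the file above fit together; it packs the registered well-known Duration type, so the type-URL resolution performed by AnyMessageName applies.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/gogo/protobuf/types"
)

func main() {
	// Pack: the type URL becomes "type.googleapis.com/google.protobuf.Duration".
	any, err := types.MarshalAny(types.DurationProto(3 * time.Second))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(any.TypeUrl)

	// Unpack without knowing the concrete type up front; EmptyAny allocates
	// the message via the registered type name.
	var x types.DynamicAny
	if err := types.UnmarshalAny(any, &x); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("unmarshaled message: %v\n", x.Message)
}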
694 vendor/github.com/gogo/protobuf/types/any.pb.go generated vendored Normal file
@@ -0,0 +1,694 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: google/protobuf/any.proto

package types

import (
	bytes "bytes"
	fmt "fmt"
	proto "github.com/gogo/protobuf/proto"
	io "io"
	math "math"
	math_bits "math/bits"
	reflect "reflect"
	strings "strings"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package

// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
//     Foo foo = ...;
//     Any any;
//     any.PackFrom(foo);
//     ...
//     if (any.UnpackTo(&foo)) {
//       ...
//     }
//
// Example 2: Pack and unpack a message in Java.
//
//     Foo foo = ...;
//     Any any = Any.pack(foo);
//     ...
//     if (any.is(Foo.class)) {
//       foo = any.unpack(Foo.class);
//     }
//
// Example 3: Pack and unpack a message in Python.
//
//     foo = Foo(...)
//     any = Any()
//     any.Pack(foo)
//     ...
//     if any.Is(Foo.DESCRIPTOR):
//       any.Unpack(foo)
//       ...
//
// Example 4: Pack and unpack a message in Go
//
//     foo := &pb.Foo{...}
//     any, err := ptypes.MarshalAny(foo)
//     ...
//     foo := &pb.Foo{}
//     if err := ptypes.UnmarshalAny(any, foo); err != nil {
//       ...
//     }
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
//     package google.profile;
//     message Person {
//       string first_name = 1;
//       string last_name = 2;
//     }
//
//     {
//       "@type": "type.googleapis.com/google.profile.Person",
//       "firstName": <string>,
//       "lastName": <string>
//     }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
//     {
//       "@type": "type.googleapis.com/google.protobuf.Duration",
//       "value": "1.212s"
//     }
//
type Any struct {
	// A URL/resource name that uniquely identifies the type of the serialized
	// protocol buffer message. This string must contain at least
	// one "/" character. The last segment of the URL's path must represent
	// the fully qualified name of the type (as in
	// `path/google.protobuf.Duration`). The name should be in a canonical form
	// (e.g., leading "." is not accepted).
	//
	// In practice, teams usually precompile into the binary all types that they
	// expect it to use in the context of Any. However, for URLs which use the
	// scheme `http`, `https`, or no scheme, one can optionally set up a type
	// server that maps type URLs to message definitions as follows:
	//
	// * If no scheme is provided, `https` is assumed.
	// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
	// value in binary format, or produce an error.
	// * Applications are allowed to cache lookup results based on the
	// URL, or have them precompiled into a binary to avoid any
	// lookup. Therefore, binary compatibility needs to be preserved
	// on changes to types. (Use versioned type names to manage
	// breaking changes.)
	//
	// Note: this functionality is not currently available in the official
	// protobuf release, and it is not used for type URLs beginning with
	// type.googleapis.com.
	//
	// Schemes other than `http`, `https` (or the empty scheme) might be
	// used with implementation specific semantics.
	//
	TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
	// Must be a valid serialized protocol buffer of the above specified type.
	Value                []byte   `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *Any) Reset()      { *m = Any{} }
func (*Any) ProtoMessage() {}
func (*Any) Descriptor() ([]byte, []int) {
	return fileDescriptor_b53526c13ae22eb4, []int{0}
}
func (*Any) XXX_WellKnownType() string { return "Any" }
func (m *Any) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_Any.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *Any) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Any.Merge(m, src)
}
func (m *Any) XXX_Size() int {
	return m.Size()
}
func (m *Any) XXX_DiscardUnknown() {
	xxx_messageInfo_Any.DiscardUnknown(m)
}

var xxx_messageInfo_Any proto.InternalMessageInfo

func (m *Any) GetTypeUrl() string {
	if m != nil {
		return m.TypeUrl
	}
	return ""
}

func (m *Any) GetValue() []byte {
	if m != nil {
		return m.Value
	}
	return nil
}

func (*Any) XXX_MessageName() string {
	return "google.protobuf.Any"
}
func init() {
	proto.RegisterType((*Any)(nil), "google.protobuf.Any")
}

func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) }

var fileDescriptor_b53526c13ae22eb4 = []byte{
	// 211 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
	0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
	0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
	0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
	0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xaa, 0xbf, 0xf1, 0x50, 0x8e,
	0xe1, 0xc3, 0x43, 0x39, 0xc6, 0x1f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, 0x31, 0xae, 0x78, 0x24,
	0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78,
	0x24, 0xc7, 0xf0, 0x01, 0x24, 0xfe, 0x58, 0x8e, 0xf1, 0xc4, 0x63, 0x39, 0x46, 0x2e, 0xe1, 0xe4,
	0xfc, 0x5c, 0x3d, 0x34, 0xeb, 0x9d, 0x38, 0x1c, 0xf3, 0x2a, 0x03, 0x40, 0x9c, 0x00, 0xc6, 0x28,
	0x56, 0x90, 0x8d, 0xc5, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x94,
	0x06, 0x40, 0x95, 0xea, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80, 0x94,
	0x25, 0xb1, 0x81, 0xcd, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x81, 0x82, 0xd3, 0xed,
	0x00, 0x00, 0x00,
}

func (this *Any) Compare(that interface{}) int {
	if that == nil {
		if this == nil {
			return 0
		}
		return 1
	}

	that1, ok := that.(*Any)
	if !ok {
		that2, ok := that.(Any)
		if ok {
			that1 = &that2
		} else {
			return 1
		}
	}
	if that1 == nil {
		if this == nil {
			return 0
		}
		return 1
	} else if this == nil {
		return -1
	}
	if this.TypeUrl != that1.TypeUrl {
		if this.TypeUrl < that1.TypeUrl {
			return -1
		}
		return 1
	}
	if c := bytes.Compare(this.Value, that1.Value); c != 0 {
		return c
	}
	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
		return c
	}
	return 0
}
func (this *Any) Equal(that interface{}) bool {
	if that == nil {
		return this == nil
	}

	that1, ok := that.(*Any)
	if !ok {
		that2, ok := that.(Any)
		if ok {
			that1 = &that2
		} else {
			return false
		}
	}
	if that1 == nil {
		return this == nil
	} else if this == nil {
		return false
	}
	if this.TypeUrl != that1.TypeUrl {
		return false
	}
	if !bytes.Equal(this.Value, that1.Value) {
		return false
	}
	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
		return false
	}
	return true
}
func (this *Any) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 6)
	s = append(s, "&types.Any{")
	s = append(s, "TypeUrl: "+fmt.Sprintf("%#v", this.TypeUrl)+",\n")
	s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func valueToGoStringAny(v interface{}, typ string) string {
	rv := reflect.ValueOf(v)
	if rv.IsNil() {
		return "nil"
	}
	pv := reflect.Indirect(rv).Interface()
	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
func (m *Any) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *Any) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *Any) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if len(m.Value) > 0 {
		i -= len(m.Value)
		copy(dAtA[i:], m.Value)
		i = encodeVarintAny(dAtA, i, uint64(len(m.Value)))
		i--
		dAtA[i] = 0x12
	}
	if len(m.TypeUrl) > 0 {
		i -= len(m.TypeUrl)
		copy(dAtA[i:], m.TypeUrl)
		i = encodeVarintAny(dAtA, i, uint64(len(m.TypeUrl)))
		i--
		dAtA[i] = 0xa
	}
	return len(dAtA) - i, nil
}

func encodeVarintAny(dAtA []byte, offset int, v uint64) int {
	offset -= sovAny(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}
func NewPopulatedAny(r randyAny, easy bool) *Any {
	this := &Any{}
	this.TypeUrl = string(randStringAny(r))
	v1 := r.Intn(100)
	this.Value = make([]byte, v1)
	for i := 0; i < v1; i++ {
		this.Value[i] = byte(r.Intn(256))
	}
	if !easy && r.Intn(10) != 0 {
		this.XXX_unrecognized = randUnrecognizedAny(r, 3)
	}
	return this
}

type randyAny interface {
	Float32() float32
	Float64() float64
	Int63() int64
	Int31() int32
	Uint32() uint32
	Intn(n int) int
}

func randUTF8RuneAny(r randyAny) rune {
	ru := r.Intn(62)
	if ru < 10 {
		return rune(ru + 48)
	} else if ru < 36 {
		return rune(ru + 55)
	}
	return rune(ru + 61)
}
func randStringAny(r randyAny) string {
	v2 := r.Intn(100)
	tmps := make([]rune, v2)
	for i := 0; i < v2; i++ {
		tmps[i] = randUTF8RuneAny(r)
	}
	return string(tmps)
}
func randUnrecognizedAny(r randyAny, maxFieldNumber int) (dAtA []byte) {
	l := r.Intn(5)
	for i := 0; i < l; i++ {
		wire := r.Intn(4)
		if wire == 3 {
			wire = 5
		}
		fieldNumber := maxFieldNumber + r.Intn(100)
		dAtA = randFieldAny(dAtA, r, fieldNumber, wire)
	}
	return dAtA
}
func randFieldAny(dAtA []byte, r randyAny, fieldNumber int, wire int) []byte {
	key := uint32(fieldNumber)<<3 | uint32(wire)
	switch wire {
	case 0:
		dAtA = encodeVarintPopulateAny(dAtA, uint64(key))
		v3 := r.Int63()
		if r.Intn(2) == 0 {
			v3 *= -1
		}
		dAtA = encodeVarintPopulateAny(dAtA, uint64(v3))
	case 1:
		dAtA = encodeVarintPopulateAny(dAtA, uint64(key))
		dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
	case 2:
		dAtA = encodeVarintPopulateAny(dAtA, uint64(key))
		ll := r.Intn(100)
		dAtA = encodeVarintPopulateAny(dAtA, uint64(ll))
		for j := 0; j < ll; j++ {
			dAtA = append(dAtA, byte(r.Intn(256)))
		}
	default:
		dAtA = encodeVarintPopulateAny(dAtA, uint64(key))
		dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
	}
	return dAtA
}
func encodeVarintPopulateAny(dAtA []byte, v uint64) []byte {
	for v >= 1<<7 {
		dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))
		v >>= 7
	}
	dAtA = append(dAtA, uint8(v))
	return dAtA
}
func (m *Any) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.TypeUrl)
	if l > 0 {
		n += 1 + l + sovAny(uint64(l))
	}
	l = len(m.Value)
	if l > 0 {
		n += 1 + l + sovAny(uint64(l))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}

func sovAny(x uint64) (n int) {
	return (math_bits.Len64(x|1) + 6) / 7
}
func sozAny(x uint64) (n int) {
	return sovAny(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *Any) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&Any{`,
		`TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`,
		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
		`}`,
	}, "")
	return s
}
func valueToStringAny(v interface{}) string {
	rv := reflect.ValueOf(v)
	if rv.IsNil() {
		return "nil"
	}
	pv := reflect.Indirect(rv).Interface()
	return fmt.Sprintf("*%v", pv)
}
func (m *Any) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowAny
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Any: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Any: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowAny
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthAny
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthAny
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.TypeUrl = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowAny
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthAny
			}
			postIndex := iNdEx + byteLen
			if postIndex < 0 {
				return ErrInvalidLengthAny
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
			if m.Value == nil {
				m.Value = []byte{}
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipAny(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthAny
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func skipAny(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	depth := 0
	for iNdEx < l {
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowAny
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowAny
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
		case 1:
			iNdEx += 8
		case 2:
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowAny
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if length < 0 {
				return 0, ErrInvalidLengthAny
			}
			iNdEx += length
		case 3:
			depth++
		case 4:
			if depth == 0 {
				return 0, ErrUnexpectedEndOfGroupAny
			}
			depth--
		case 5:
			iNdEx += 4
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
		if iNdEx < 0 {
			return 0, ErrInvalidLengthAny
		}
		if depth == 0 {
			return iNdEx, nil
		}
	}
	return 0, io.ErrUnexpectedEOF
}

var (
	ErrInvalidLengthAny        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowAny          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupAny = fmt.Errorf("proto: unexpected end of group")
)
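The sovAny/encodeVarintAny pair above implements standard protobuf varints: seven payload bits per byte, with the high bit set on every byte except the last. A standalone sketch (not from this diff) of the same math:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	v := uint64(300) // 0b1_0010_1100
	// sovAny: encoded size in bytes = ceil(bitlen/7), with bitlen(0) treated as 1.
	size := (bits.Len64(v|1) + 6) / 7
	fmt.Println(size) // 2

	// Same loop shape as encodeVarintPopulateAny: emit 7 bits at a time,
	// setting the continuation bit on all but the final byte.
	var buf []byte
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	buf = append(buf, byte(v))
	fmt.Printf("% x\n", buf) // ac 02
}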
2134 vendor/github.com/gogo/protobuf/types/api.pb.go generated vendored Normal file
File diff suppressed because it is too large
35 vendor/github.com/gogo/protobuf/types/doc.go generated vendored Normal file
@@ -0,0 +1,35 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

/*
Package types contains code for interacting with well-known types.
*/
package types
100 vendor/github.com/gogo/protobuf/types/duration.go generated vendored Normal file
@@ -0,0 +1,100 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package types

// This file implements conversions between google.protobuf.Duration
// and time.Duration.

import (
	"errors"
	"fmt"
	"time"
)

const (
	// Range of a Duration in seconds, as specified in
	// google/protobuf/duration.proto. This is about 10,000 years in seconds.
	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
	minSeconds = -maxSeconds
)

// validateDuration determines whether the Duration is valid according to the
// definition in google/protobuf/duration.proto. A valid Duration
// may still be too large to fit into a time.Duration (the range of Duration
// is about 10,000 years, and the range of time.Duration is about 290).
func validateDuration(d *Duration) error {
	if d == nil {
		return errors.New("duration: nil Duration")
	}
	if d.Seconds < minSeconds || d.Seconds > maxSeconds {
		return fmt.Errorf("duration: %#v: seconds out of range", d)
	}
	if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
		return fmt.Errorf("duration: %#v: nanos out of range", d)
	}
	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
	if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
		return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d)
	}
	return nil
}

// DurationFromProto converts a Duration to a time.Duration. DurationFromProto
// returns an error if the Duration is invalid or is too large to be
// represented in a time.Duration.
func DurationFromProto(p *Duration) (time.Duration, error) {
	if err := validateDuration(p); err != nil {
		return 0, err
	}
	d := time.Duration(p.Seconds) * time.Second
	if int64(d/time.Second) != p.Seconds {
		return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
	}
	if p.Nanos != 0 {
		d += time.Duration(p.Nanos) * time.Nanosecond
		if (d < 0) != (p.Nanos < 0) {
			return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
		}
	}
	return d, nil
}

// DurationProto converts a time.Duration to a Duration.
func DurationProto(d time.Duration) *Duration {
	nanos := d.Nanoseconds()
	secs := nanos / 1e9
	nanos -= secs * 1e9
	return &Duration{
		Seconds: secs,
		Nanos:   int32(nanos),
	}
}
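A short usage sketch (not from the vendored file) for the conversions above, illustrating the seconds/nanos split that validateDuration enforces:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/gogo/protobuf/types"
)

func main() {
	// Sub-second durations keep Seconds at 0 and put the remainder in Nanos.
	p := types.DurationProto(90 * time.Millisecond)
	fmt.Println(p.Seconds, p.Nanos) // 0 90000000

	// The reverse conversion validates range and sign agreement first.
	d, err := types.DurationFromProto(&types.Duration{Seconds: 3, Nanos: 1})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d) // 3.000000001s
}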
517
vendor/github.com/gogo/protobuf/types/duration.pb.go
generated
vendored
Normal file
517
vendor/github.com/gogo/protobuf/types/duration.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,517 @@
|
|||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: google/protobuf/duration.proto

package types

import (
	bytes "bytes"
	fmt "fmt"
	proto "github.com/gogo/protobuf/proto"
	io "io"
	math "math"
	math_bits "math/bits"
	reflect "reflect"
	strings "strings"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package

// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
// resolution. It is independent of any calendar and concepts like "day"
// or "month". It is related to Timestamp in that the difference between
// two Timestamp values is a Duration and it can be added or subtracted
// from a Timestamp. Range is approximately +-10,000 years.
//
// # Examples
//
// Example 1: Compute Duration from two Timestamps in pseudo code.
//
//     Timestamp start = ...;
//     Timestamp end = ...;
//     Duration duration = ...;
//
//     duration.seconds = end.seconds - start.seconds;
//     duration.nanos = end.nanos - start.nanos;
//
//     if (duration.seconds < 0 && duration.nanos > 0) {
//       duration.seconds += 1;
//       duration.nanos -= 1000000000;
//     } else if (durations.seconds > 0 && duration.nanos < 0) {
//       duration.seconds -= 1;
//       duration.nanos += 1000000000;
//     }
//
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
//
//     Timestamp start = ...;
//     Duration duration = ...;
//     Timestamp end = ...;
//
//     end.seconds = start.seconds + duration.seconds;
//     end.nanos = start.nanos + duration.nanos;
//
//     if (end.nanos < 0) {
//       end.seconds -= 1;
//       end.nanos += 1000000000;
//     } else if (end.nanos >= 1000000000) {
//       end.seconds += 1;
//       end.nanos -= 1000000000;
//     }
//
// Example 3: Compute Duration from datetime.timedelta in Python.
//
//     td = datetime.timedelta(days=3, minutes=10)
//     duration = Duration()
//     duration.FromTimedelta(td)
//
// # JSON Mapping
//
// In JSON format, the Duration type is encoded as a string rather than an
// object, where the string ends in the suffix "s" (indicating seconds) and
// is preceded by the number of seconds, with nanoseconds expressed as
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
// microsecond should be expressed in JSON format as "3.000001s".
//
//
type Duration struct {
	// Signed seconds of the span of time. Must be from -315,576,000,000
	// to +315,576,000,000 inclusive. Note: these bounds are computed from:
	// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
	// Signed fractions of a second at nanosecond resolution of the span
	// of time. Durations less than one second are represented with a 0
	// `seconds` field and a positive or negative `nanos` field. For durations
	// of one second or more, a non-zero value for the `nanos` field must be
	// of the same sign as the `seconds` field. Must be from -999,999,999
	// to +999,999,999 inclusive.
	Nanos                int32    `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *Duration) Reset()      { *m = Duration{} }
func (*Duration) ProtoMessage() {}
func (*Duration) Descriptor() ([]byte, []int) {
	return fileDescriptor_23597b2ebd7ac6c5, []int{0}
}
func (*Duration) XXX_WellKnownType() string { return "Duration" }
func (m *Duration) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *Duration) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Duration.Merge(m, src)
}
func (m *Duration) XXX_Size() int {
	return m.Size()
}
func (m *Duration) XXX_DiscardUnknown() {
	xxx_messageInfo_Duration.DiscardUnknown(m)
}

var xxx_messageInfo_Duration proto.InternalMessageInfo

func (m *Duration) GetSeconds() int64 {
	if m != nil {
		return m.Seconds
	}
	return 0
}

func (m *Duration) GetNanos() int32 {
	if m != nil {
		return m.Nanos
	}
	return 0
}

func (*Duration) XXX_MessageName() string {
	return "google.protobuf.Duration"
}
func init() {
	proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
}

func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) }

var fileDescriptor_23597b2ebd7ac6c5 = []byte{
	// 209 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
	0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
	0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
	0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
	0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0x7f, 0xe3, 0xa1, 0x1c,
	0xc3, 0x87, 0x87, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91,
	0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9, 0x31, 0x7c, 0x78, 0x24, 0xc7, 0xb8, 0xe2,
	0xb1, 0x1c, 0xe3, 0x89, 0xc7, 0x72, 0x8c, 0x5c, 0xc2, 0xc9, 0xf9, 0xb9, 0x7a, 0x68, 0x56, 0x3b,
	0xf1, 0xc2, 0x2c, 0x0e, 0x00, 0x89, 0x04, 0x30, 0x46, 0xb1, 0x96, 0x54, 0x16, 0xa4, 0x16, 0xff,
	0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0xa2, 0x25, 0x00,
	0xaa, 0x45, 0x2f, 0x3c, 0x35, 0x27, 0xc7, 0x3b, 0x2f, 0xbf, 0x3c, 0x2f, 0x04, 0xa4, 0x32, 0x89,
	0x0d, 0x6c, 0x96, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x1c, 0x64, 0x4e, 0xf6, 0x00, 0x00,
	0x00,
}

func (this *Duration) Compare(that interface{}) int {
	if that == nil {
		if this == nil {
			return 0
		}
		return 1
	}

	that1, ok := that.(*Duration)
	if !ok {
		that2, ok := that.(Duration)
		if ok {
			that1 = &that2
		} else {
			return 1
		}
	}
	if that1 == nil {
		if this == nil {
			return 0
		}
		return 1
	} else if this == nil {
		return -1
	}
	if this.Seconds != that1.Seconds {
		if this.Seconds < that1.Seconds {
			return -1
		}
		return 1
	}
	if this.Nanos != that1.Nanos {
		if this.Nanos < that1.Nanos {
			return -1
		}
		return 1
	}
	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
		return c
	}
	return 0
}
func (this *Duration) Equal(that interface{}) bool {
	if that == nil {
		return this == nil
	}

	that1, ok := that.(*Duration)
	if !ok {
		that2, ok := that.(Duration)
		if ok {
			that1 = &that2
		} else {
			return false
		}
	}
	if that1 == nil {
		return this == nil
	} else if this == nil {
		return false
	}
	if this.Seconds != that1.Seconds {
		return false
	}
	if this.Nanos != that1.Nanos {
		return false
	}
	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
		return false
	}
	return true
}
func (this *Duration) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 6)
	s = append(s, "&types.Duration{")
	s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n")
	s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n")
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func valueToGoStringDuration(v interface{}, typ string) string {
	rv := reflect.ValueOf(v)
	if rv.IsNil() {
		return "nil"
	}
	pv := reflect.Indirect(rv).Interface()
	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
func (m *Duration) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *Duration) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if m.Nanos != 0 {
		i = encodeVarintDuration(dAtA, i, uint64(m.Nanos))
		i--
		dAtA[i] = 0x10
	}
	if m.Seconds != 0 {
		i = encodeVarintDuration(dAtA, i, uint64(m.Seconds))
		i--
		dAtA[i] = 0x8
	}
	return len(dAtA) - i, nil
}

func encodeVarintDuration(dAtA []byte, offset int, v uint64) int {
	offset -= sovDuration(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}
func (m *Duration) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Seconds != 0 {
		n += 1 + sovDuration(uint64(m.Seconds))
	}
	if m.Nanos != 0 {
		n += 1 + sovDuration(uint64(m.Nanos))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}

func sovDuration(x uint64) (n int) {
	return (math_bits.Len64(x|1) + 6) / 7
}
func sozDuration(x uint64) (n int) {
	return sovDuration(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *Duration) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowDuration
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Duration: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType)
			}
			m.Seconds = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowDuration
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Seconds |= int64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType)
			}
			m.Nanos = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowDuration
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Nanos |= int32(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			iNdEx = preIndex
			skippy, err := skipDuration(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthDuration
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func skipDuration(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	depth := 0
	for iNdEx < l {
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowDuration
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowDuration
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
		case 1:
			iNdEx += 8
		case 2:
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowDuration
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if length < 0 {
				return 0, ErrInvalidLengthDuration
			}
			iNdEx += length
		case 3:
			depth++
		case 4:
			if depth == 0 {
				return 0, ErrUnexpectedEndOfGroupDuration
			}
			depth--
		case 5:
			iNdEx += 4
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
		if iNdEx < 0 {
			return 0, ErrInvalidLengthDuration
		}
		if depth == 0 {
			return iNdEx, nil
		}
	}
	return 0, io.ErrUnexpectedEOF
}

var (
	ErrInvalidLengthDuration        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowDuration          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupDuration = fmt.Errorf("proto: unexpected end of group")
)
100
vendor/github.com/gogo/protobuf/types/duration_gogo.go
generated
vendored
Normal file
@ -0,0 +1,100 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2016, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package types

import (
	"fmt"
	"time"
)

func NewPopulatedDuration(r interface {
	Int63() int64
}, easy bool) *Duration {
	this := &Duration{}
	maxSecs := time.Hour.Nanoseconds() / 1e9
	max := 2 * maxSecs
	s := int64(r.Int63()) % max
	s -= maxSecs
	neg := int64(1)
	if s < 0 {
		neg = -1
	}
	this.Seconds = s
	this.Nanos = int32(neg * (r.Int63() % 1e9))
	return this
}

func (d *Duration) String() string {
	td, err := DurationFromProto(d)
	if err != nil {
		return fmt.Sprintf("(%v)", err)
	}
	return td.String()
}

func NewPopulatedStdDuration(r interface {
	Int63() int64
}, easy bool) *time.Duration {
	dur := NewPopulatedDuration(r, easy)
	d, err := DurationFromProto(dur)
	if err != nil {
		return nil
	}
	return &d
}

func SizeOfStdDuration(d time.Duration) int {
	dur := DurationProto(d)
	return dur.Size()
}

func StdDurationMarshal(d time.Duration) ([]byte, error) {
	size := SizeOfStdDuration(d)
	buf := make([]byte, size)
	_, err := StdDurationMarshalTo(d, buf)
	return buf, err
}

func StdDurationMarshalTo(d time.Duration, data []byte) (int, error) {
	dur := DurationProto(d)
	return dur.MarshalTo(data)
}

func StdDurationUnmarshal(d *time.Duration, data []byte) error {
	dur := &Duration{}
	if err := dur.Unmarshal(data); err != nil {
		return err
	}
	dd, err := DurationFromProto(dur)
	if err != nil {
		return err
	}
	*d = dd
	return nil
}
462
vendor/github.com/gogo/protobuf/types/empty.pb.go
generated
vendored
Normal file
@ -0,0 +1,462 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: google/protobuf/empty.proto

package types

import (
	bytes "bytes"
	fmt "fmt"
	proto "github.com/gogo/protobuf/proto"
	io "io"
	math "math"
	math_bits "math/bits"
	reflect "reflect"
	strings "strings"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package

// A generic empty message that you can re-use to avoid defining duplicated
// empty messages in your APIs. A typical example is to use it as the request
// or the response type of an API method. For instance:
//
//     service Foo {
//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
//     }
//
// The JSON representation for `Empty` is empty JSON object `{}`.
type Empty struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *Empty) Reset()      { *m = Empty{} }
func (*Empty) ProtoMessage() {}
func (*Empty) Descriptor() ([]byte, []int) {
	return fileDescriptor_900544acb223d5b8, []int{0}
}
func (*Empty) XXX_WellKnownType() string { return "Empty" }
func (m *Empty) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_Empty.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *Empty) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Empty.Merge(m, src)
}
func (m *Empty) XXX_Size() int {
	return m.Size()
}
func (m *Empty) XXX_DiscardUnknown() {
	xxx_messageInfo_Empty.DiscardUnknown(m)
}

var xxx_messageInfo_Empty proto.InternalMessageInfo

func (*Empty) XXX_MessageName() string {
	return "google.protobuf.Empty"
}
func init() {
	proto.RegisterType((*Empty)(nil), "google.protobuf.Empty")
}

func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_900544acb223d5b8) }

var fileDescriptor_900544acb223d5b8 = []byte{
	// 176 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f,
	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28,
	0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57,
	0x90, 0xbc, 0x53, 0x0b, 0xe3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xfe, 0x78, 0x28,
	0xc7, 0xd8, 0xf0, 0x48, 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c,
	0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x20, 0xf1, 0xc7, 0x72,
	0x8c, 0x27, 0x1e, 0xcb, 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe8, 0xc4, 0x05,
	0x36, 0x2e, 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x62, 0x2d, 0xa9, 0x2c, 0x48, 0x2d, 0xfe, 0xc1, 0xc8,
	0xb8, 0x88, 0x89, 0xd9, 0x3d, 0xc0, 0x69, 0x15, 0x93, 0x9c, 0x3b, 0x44, 0x7d, 0x00, 0x54, 0xbd,
	0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x65, 0x12, 0x1b, 0xd8,
	0x20, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x21, 0xbe, 0xb6, 0x31, 0xc6, 0x00, 0x00, 0x00,
}

func (this *Empty) Compare(that interface{}) int {
	if that == nil {
		if this == nil {
			return 0
		}
		return 1
	}

	that1, ok := that.(*Empty)
	if !ok {
		that2, ok := that.(Empty)
		if ok {
			that1 = &that2
		} else {
			return 1
		}
	}
	if that1 == nil {
		if this == nil {
			return 0
		}
		return 1
	} else if this == nil {
		return -1
	}
	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
		return c
	}
	return 0
}
func (this *Empty) Equal(that interface{}) bool {
	if that == nil {
		return this == nil
	}

	that1, ok := that.(*Empty)
	if !ok {
		that2, ok := that.(Empty)
		if ok {
			that1 = &that2
		} else {
			return false
		}
	}
	if that1 == nil {
		return this == nil
	} else if this == nil {
		return false
	}
	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
		return false
	}
	return true
}
func (this *Empty) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 4)
	s = append(s, "&types.Empty{")
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func valueToGoStringEmpty(v interface{}, typ string) string {
	rv := reflect.ValueOf(v)
	if rv.IsNil() {
		return "nil"
	}
	pv := reflect.Indirect(rv).Interface()
	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
func (m *Empty) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *Empty) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *Empty) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	return len(dAtA) - i, nil
}

func encodeVarintEmpty(dAtA []byte, offset int, v uint64) int {
	offset -= sovEmpty(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}
func NewPopulatedEmpty(r randyEmpty, easy bool) *Empty {
	this := &Empty{}
	if !easy && r.Intn(10) != 0 {
		this.XXX_unrecognized = randUnrecognizedEmpty(r, 1)
	}
	return this
}

type randyEmpty interface {
	Float32() float32
	Float64() float64
	Int63() int64
	Int31() int32
	Uint32() uint32
	Intn(n int) int
}

func randUTF8RuneEmpty(r randyEmpty) rune {
	ru := r.Intn(62)
	if ru < 10 {
		return rune(ru + 48)
	} else if ru < 36 {
		return rune(ru + 55)
	}
	return rune(ru + 61)
}
func randStringEmpty(r randyEmpty) string {
	v1 := r.Intn(100)
	tmps := make([]rune, v1)
	for i := 0; i < v1; i++ {
		tmps[i] = randUTF8RuneEmpty(r)
	}
	return string(tmps)
}
func randUnrecognizedEmpty(r randyEmpty, maxFieldNumber int) (dAtA []byte) {
	l := r.Intn(5)
	for i := 0; i < l; i++ {
		wire := r.Intn(4)
		if wire == 3 {
			wire = 5
		}
		fieldNumber := maxFieldNumber + r.Intn(100)
		dAtA = randFieldEmpty(dAtA, r, fieldNumber, wire)
	}
	return dAtA
}
func randFieldEmpty(dAtA []byte, r randyEmpty, fieldNumber int, wire int) []byte {
	key := uint32(fieldNumber)<<3 | uint32(wire)
	switch wire {
	case 0:
		dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key))
		v2 := r.Int63()
		if r.Intn(2) == 0 {
			v2 *= -1
		}
		dAtA = encodeVarintPopulateEmpty(dAtA, uint64(v2))
	case 1:
		dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key))
		dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
	case 2:
		dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key))
		ll := r.Intn(100)
		dAtA = encodeVarintPopulateEmpty(dAtA, uint64(ll))
		for j := 0; j < ll; j++ {
			dAtA = append(dAtA, byte(r.Intn(256)))
		}
	default:
		dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key))
		dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
	}
	return dAtA
}
func encodeVarintPopulateEmpty(dAtA []byte, v uint64) []byte {
	for v >= 1<<7 {
		dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))
		v >>= 7
	}
	dAtA = append(dAtA, uint8(v))
	return dAtA
}
func (m *Empty) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}

func sovEmpty(x uint64) (n int) {
	return (math_bits.Len64(x|1) + 6) / 7
}
func sozEmpty(x uint64) (n int) {
	return sovEmpty(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *Empty) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&Empty{`,
		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
		`}`,
	}, "")
	return s
}
func valueToStringEmpty(v interface{}) string {
	rv := reflect.ValueOf(v)
	if rv.IsNil() {
		return "nil"
	}
	pv := reflect.Indirect(rv).Interface()
	return fmt.Sprintf("*%v", pv)
}
func (m *Empty) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowEmpty
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Empty: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Empty: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		default:
			iNdEx = preIndex
			skippy, err := skipEmpty(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthEmpty
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func skipEmpty(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	depth := 0
	for iNdEx < l {
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowEmpty
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowEmpty
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
		case 1:
			iNdEx += 8
		case 2:
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowEmpty
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if length < 0 {
				return 0, ErrInvalidLengthEmpty
			}
			iNdEx += length
		case 3:
			depth++
		case 4:
			if depth == 0 {
				return 0, ErrUnexpectedEndOfGroupEmpty
			}
			depth--
		case 5:
			iNdEx += 4
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
		if iNdEx < 0 {
			return 0, ErrInvalidLengthEmpty
		}
		if depth == 0 {
			return iNdEx, nil
		}
	}
	return 0, io.ErrUnexpectedEOF
}

var (
	ErrInvalidLengthEmpty        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowEmpty          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupEmpty = fmt.Errorf("proto: unexpected end of group")
)
738
vendor/github.com/gogo/protobuf/types/field_mask.pb.go
generated
vendored
Normal file
@ -0,0 +1,738 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: google/protobuf/field_mask.proto

package types

import (
	bytes "bytes"
	fmt "fmt"
	proto "github.com/gogo/protobuf/proto"
	io "io"
	math "math"
	math_bits "math/bits"
	reflect "reflect"
	strings "strings"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package

// `FieldMask` represents a set of symbolic field paths, for example:
//
//     paths: "f.a"
//     paths: "f.b.d"
//
// Here `f` represents a field in some root message, `a` and `b`
// fields in the message found in `f`, and `d` a field found in the
// message in `f.b`.
//
// Field masks are used to specify a subset of fields that should be
// returned by a get operation or modified by an update operation.
// Field masks also have a custom JSON encoding (see below).
//
// # Field Masks in Projections
//
// When used in the context of a projection, a response message or
// sub-message is filtered by the API to only contain those fields as
// specified in the mask. For example, if the mask in the previous
// example is applied to a response message as follows:
//
//     f {
//       a : 22
//       b {
//         d : 1
//         x : 2
//       }
//       y : 13
//     }
//     z: 8
//
// The result will not contain specific values for fields x,y and z
// (their value will be set to the default, and omitted in proto text
// output):
//
//
//     f {
//       a : 22
//       b {
//         d : 1
//       }
//     }
//
// A repeated field is not allowed except at the last position of a
// paths string.
//
// If a FieldMask object is not present in a get operation, the
// operation applies to all fields (as if a FieldMask of all fields
// had been specified).
//
// Note that a field mask does not necessarily apply to the
// top-level response message. In case of a REST get operation, the
// field mask applies directly to the response, but in case of a REST
// list operation, the mask instead applies to each individual message
// in the returned resource list. In case of a REST custom method,
// other definitions may be used. Where the mask applies will be
// clearly documented together with its declaration in the API. In
// any case, the effect on the returned resource/resources is required
// behavior for APIs.
//
// # Field Masks in Update Operations
//
// A field mask in update operations specifies which fields of the
// targeted resource are going to be updated. The API is required
// to only change the values of the fields as specified in the mask
// and leave the others untouched. If a resource is passed in to
// describe the updated values, the API ignores the values of all
// fields not covered by the mask.
//
// If a repeated field is specified for an update operation, new values will
// be appended to the existing repeated field in the target resource. Note that
// a repeated field is only allowed in the last position of a `paths` string.
//
// If a sub-message is specified in the last position of the field mask for an
// update operation, then new value will be merged into the existing sub-message
// in the target resource.
//
// For example, given the target message:
//
//     f {
//       b {
//         d: 1
//         x: 2
//       }
//       c: [1]
//     }
//
// And an update message:
//
//     f {
//       b {
//         d: 10
//       }
//       c: [2]
//     }
//
// then if the field mask is:
//
//     paths: ["f.b", "f.c"]
//
// then the result will be:
//
//     f {
//       b {
//         d: 10
//         x: 2
//       }
//       c: [1, 2]
//     }
//
// An implementation may provide options to override this default behavior for
// repeated and message fields.
//
// In order to reset a field's value to the default, the field must
// be in the mask and set to the default value in the provided resource.
// Hence, in order to reset all fields of a resource, provide a default
// instance of the resource and set all fields in the mask, or do
// not provide a mask as described below.
//
// If a field mask is not present on update, the operation applies to
// all fields (as if a field mask of all fields has been specified).
// Note that in the presence of schema evolution, this may mean that
// fields the client does not know and has therefore not filled into
// the request will be reset to their default. If this is unwanted
// behavior, a specific service may require a client to always specify
// a field mask, producing an error if not.
//
// As with get operations, the location of the resource which
// describes the updated values in the request message depends on the
// operation kind. In any case, the effect of the field mask is
// required to be honored by the API.
//
// ## Considerations for HTTP REST
//
// The HTTP kind of an update operation which uses a field mask must
// be set to PATCH instead of PUT in order to satisfy HTTP semantics
// (PUT must only be used for full updates).
//
// # JSON Encoding of Field Masks
//
// In JSON, a field mask is encoded as a single string where paths are
// separated by a comma. Fields name in each path are converted
// to/from lower-camel naming conventions.
//
// As an example, consider the following message declarations:
//
//     message Profile {
//       User user = 1;
//       Photo photo = 2;
//     }
//     message User {
//       string display_name = 1;
//       string address = 2;
//     }
//
// In proto a field mask for `Profile` may look as such:
//
//     mask {
//       paths: "user.display_name"
//       paths: "photo"
//     }
//
// In JSON, the same mask is represented as below:
//
//     {
//       mask: "user.displayName,photo"
//     }
//
// # Field Masks and Oneof Fields
//
// Field masks treat fields in oneofs just as regular fields. Consider the
// following message:
//
//     message SampleMessage {
//       oneof test_oneof {
//         string name = 4;
//         SubMessage sub_message = 9;
//       }
//     }
//
// The field mask can be:
//
//     mask {
//       paths: "name"
//     }
//
// Or:
//
//     mask {
//       paths: "sub_message"
//     }
//
// Note that oneof type names ("test_oneof" in this case) cannot be used in
// paths.
//
// ## Field Mask Verification
//
// The implementation of any API method which has a FieldMask type field in the
// request should verify the included field paths, and return an
// `INVALID_ARGUMENT` error if any path is duplicated or unmappable.
type FieldMask struct {
	// The set of field mask paths.
	Paths                []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *FieldMask) Reset()      { *m = FieldMask{} }
func (*FieldMask) ProtoMessage() {}
func (*FieldMask) Descriptor() ([]byte, []int) {
	return fileDescriptor_5158202634f0da48, []int{0}
}
func (m *FieldMask) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *FieldMask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_FieldMask.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *FieldMask) XXX_Merge(src proto.Message) {
	xxx_messageInfo_FieldMask.Merge(m, src)
}
func (m *FieldMask) XXX_Size() int {
	return m.Size()
}
func (m *FieldMask) XXX_DiscardUnknown() {
	xxx_messageInfo_FieldMask.DiscardUnknown(m)
}

var xxx_messageInfo_FieldMask proto.InternalMessageInfo

func (m *FieldMask) GetPaths() []string {
	if m != nil {
		return m.Paths
	}
	return nil
}

func (*FieldMask) XXX_MessageName() string {
	return "google.protobuf.FieldMask"
}
func init() {
	proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask")
}

func init() { proto.RegisterFile("google/protobuf/field_mask.proto", fileDescriptor_5158202634f0da48) }

var fileDescriptor_5158202634f0da48 = []byte{
	// 203 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcf, 0xcf, 0x4f,
	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd,
	0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x03, 0x8b, 0x09, 0xf1, 0x43, 0x54, 0xe8, 0xc1, 0x54,
	0x28, 0x29, 0x72, 0x71, 0xba, 0x81, 0x14, 0xf9, 0x26, 0x16, 0x67, 0x0b, 0x89, 0x70, 0xb1, 0x16,
	0x24, 0x96, 0x64, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x41, 0x38, 0x4e, 0x1d, 0x8c,
	0x37, 0x1e, 0xca, 0x31, 0x7c, 0x78, 0x28, 0xc7, 0xf8, 0xe3, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39,
	0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23,
	0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x80, 0xc4, 0x1f, 0xcb, 0x31, 0x9e, 0x78, 0x2c, 0xc7,
	0xc8, 0x25, 0x9c, 0x9c, 0x9f, 0xab, 0x87, 0x66, 0x95, 0x13, 0x1f, 0xdc, 0xa2, 0x00, 0x90, 0x50,
	0x00, 0x63, 0x14, 0x6b, 0x49, 0x65, 0x41, 0x6a, 0xf1, 0x0f, 0x46, 0xc6, 0x45, 0x4c, 0xcc, 0xee,
	0x01, 0x4e, 0xab, 0x98, 0xe4, 0xdc, 0x21, 0x7a, 0x02, 0xa0, 0x7a, 0xf4, 0xc2, 0x53, 0x73, 0x72,
	0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0x2a, 0x93, 0xd8, 0xc0, 0x86, 0x19, 0x03, 0x02, 0x00,
	0x00, 0xff, 0xff, 0x43, 0xa0, 0x83, 0xd0, 0xe9, 0x00, 0x00, 0x00,
}

func (this *FieldMask) Compare(that interface{}) int {
	if that == nil {
		if this == nil {
			return 0
		}
		return 1
	}

	that1, ok := that.(*FieldMask)
	if !ok {
		that2, ok := that.(FieldMask)
		if ok {
			that1 = &that2
		} else {
			return 1
		}
	}
	if that1 == nil {
		if this == nil {
			return 0
		}
		return 1
	} else if this == nil {
		return -1
	}
	if len(this.Paths) != len(that1.Paths) {
		if len(this.Paths) < len(that1.Paths) {
			return -1
		}
		return 1
	}
	for i := range this.Paths {
		if this.Paths[i] != that1.Paths[i] {
			if this.Paths[i] < that1.Paths[i] {
				return -1
			}
			return 1
		}
	}
	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
		return c
	}
	return 0
}
func (this *FieldMask) Equal(that interface{}) bool {
	if that == nil {
		return this == nil
	}

	that1, ok := that.(*FieldMask)
	if !ok {
		that2, ok := that.(FieldMask)
		if ok {
			that1 = &that2
		} else {
			return false
		}
	}
	if that1 == nil {
		return this == nil
	} else if this == nil {
		return false
	}
	if len(this.Paths) != len(that1.Paths) {
		return false
	}
	for i := range this.Paths {
		if this.Paths[i] != that1.Paths[i] {
			return false
		}
	}
	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
		return false
	}
	return true
}
func (this *FieldMask) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 5)
	s = append(s, "&types.FieldMask{")
	s = append(s, "Paths: "+fmt.Sprintf("%#v", this.Paths)+",\n")
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func valueToGoStringFieldMask(v interface{}, typ string) string {
	rv := reflect.ValueOf(v)
	if rv.IsNil() {
		return "nil"
	}
	pv := reflect.Indirect(rv).Interface()
	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
func (m *FieldMask) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *FieldMask) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *FieldMask) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if len(m.Paths) > 0 {
		for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- {
			i -= len(m.Paths[iNdEx])
			copy(dAtA[i:], m.Paths[iNdEx])
			i = encodeVarintFieldMask(dAtA, i, uint64(len(m.Paths[iNdEx])))
			i--
			dAtA[i] = 0xa
		}
	}
	return len(dAtA) - i, nil
}

func encodeVarintFieldMask(dAtA []byte, offset int, v uint64) int {
	offset -= sovFieldMask(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}
func NewPopulatedFieldMask(r randyFieldMask, easy bool) *FieldMask {
	this := &FieldMask{}
	v1 := r.Intn(10)
	this.Paths = make([]string, v1)
	for i := 0; i < v1; i++ {
		this.Paths[i] = string(randStringFieldMask(r))
	}
	if !easy && r.Intn(10) != 0 {
		this.XXX_unrecognized = randUnrecognizedFieldMask(r, 2)
	}
	return this
}

type randyFieldMask interface {
	Float32() float32
	Float64() float64
	Int63() int64
	Int31() int32
	Uint32() uint32
	Intn(n int) int
}

func randUTF8RuneFieldMask(r randyFieldMask) rune {
	ru := r.Intn(62)
	if ru < 10 {
		return rune(ru + 48)
	} else if ru < 36 {
		return rune(ru + 55)
	}
	return rune(ru + 61)
}
func randStringFieldMask(r randyFieldMask) string {
	v2 := r.Intn(100)
	tmps := make([]rune, v2)
	for i := 0; i < v2; i++ {
		tmps[i] = randUTF8RuneFieldMask(r)
	}
	return string(tmps)
}
func randUnrecognizedFieldMask(r randyFieldMask, maxFieldNumber int) (dAtA []byte) {
	l := r.Intn(5)
	for i := 0; i < l; i++ {
		wire := r.Intn(4)
		if wire == 3 {
			wire = 5
		}
		fieldNumber := maxFieldNumber + r.Intn(100)
		dAtA = randFieldFieldMask(dAtA, r, fieldNumber, wire)
	}
	return dAtA
}
func randFieldFieldMask(dAtA []byte, r randyFieldMask, fieldNumber int, wire int) []byte {
	key := uint32(fieldNumber)<<3 | uint32(wire)
	switch wire {
	case 0:
		dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key))
		v3 := r.Int63()
		if r.Intn(2) == 0 {
			v3 *= -1
		}
		dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(v3))
	case 1:
		dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key))
		dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
	case 2:
		dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key))
		ll := r.Intn(100)
		dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(ll))
		for j := 0; j < ll; j++ {
			dAtA = append(dAtA, byte(r.Intn(256)))
		}
	default:
		dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key))
		dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
	}
	return dAtA
}
func encodeVarintPopulateFieldMask(dAtA []byte, v uint64) []byte {
	for v >= 1<<7 {
		dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))
		v >>= 7
	}
	dAtA = append(dAtA, uint8(v))
	return dAtA
}
func (m *FieldMask) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Paths) > 0 {
		for _, s := range m.Paths {
			l = len(s)
			n += 1 + l + sovFieldMask(uint64(l))
		}
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}

func sovFieldMask(x uint64) (n int) {
	return (math_bits.Len64(x|1) + 6) / 7
}
func sozFieldMask(x uint64) (n int) {
	return sovFieldMask(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *FieldMask) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&FieldMask{`,
		`Paths:` + fmt.Sprintf("%v", this.Paths) + `,`,
		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
		`}`,
	}, "")
	return s
}
func valueToStringFieldMask(v interface{}) string {
	rv := reflect.ValueOf(v)
	if rv.IsNil() {
		return "nil"
	}
	pv := reflect.Indirect(rv).Interface()
	return fmt.Sprintf("*%v", pv)
}
func (m *FieldMask) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowFieldMask
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: FieldMask: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: FieldMask: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowFieldMask
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthFieldMask
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthFieldMask
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex]))
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipFieldMask(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthFieldMask
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func skipFieldMask(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	depth := 0
	for iNdEx < l {
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowFieldMask
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowFieldMask
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
		case 1:
			iNdEx += 8
		case 2:
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowFieldMask
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if length < 0 {
				return 0, ErrInvalidLengthFieldMask
			}
			iNdEx += length
		case 3:
			depth++
		case 4:
			if depth == 0 {
				return 0, ErrUnexpectedEndOfGroupFieldMask
			}
			depth--
		case 5:
			iNdEx += 4
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
		if iNdEx < 0 {
			return 0, ErrInvalidLengthFieldMask
		}
		if depth == 0 {
			return iNdEx, nil
		}
	}
	return 0, io.ErrUnexpectedEOF
}

var (
	ErrInvalidLengthFieldMask        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowFieldMask          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupFieldMask = fmt.Errorf("proto: unexpected end of group")
)
34
vendor/github.com/gogo/protobuf/types/protosize.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
package types

func (m *Any) ProtoSize() (n int) { return m.Size() }
func (m *Api) ProtoSize() (n int) { return m.Size() }
func (m *Method) ProtoSize() (n int) { return m.Size() }
func (m *Mixin) ProtoSize() (n int) { return m.Size() }
func (m *Duration) ProtoSize() (n int) { return m.Size() }
func (m *Empty) ProtoSize() (n int) { return m.Size() }
func (m *FieldMask) ProtoSize() (n int) { return m.Size() }
func (m *SourceContext) ProtoSize() (n int) { return m.Size() }
func (m *Struct) ProtoSize() (n int) { return m.Size() }
func (m *Value) ProtoSize() (n int) { return m.Size() }
func (m *Value_NullValue) ProtoSize() (n int) { return m.Size() }
func (m *Value_NumberValue) ProtoSize() (n int) { return m.Size() }
func (m *Value_StringValue) ProtoSize() (n int) { return m.Size() }
func (m *Value_BoolValue) ProtoSize() (n int) { return m.Size() }
func (m *Value_StructValue) ProtoSize() (n int) { return m.Size() }
func (m *Value_ListValue) ProtoSize() (n int) { return m.Size() }
func (m *ListValue) ProtoSize() (n int) { return m.Size() }
func (m *Timestamp) ProtoSize() (n int) { return m.Size() }
func (m *Type) ProtoSize() (n int) { return m.Size() }
func (m *Field) ProtoSize() (n int) { return m.Size() }
func (m *Enum) ProtoSize() (n int) { return m.Size() }
func (m *EnumValue) ProtoSize() (n int) { return m.Size() }
func (m *Option) ProtoSize() (n int) { return m.Size() }
func (m *DoubleValue) ProtoSize() (n int) { return m.Size() }
func (m *FloatValue) ProtoSize() (n int) { return m.Size() }
func (m *Int64Value) ProtoSize() (n int) { return m.Size() }
func (m *UInt64Value) ProtoSize() (n int) { return m.Size() }
func (m *Int32Value) ProtoSize() (n int) { return m.Size() }
func (m *UInt32Value) ProtoSize() (n int) { return m.Size() }
func (m *BoolValue) ProtoSize() (n int) { return m.Size() }
func (m *StringValue) ProtoSize() (n int) { return m.Size() }
func (m *BytesValue) ProtoSize() (n int) { return m.Size() }
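These one-line wrappers exist so that code generated with gogoproto's protosizer option, which emits calls to ProtoSize(), keeps working against the well-known types, whose generated sizing method is named Size(). A small usage sketch, assuming the vendored package imports as github.com/gogo/protobuf/types:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/types"
)

func main() {
	ts := &types.Timestamp{Seconds: 1670000000, Nanos: 500}
	// ProtoSize simply forwards to the generated Size method,
	// so both report the same encoded length in bytes.
	fmt.Println(ts.Size() == ts.ProtoSize()) // true
}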
524 vendor/github.com/gogo/protobuf/types/source_context.pb.go generated vendored Normal file
@@ -0,0 +1,524 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: google/protobuf/source_context.proto

package types

import (
	bytes "bytes"
	fmt "fmt"
	proto "github.com/gogo/protobuf/proto"
	io "io"
	math "math"
	math_bits "math/bits"
	reflect "reflect"
	strings "strings"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package

// `SourceContext` represents information about the source of a
// protobuf element, like the file in which it is defined.
type SourceContext struct {
	// The path-qualified name of the .proto file that contained the associated
	// protobuf element. For example: `"google/protobuf/source_context.proto"`.
	FileName             string   `protobuf:"bytes,1,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *SourceContext) Reset()      { *m = SourceContext{} }
func (*SourceContext) ProtoMessage() {}
func (*SourceContext) Descriptor() ([]byte, []int) {
	return fileDescriptor_b686cdb126d509db, []int{0}
}
func (m *SourceContext) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *SourceContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_SourceContext.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *SourceContext) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SourceContext.Merge(m, src)
}
func (m *SourceContext) XXX_Size() int {
	return m.Size()
}
func (m *SourceContext) XXX_DiscardUnknown() {
	xxx_messageInfo_SourceContext.DiscardUnknown(m)
}

var xxx_messageInfo_SourceContext proto.InternalMessageInfo

func (m *SourceContext) GetFileName() string {
	if m != nil {
		return m.FileName
	}
	return ""
}

func (*SourceContext) XXX_MessageName() string {
	return "google.protobuf.SourceContext"
}
func init() {
	proto.RegisterType((*SourceContext)(nil), "google.protobuf.SourceContext")
}

func init() {
	proto.RegisterFile("google/protobuf/source_context.proto", fileDescriptor_b686cdb126d509db)
}

var fileDescriptor_b686cdb126d509db = []byte{
	// 212 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcf, 0xcf, 0x4f,
	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xce, 0x2f, 0x2d,
	0x4a, 0x4e, 0x8d, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0xd1, 0x03, 0x8b, 0x0b, 0xf1, 0x43,
	0x54, 0xe9, 0xc1, 0x54, 0x29, 0xe9, 0x70, 0xf1, 0x06, 0x83, 0x15, 0x3a, 0x43, 0xd4, 0x09, 0x49,
	0x73, 0x71, 0xa6, 0x65, 0xe6, 0xa4, 0xc6, 0xe7, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a,
	0x70, 0x06, 0x71, 0x80, 0x04, 0xfc, 0x12, 0x73, 0x53, 0x9d, 0x3a, 0x19, 0x6f, 0x3c, 0x94, 0x63,
	0xf8, 0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9,
	0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e,
	0xc9, 0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0x3c, 0xf1, 0x58, 0x8e, 0x91, 0x4b, 0x38, 0x39,
	0x3f, 0x57, 0x0f, 0xcd, 0x56, 0x27, 0x21, 0x14, 0x3b, 0x03, 0x40, 0xc2, 0x01, 0x8c, 0x51, 0xac,
	0x25, 0x95, 0x05, 0xa9, 0xc5, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43,
	0x34, 0x05, 0x40, 0x35, 0xe9, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80,
	0x94, 0x25, 0xb1, 0x81, 0x4d, 0x33, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x37, 0x2a, 0xa1,
	0xf9, 0x00, 0x00, 0x00,
}

func (this *SourceContext) Compare(that interface{}) int {
	if that == nil {
		if this == nil {
			return 0
		}
		return 1
	}

	that1, ok := that.(*SourceContext)
	if !ok {
		that2, ok := that.(SourceContext)
		if ok {
			that1 = &that2
		} else {
			return 1
		}
	}
	if that1 == nil {
		if this == nil {
			return 0
		}
		return 1
	} else if this == nil {
		return -1
	}
	if this.FileName != that1.FileName {
		if this.FileName < that1.FileName {
			return -1
		}
		return 1
	}
	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
		return c
	}
	return 0
}
func (this *SourceContext) Equal(that interface{}) bool {
	if that == nil {
		return this == nil
	}

	that1, ok := that.(*SourceContext)
	if !ok {
		that2, ok := that.(SourceContext)
		if ok {
			that1 = &that2
		} else {
			return false
		}
	}
	if that1 == nil {
		return this == nil
	} else if this == nil {
		return false
	}
	if this.FileName != that1.FileName {
		return false
	}
	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
		return false
	}
	return true
}
func (this *SourceContext) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 5)
	s = append(s, "&types.SourceContext{")
	s = append(s, "FileName: "+fmt.Sprintf("%#v", this.FileName)+",\n")
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func valueToGoStringSourceContext(v interface{}, typ string) string {
	rv := reflect.ValueOf(v)
	if rv.IsNil() {
		return "nil"
	}
	pv := reflect.Indirect(rv).Interface()
	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
func (m *SourceContext) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *SourceContext) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *SourceContext) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if len(m.FileName) > 0 {
		i -= len(m.FileName)
		copy(dAtA[i:], m.FileName)
		i = encodeVarintSourceContext(dAtA, i, uint64(len(m.FileName)))
		i--
		dAtA[i] = 0xa
	}
	return len(dAtA) - i, nil
}

func encodeVarintSourceContext(dAtA []byte, offset int, v uint64) int {
	offset -= sovSourceContext(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}
func NewPopulatedSourceContext(r randySourceContext, easy bool) *SourceContext {
	this := &SourceContext{}
	this.FileName = string(randStringSourceContext(r))
	if !easy && r.Intn(10) != 0 {
		this.XXX_unrecognized = randUnrecognizedSourceContext(r, 2)
	}
	return this
}

type randySourceContext interface {
	Float32() float32
	Float64() float64
	Int63() int64
	Int31() int32
	Uint32() uint32
	Intn(n int) int
}

func randUTF8RuneSourceContext(r randySourceContext) rune {
	ru := r.Intn(62)
	if ru < 10 {
		return rune(ru + 48)
	} else if ru < 36 {
		return rune(ru + 55)
	}
	return rune(ru + 61)
}
func randStringSourceContext(r randySourceContext) string {
	v1 := r.Intn(100)
	tmps := make([]rune, v1)
	for i := 0; i < v1; i++ {
		tmps[i] = randUTF8RuneSourceContext(r)
	}
	return string(tmps)
}
func randUnrecognizedSourceContext(r randySourceContext, maxFieldNumber int) (dAtA []byte) {
	l := r.Intn(5)
	for i := 0; i < l; i++ {
		wire := r.Intn(4)
		if wire == 3 {
			wire = 5
		}
		fieldNumber := maxFieldNumber + r.Intn(100)
		dAtA = randFieldSourceContext(dAtA, r, fieldNumber, wire)
	}
	return dAtA
}
func randFieldSourceContext(dAtA []byte, r randySourceContext, fieldNumber int, wire int) []byte {
	key := uint32(fieldNumber)<<3 | uint32(wire)
	switch wire {
	case 0:
		dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key))
		v2 := r.Int63()
		if r.Intn(2) == 0 {
			v2 *= -1
		}
		dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(v2))
	case 1:
		dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key))
		dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
	case 2:
		dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key))
		ll := r.Intn(100)
		dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(ll))
		for j := 0; j < ll; j++ {
			dAtA = append(dAtA, byte(r.Intn(256)))
		}
	default:
		dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key))
		dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
	}
	return dAtA
}
func encodeVarintPopulateSourceContext(dAtA []byte, v uint64) []byte {
	for v >= 1<<7 {
		dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))
		v >>= 7
	}
	dAtA = append(dAtA, uint8(v))
	return dAtA
}
func (m *SourceContext) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.FileName)
	if l > 0 {
		n += 1 + l + sovSourceContext(uint64(l))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}

func sovSourceContext(x uint64) (n int) {
	return (math_bits.Len64(x|1) + 6) / 7
}
func sozSourceContext(x uint64) (n int) {
	return sovSourceContext(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *SourceContext) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&SourceContext{`,
		`FileName:` + fmt.Sprintf("%v", this.FileName) + `,`,
		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
		`}`,
	}, "")
	return s
}
func valueToStringSourceContext(v interface{}) string {
	rv := reflect.ValueOf(v)
	if rv.IsNil() {
		return "nil"
	}
	pv := reflect.Indirect(rv).Interface()
	return fmt.Sprintf("*%v", pv)
}
func (m *SourceContext) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowSourceContext
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: SourceContext: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: SourceContext: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field FileName", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowSourceContext
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthSourceContext
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthSourceContext
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.FileName = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipSourceContext(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthSourceContext
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func skipSourceContext(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	depth := 0
	for iNdEx < l {
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowSourceContext
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowSourceContext
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
		case 1:
			iNdEx += 8
		case 2:
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowSourceContext
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if length < 0 {
				return 0, ErrInvalidLengthSourceContext
			}
			iNdEx += length
		case 3:
			depth++
		case 4:
			if depth == 0 {
				return 0, ErrUnexpectedEndOfGroupSourceContext
			}
			depth--
		case 5:
			iNdEx += 4
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
		if iNdEx < 0 {
			return 0, ErrInvalidLengthSourceContext
		}
		if depth == 0 {
			return iNdEx, nil
		}
	}
	return 0, io.ErrUnexpectedEOF
}

var (
	ErrInvalidLengthSourceContext        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowSourceContext          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupSourceContext = fmt.Errorf("proto: unexpected end of group")
)
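Because the generated message carries its own Marshal and Unmarshal methods, a wire round trip needs no registry or reflection. A usage sketch, assuming the vendored package imports as github.com/gogo/protobuf/types:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/types"
)

func main() {
	in := &types.SourceContext{FileName: "google/protobuf/source_context.proto"}
	data, err := in.Marshal()
	if err != nil {
		panic(err)
	}
	out := &types.SourceContext{}
	if err := out.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(out.GetFileName()) // google/protobuf/source_context.proto
}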
2271 vendor/github.com/gogo/protobuf/types/struct.pb.go generated vendored Normal file
File diff suppressed because it is too large
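struct.pb.go (collapsed above) generates the google.protobuf.Struct well-known type: a dynamic map from field names to Value oneof variants such as Value_StringValue and Value_NumberValue, both visible in protosize.go earlier. A hedged sketch of how these types are typically assembled; the Fields map layout is assumed from the upstream well-known types, not shown in this diff:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/types"
)

func main() {
	// Build {"greeting": "hello", "pi": 3.14} as a protobuf Struct.
	s := &types.Struct{
		Fields: map[string]*types.Value{
			"greeting": {Kind: &types.Value_StringValue{StringValue: "hello"}},
			"pi":       {Kind: &types.Value_NumberValue{NumberValue: 3.14}},
		},
	}
	fmt.Println(s.Fields["greeting"].GetStringValue())
}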
130 vendor/github.com/gogo/protobuf/types/timestamp.go generated vendored Normal file
@@ -0,0 +1,130 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package types

// This file implements operations on google.protobuf.Timestamp.

import (
	"errors"
	"fmt"
	"time"
)

const (
	// Seconds field of the earliest valid Timestamp.
	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
	minValidSeconds = -62135596800
	// Seconds field just after the latest valid Timestamp.
	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
	maxValidSeconds = 253402300800
)

// validateTimestamp determines whether a Timestamp is valid.
// A valid timestamp represents a time in the range
// [0001-01-01, 10000-01-01) and has a Nanos field
// in the range [0, 1e9).
//
// If the Timestamp is valid, validateTimestamp returns nil.
// Otherwise, it returns an error that describes
// the problem.
//
// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
func validateTimestamp(ts *Timestamp) error {
	if ts == nil {
		return errors.New("timestamp: nil Timestamp")
	}
	if ts.Seconds < minValidSeconds {
		return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
	}
	if ts.Seconds >= maxValidSeconds {
		return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
	}
	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
		return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
	}
	return nil
}

// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
// It returns an error if the argument is invalid.
//
// Unlike most Go functions, if Timestamp returns an error, the first return value
// is not the zero time.Time. Instead, it is the value obtained from the
// time.Unix function when passed the contents of the Timestamp, in the UTC
// locale. This may or may not be a meaningful time; many invalid Timestamps
// do map to valid time.Times.
//
// A nil Timestamp returns an error. The first return value in that case is
// undefined.
func TimestampFromProto(ts *Timestamp) (time.Time, error) {
	// Don't return the zero value on error, because corresponds to a valid
	// timestamp. Instead return whatever time.Unix gives us.
	var t time.Time
	if ts == nil {
		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
	} else {
		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
	}
	return t, validateTimestamp(ts)
}

// TimestampNow returns a google.protobuf.Timestamp for the current time.
func TimestampNow() *Timestamp {
	ts, err := TimestampProto(time.Now())
	if err != nil {
		panic("ptypes: time.Now() out of Timestamp range")
	}
	return ts
}

// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
func TimestampProto(t time.Time) (*Timestamp, error) {
	ts := &Timestamp{
		Seconds: t.Unix(),
		Nanos:   int32(t.Nanosecond()),
	}
	if err := validateTimestamp(ts); err != nil {
		return nil, err
	}
	return ts, nil
}

// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
// Timestamps, it returns an error message in parentheses.
func TimestampString(ts *Timestamp) string {
	t, err := TimestampFromProto(ts)
	if err != nil {
		return fmt.Sprintf("(%v)", err)
	}
	return t.Format(time.RFC3339Nano)
}
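A round-trip sketch of the conversion helpers above; both directions validate the [0001-01-01, 10000-01-01) range, assuming the vendored import path github.com/gogo/protobuf/types:

package main

import (
	"fmt"
	"time"

	"github.com/gogo/protobuf/types"
)

func main() {
	// time.Time -> Timestamp proto and back again.
	ts, err := types.TimestampProto(time.Date(2022, 12, 1, 0, 0, 0, 0, time.UTC))
	if err != nil {
		panic(err)
	}
	t, err := types.TimestampFromProto(ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(types.TimestampString(ts), t.Year()) // 2022-12-01T00:00:00Z 2022
}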
539 vendor/github.com/gogo/protobuf/types/timestamp.pb.go generated vendored Normal file
@@ -0,0 +1,539 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: google/protobuf/timestamp.proto

package types

import (
	bytes "bytes"
	fmt "fmt"
	proto "github.com/gogo/protobuf/proto"
	io "io"
	math "math"
	math_bits "math/bits"
	reflect "reflect"
	strings "strings"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package

// A Timestamp represents a point in time independent of any time zone or local
// calendar, encoded as a count of seconds and fractions of seconds at
// nanosecond resolution. The count is relative to an epoch at UTC midnight on
// January 1, 1970, in the proleptic Gregorian calendar which extends the
// Gregorian calendar backwards to year one.
//
// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
// second table is needed for interpretation, using a [24-hour linear
// smear](https://developers.google.com/time/smear).
//
// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
// restricting to that range, we ensure that we can convert to and from [RFC
// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
//
// # Examples
//
// Example 1: Compute Timestamp from POSIX `time()`.
//
//     Timestamp timestamp;
//     timestamp.set_seconds(time(NULL));
//     timestamp.set_nanos(0);
//
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
//
//     struct timeval tv;
//     gettimeofday(&tv, NULL);
//
//     Timestamp timestamp;
//     timestamp.set_seconds(tv.tv_sec);
//     timestamp.set_nanos(tv.tv_usec * 1000);
//
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
//
//     FILETIME ft;
//     GetSystemTimeAsFileTime(&ft);
//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
//
//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
//     Timestamp timestamp;
//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
//
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
//
//     long millis = System.currentTimeMillis();
//
//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
//         .setNanos((int) ((millis % 1000) * 1000000)).build();
//
//
// Example 5: Compute Timestamp from current time in Python.
//
//     timestamp = Timestamp()
//     timestamp.GetCurrentTime()
//
// # JSON Mapping
//
// In JSON format, the Timestamp type is encoded as a string in the
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
// where {year} is always expressed using four digits while {month}, {day},
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
// is required. A proto3 JSON serializer should always use UTC (as indicated by
// "Z") when printing the Timestamp type and a proto3 JSON parser should be
// able to accept both UTC and other timezones (as indicated by an offset).
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
//
// In JavaScript, one can convert a Date object to this format using the
// standard
// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
// method. In Python, a standard `datetime.datetime` object can be converted
// to this format using
// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
// the Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
// ) to obtain a formatter capable of generating timestamps in this format.
//
//
type Timestamp struct {
	// Represents seconds of UTC time since Unix epoch
	// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
	// 9999-12-31T23:59:59Z inclusive.
	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
	// Non-negative fractions of a second at nanosecond resolution. Negative
	// second values with fractions must still have non-negative nanos values
	// that count forward in time. Must be from 0 to 999,999,999
	// inclusive.
	Nanos                int32    `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *Timestamp) Reset()      { *m = Timestamp{} }
func (*Timestamp) ProtoMessage() {}
func (*Timestamp) Descriptor() ([]byte, []int) {
	return fileDescriptor_292007bbfe81227e, []int{0}
}
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
func (m *Timestamp) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *Timestamp) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Timestamp.Merge(m, src)
}
func (m *Timestamp) XXX_Size() int {
	return m.Size()
}
func (m *Timestamp) XXX_DiscardUnknown() {
	xxx_messageInfo_Timestamp.DiscardUnknown(m)
}

var xxx_messageInfo_Timestamp proto.InternalMessageInfo

func (m *Timestamp) GetSeconds() int64 {
	if m != nil {
		return m.Seconds
	}
	return 0
}

func (m *Timestamp) GetNanos() int32 {
	if m != nil {
		return m.Nanos
	}
	return 0
}

func (*Timestamp) XXX_MessageName() string {
	return "google.protobuf.Timestamp"
}
func init() {
	proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
}

func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }

var fileDescriptor_292007bbfe81227e = []byte{
	// 212 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
	0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
	0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
	0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
	0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x03, 0xe3, 0x8d,
	0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3,
	0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, 0xf0, 0xe1, 0x91, 0x1c,
	0xe3, 0x8a, 0xc7, 0x72, 0x8c, 0x27, 0x1e, 0xcb, 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1,
	0x59, 0xee, 0xc4, 0x07, 0xb7, 0x3a, 0x00, 0x24, 0x14, 0xc0, 0x18, 0xc5, 0x5a, 0x52, 0x59, 0x90,
	0x5a, 0xfc, 0x83, 0x91, 0x71, 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88,
	0x9e, 0x00, 0xa8, 0x1e, 0xbd, 0xf0, 0xd4, 0x9c, 0x1c, 0xef, 0xbc, 0xfc, 0xf2, 0xbc, 0x10, 0x90,
	0xca, 0x24, 0x36, 0xb0, 0x61, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0b, 0x23, 0x83, 0xdd,
	0xfa, 0x00, 0x00, 0x00,
}

func (this *Timestamp) Compare(that interface{}) int {
	if that == nil {
		if this == nil {
			return 0
		}
		return 1
	}

	that1, ok := that.(*Timestamp)
	if !ok {
		that2, ok := that.(Timestamp)
		if ok {
			that1 = &that2
		} else {
			return 1
		}
	}
	if that1 == nil {
		if this == nil {
			return 0
		}
		return 1
	} else if this == nil {
		return -1
	}
	if this.Seconds != that1.Seconds {
		if this.Seconds < that1.Seconds {
			return -1
		}
		return 1
	}
	if this.Nanos != that1.Nanos {
		if this.Nanos < that1.Nanos {
			return -1
		}
		return 1
	}
	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
		return c
	}
	return 0
}
func (this *Timestamp) Equal(that interface{}) bool {
	if that == nil {
		return this == nil
	}

	that1, ok := that.(*Timestamp)
	if !ok {
		that2, ok := that.(Timestamp)
		if ok {
			that1 = &that2
		} else {
			return false
		}
	}
	if that1 == nil {
		return this == nil
	} else if this == nil {
		return false
	}
	if this.Seconds != that1.Seconds {
		return false
	}
	if this.Nanos != that1.Nanos {
		return false
	}
	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
		return false
	}
	return true
}
func (this *Timestamp) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 6)
	s = append(s, "&types.Timestamp{")
	s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n")
	s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n")
	if this.XXX_unrecognized != nil {
		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func valueToGoStringTimestamp(v interface{}, typ string) string {
	rv := reflect.ValueOf(v)
	if rv.IsNil() {
		return "nil"
	}
	pv := reflect.Indirect(rv).Interface()
	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
func (m *Timestamp) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *Timestamp) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if m.Nanos != 0 {
		i = encodeVarintTimestamp(dAtA, i, uint64(m.Nanos))
		i--
		dAtA[i] = 0x10
	}
	if m.Seconds != 0 {
		i = encodeVarintTimestamp(dAtA, i, uint64(m.Seconds))
		i--
		dAtA[i] = 0x8
	}
	return len(dAtA) - i, nil
}

func encodeVarintTimestamp(dAtA []byte, offset int, v uint64) int {
	offset -= sovTimestamp(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}
func (m *Timestamp) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if m.Seconds != 0 {
		n += 1 + sovTimestamp(uint64(m.Seconds))
	}
	if m.Nanos != 0 {
		n += 1 + sovTimestamp(uint64(m.Nanos))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}

func sovTimestamp(x uint64) (n int) {
	return (math_bits.Len64(x|1) + 6) / 7
}
func sozTimestamp(x uint64) (n int) {
	return sovTimestamp(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *Timestamp) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTimestamp
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Timestamp: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType)
			}
			m.Seconds = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTimestamp
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Seconds |= int64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType)
			}
			m.Nanos = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTimestamp
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Nanos |= int32(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			iNdEx = preIndex
			skippy, err := skipTimestamp(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthTimestamp
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func skipTimestamp(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	depth := 0
	for iNdEx < l {
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowTimestamp
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowTimestamp
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
		case 1:
			iNdEx += 8
		case 2:
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowTimestamp
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if length < 0 {
				return 0, ErrInvalidLengthTimestamp
			}
			iNdEx += length
		case 3:
			depth++
		case 4:
			if depth == 0 {
				return 0, ErrUnexpectedEndOfGroupTimestamp
			}
			depth--
		case 5:
			iNdEx += 4
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
		if iNdEx < 0 {
			return 0, ErrInvalidLengthTimestamp
		}
		if depth == 0 {
			return iNdEx, nil
		}
	}
	return 0, io.ErrUnexpectedEOF
}

var (
	ErrInvalidLengthTimestamp        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowTimestamp          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupTimestamp = fmt.Errorf("proto: unexpected end of group")
)
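sovTimestamp computes the varint width of a value as one byte per started group of seven significant bits, and sozTimestamp first zig-zag encodes signed values so that small negatives stay small. The same arithmetic as a standalone sketch (varintSize and zigzag are illustrative names, not part of the vendored code):

package main

import (
	"fmt"
	"math/bits"
)

// varintSize mirrors the generated sov* helpers: one byte per started
// group of seven significant bits (x|1 keeps zero at one byte).
func varintSize(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// zigzag mirrors soz*: it maps -1,1,-2,2,... to 1,2,3,4,... so values
// near zero encode compactly.
func zigzag(x int64) uint64 {
	return uint64((x << 1) ^ (x >> 63))
}

func main() {
	fmt.Println(varintSize(0), varintSize(300))     // 1 2
	fmt.Println(zigzag(-1), varintSize(zigzag(-1))) // 1 1
}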
94 vendor/github.com/gogo/protobuf/types/timestamp_gogo.go generated vendored Normal file
@@ -0,0 +1,94 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2016, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package types

import (
	"time"
)

func NewPopulatedTimestamp(r interface {
	Int63() int64
}, easy bool) *Timestamp {
	this := &Timestamp{}
	ns := int64(r.Int63())
	this.Seconds = ns / 1e9
	this.Nanos = int32(ns % 1e9)
	return this
}

func (ts *Timestamp) String() string {
	return TimestampString(ts)
}

func NewPopulatedStdTime(r interface {
	Int63() int64
}, easy bool) *time.Time {
	timestamp := NewPopulatedTimestamp(r, easy)
	t, err := TimestampFromProto(timestamp)
	if err != nil {
		return nil
	}
	return &t
}

func SizeOfStdTime(t time.Time) int {
	ts, err := TimestampProto(t)
	if err != nil {
		return 0
	}
	return ts.Size()
}

func StdTimeMarshal(t time.Time) ([]byte, error) {
	size := SizeOfStdTime(t)
	buf := make([]byte, size)
	_, err := StdTimeMarshalTo(t, buf)
	return buf, err
}

func StdTimeMarshalTo(t time.Time, data []byte) (int, error) {
	ts, err := TimestampProto(t)
	if err != nil {
		return 0, err
	}
	return ts.MarshalTo(data)
}

func StdTimeUnmarshal(t *time.Time, data []byte) error {
	ts := &Timestamp{}
	if err := ts.Unmarshal(data); err != nil {
		return err
	}
	tt, err := TimestampFromProto(ts)
	if err != nil {
		return err
	}
	*t = tt
	return nil
}
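The Std* helpers above let a bare time.Time cross the wire as a Timestamp message without an explicit conversion step. A usage sketch, assuming the vendored import path github.com/gogo/protobuf/types:

package main

import (
	"fmt"
	"time"

	"github.com/gogo/protobuf/types"
)

func main() {
	// Marshal a time.Time directly to Timestamp wire bytes and back,
	// using the Std* helpers defined above.
	t := time.Date(2022, 12, 1, 12, 30, 0, 0, time.UTC)
	data, err := types.StdTimeMarshal(t)
	if err != nil {
		panic(err)
	}
	var got time.Time
	if err := types.StdTimeUnmarshal(&got, data); err != nil {
		panic(err)
	}
	fmt.Println(got.Equal(t)) // true
}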
3355 vendor/github.com/gogo/protobuf/types/type.pb.go generated vendored Normal file
File diff suppressed because it is too large
2703 vendor/github.com/gogo/protobuf/types/wrappers.pb.go generated vendored Normal file
File diff suppressed because it is too large
300 vendor/github.com/gogo/protobuf/types/wrappers_gogo.go generated vendored Normal file
@@ -0,0 +1,300 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2018, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package types

func NewPopulatedStdDouble(r randyWrappers, easy bool) *float64 {
	v := NewPopulatedDoubleValue(r, easy)
	return &v.Value
}

func SizeOfStdDouble(v float64) int {
	pv := &DoubleValue{Value: v}
	return pv.Size()
}

func StdDoubleMarshal(v float64) ([]byte, error) {
	size := SizeOfStdDouble(v)
	buf := make([]byte, size)
	_, err := StdDoubleMarshalTo(v, buf)
	return buf, err
}

func StdDoubleMarshalTo(v float64, data []byte) (int, error) {
	pv := &DoubleValue{Value: v}
	return pv.MarshalTo(data)
}

func StdDoubleUnmarshal(v *float64, data []byte) error {
	pv := &DoubleValue{}
	if err := pv.Unmarshal(data); err != nil {
		return err
	}
	*v = pv.Value
	return nil
}
func NewPopulatedStdFloat(r randyWrappers, easy bool) *float32 {
	v := NewPopulatedFloatValue(r, easy)
	return &v.Value
}

func SizeOfStdFloat(v float32) int {
	pv := &FloatValue{Value: v}
	return pv.Size()
}

func StdFloatMarshal(v float32) ([]byte, error) {
	size := SizeOfStdFloat(v)
	buf := make([]byte, size)
	_, err := StdFloatMarshalTo(v, buf)
	return buf, err
}

func StdFloatMarshalTo(v float32, data []byte) (int, error) {
	pv := &FloatValue{Value: v}
	return pv.MarshalTo(data)
}

func StdFloatUnmarshal(v *float32, data []byte) error {
	pv := &FloatValue{}
	if err := pv.Unmarshal(data); err != nil {
		return err
	}
	*v = pv.Value
	return nil
}
func NewPopulatedStdInt64(r randyWrappers, easy bool) *int64 {
	v := NewPopulatedInt64Value(r, easy)
	return &v.Value
}

func SizeOfStdInt64(v int64) int {
	pv := &Int64Value{Value: v}
	return pv.Size()
}

func StdInt64Marshal(v int64) ([]byte, error) {
	size := SizeOfStdInt64(v)
	buf := make([]byte, size)
	_, err := StdInt64MarshalTo(v, buf)
	return buf, err
}

func StdInt64MarshalTo(v int64, data []byte) (int, error) {
	pv := &Int64Value{Value: v}
	return pv.MarshalTo(data)
}

func StdInt64Unmarshal(v *int64, data []byte) error {
	pv := &Int64Value{}
	if err := pv.Unmarshal(data); err != nil {
		return err
	}
	*v = pv.Value
	return nil
}
func NewPopulatedStdUInt64(r randyWrappers, easy bool) *uint64 {
	v := NewPopulatedUInt64Value(r, easy)
	return &v.Value
}

func SizeOfStdUInt64(v uint64) int {
	pv := &UInt64Value{Value: v}
	return pv.Size()
}

func StdUInt64Marshal(v uint64) ([]byte, error) {
	size := SizeOfStdUInt64(v)
	buf := make([]byte, size)
	_, err := StdUInt64MarshalTo(v, buf)
	return buf, err
}

func StdUInt64MarshalTo(v uint64, data []byte) (int, error) {
	pv := &UInt64Value{Value: v}
	return pv.MarshalTo(data)
}

func StdUInt64Unmarshal(v *uint64, data []byte) error {
	pv := &UInt64Value{}
	if err := pv.Unmarshal(data); err != nil {
		return err
	}
	*v = pv.Value
	return nil
}
func NewPopulatedStdInt32(r randyWrappers, easy bool) *int32 {
	v := NewPopulatedInt32Value(r, easy)
	return &v.Value
}

func SizeOfStdInt32(v int32) int {
	pv := &Int32Value{Value: v}
	return pv.Size()
}

func StdInt32Marshal(v int32) ([]byte, error) {
	size := SizeOfStdInt32(v)
	buf := make([]byte, size)
	_, err := StdInt32MarshalTo(v, buf)
	return buf, err
}

func StdInt32MarshalTo(v int32, data []byte) (int, error) {
	pv := &Int32Value{Value: v}
	return pv.MarshalTo(data)
}

func StdInt32Unmarshal(v *int32, data []byte) error {
	pv := &Int32Value{}
	if err := pv.Unmarshal(data); err != nil {
		return err
	}
	*v = pv.Value
	return nil
}
func NewPopulatedStdUInt32(r randyWrappers, easy bool) *uint32 {
	v := NewPopulatedUInt32Value(r, easy)
	return &v.Value
}

func SizeOfStdUInt32(v uint32) int {
	pv := &UInt32Value{Value: v}
	return pv.Size()
}

func StdUInt32Marshal(v uint32) ([]byte, error) {
	size := SizeOfStdUInt32(v)
	buf := make([]byte, size)
	_, err := StdUInt32MarshalTo(v, buf)
	return buf, err
}

func StdUInt32MarshalTo(v uint32, data []byte) (int, error) {
	pv := &UInt32Value{Value: v}
	return pv.MarshalTo(data)
}

func StdUInt32Unmarshal(v *uint32, data []byte) error {
	pv := &UInt32Value{}
	if err := pv.Unmarshal(data); err != nil {
		return err
	}
	*v = pv.Value
	return nil
}
func NewPopulatedStdBool(r randyWrappers, easy bool) *bool {
	v := NewPopulatedBoolValue(r, easy)
	return &v.Value
}

func SizeOfStdBool(v bool) int {
	pv := &BoolValue{Value: v}
	return pv.Size()
}

func StdBoolMarshal(v bool) ([]byte, error) {
	size := SizeOfStdBool(v)
	buf := make([]byte, size)
	_, err := StdBoolMarshalTo(v, buf)
	return buf, err
}

func StdBoolMarshalTo(v bool, data []byte) (int, error) {
	pv := &BoolValue{Value: v}
	return pv.MarshalTo(data)
}

func StdBoolUnmarshal(v *bool, data []byte) error {
	pv := &BoolValue{}
	if err := pv.Unmarshal(data); err != nil {
		return err
	}
	*v = pv.Value
	return nil
}
func NewPopulatedStdString(r randyWrappers, easy bool) *string {
	v := NewPopulatedStringValue(r, easy)
	return &v.Value
}

func SizeOfStdString(v string) int {
	pv := &StringValue{Value: v}
	return pv.Size()
}

func StdStringMarshal(v string) ([]byte, error) {
	size := SizeOfStdString(v)
	buf := make([]byte, size)
	_, err := StdStringMarshalTo(v, buf)
	return buf, err
}

func StdStringMarshalTo(v string, data []byte) (int, error) {
	pv := &StringValue{Value: v}
	return pv.MarshalTo(data)
}

func StdStringUnmarshal(v *string, data []byte) error {
	pv := &StringValue{}
	if err := pv.Unmarshal(data); err != nil {
		return err
	}
	*v = pv.Value
	return nil
}
func NewPopulatedStdBytes(r randyWrappers, easy bool) *[]byte {
	v := NewPopulatedBytesValue(r, easy)
	return &v.Value
}

func SizeOfStdBytes(v []byte) int {
	pv := &BytesValue{Value: v}
	return pv.Size()
}

func StdBytesMarshal(v []byte) ([]byte, error) {
	size := SizeOfStdBytes(v)
	buf := make([]byte, size)
	_, err := StdBytesMarshalTo(v, buf)
	return buf, err
}

func StdBytesMarshalTo(v []byte, data []byte) (int, error) {
	pv := &BytesValue{Value: v}
	return pv.MarshalTo(data)
}

func StdBytesUnmarshal(v *[]byte, data []byte) error {
	pv := &BytesValue{}
	if err := pv.Unmarshal(data); err != nil {
		return err
	}
	*v = pv.Value
	return nil
}
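Each block above follows the same template for one wrapper type: wrap the scalar in its *Value message, size it, marshal it, and unwrap it on the way back. A usage sketch for the float64 variant, assuming the vendored import path github.com/gogo/protobuf/types:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/types"
)

func main() {
	// Wrap a bare float64 in a DoubleValue message on the wire, then
	// unwrap it again, via the Std* helpers defined above.
	data, err := types.StdDoubleMarshal(6.02e23)
	if err != nil {
		panic(err)
	}
	var v float64
	if err := types.StdDoubleUnmarshal(&v, data); err != nil {
		panic(err)
	}
	fmt.Println(v) // 6.02e+23
}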
13 vendor/github.com/prometheus/prometheus/config/config.go generated vendored
@@ -776,12 +776,13 @@ func CheckTargetAddress(address model.LabelValue) error {
 // RemoteWriteConfig is the configuration for writing to remote storage.
 type RemoteWriteConfig struct {
-	URL                 *config.URL       `yaml:"url"`
-	RemoteTimeout       model.Duration    `yaml:"remote_timeout,omitempty"`
-	Headers             map[string]string `yaml:"headers,omitempty"`
-	WriteRelabelConfigs []*relabel.Config `yaml:"write_relabel_configs,omitempty"`
-	Name                string            `yaml:"name,omitempty"`
-	SendExemplars       bool              `yaml:"send_exemplars,omitempty"`
+	URL                  *config.URL       `yaml:"url"`
+	RemoteTimeout        model.Duration    `yaml:"remote_timeout,omitempty"`
+	Headers              map[string]string `yaml:"headers,omitempty"`
+	WriteRelabelConfigs  []*relabel.Config `yaml:"write_relabel_configs,omitempty"`
+	Name                 string            `yaml:"name,omitempty"`
+	SendExemplars        bool              `yaml:"send_exemplars,omitempty"`
+	SendNativeHistograms bool              `yaml:"send_native_histograms,omitempty"`
 
 	// We cannot do proper Go type embedding below as the parser will then parse
 	// values arbitrarily into the overflow maps of further-down types.
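The hunk widens RemoteWriteConfig with a SendNativeHistograms flag, mapped to the send_native_histograms YAML key. A hedged sketch of the field in use; only the two boolean fields are set, everything else is left at its zero value:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/config"
)

func main() {
	// Field names are taken from the struct in the hunk above; a real
	// config would also set URL, timeouts and relabel rules.
	rw := config.RemoteWriteConfig{
		SendExemplars:        true,
		SendNativeHistograms: true, // the field added by this change
	}
	fmt.Println(rw.SendNativeHistograms)
}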
871 vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go generated vendored Normal file
@@ -0,0 +1,871 @@
// Copyright 2021 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package histogram
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// FloatHistogram is similar to Histogram but uses float64 for all
|
||||
// counts. Additionally, bucket counts are absolute and not deltas.
|
||||
//
|
||||
// A FloatHistogram is needed by PromQL to handle operations that might result
|
||||
// in fractional counts. Since the counts in a histogram are unlikely to be too
|
||||
// large to be represented precisely by a float64, a FloatHistogram can also be
|
||||
// used to represent a histogram with integer counts and thus serves as a more
|
||||
// generalized representation.
|
||||
type FloatHistogram struct {
|
||||
// Currently valid schema numbers are -4 <= n <= 8. They are all for
|
||||
// base-2 bucket schemas, where 1 is a bucket boundary in each case, and
|
||||
// then each power of two is divided into 2^n logarithmic buckets. Or
|
||||
// in other words, each bucket boundary is the previous boundary times
|
||||
// 2^(2^-n).
|
||||
Schema int32
|
||||
// Width of the zero bucket.
|
||||
ZeroThreshold float64
|
||||
// Observations falling into the zero bucket. Must be zero or positive.
|
||||
ZeroCount float64
|
||||
// Total number of observations. Must be zero or positive.
|
||||
Count float64
|
||||
// Sum of observations. This is also used as the stale marker.
|
||||
Sum float64
|
||||
// Spans for positive and negative buckets (see Span below).
|
||||
PositiveSpans, NegativeSpans []Span
|
||||
// Observation counts in buckets. Each represents an absolute count and
|
||||
// must be zero or positive.
|
||||
PositiveBuckets, NegativeBuckets []float64
|
||||
}
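A minimal sketch of this sparse layout, assuming the vendored package is importable under its upstream path github.com/prometheus/prometheus/model/histogram: with schema 0 each boundary doubles, and a span {Offset: 1, Length: 2} marks bucket indexes 1 and 2 (the (1,2] and (2,4] buckets) as populated:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	// Schema 0: bucket boundaries at ..., 0.5, 1, 2, 4, ...
	// One span starting at index 1 with 2 buckets covers indexes 1 and 2,
	// i.e. the (1,2] and (2,4] buckets.
	h := &histogram.FloatHistogram{
		Schema:          0,
		ZeroThreshold:   0.001,
		ZeroCount:       1,
		Count:           6, // 1 (zero bucket) + 2 + 3
		Sum:             7.5,
		PositiveSpans:   []histogram.Span{{Offset: 1, Length: 2}},
		PositiveBuckets: []float64{2, 3},
	}
	fmt.Println(h.String())
	// {count:6, sum:7.5, [-0.001,0.001]:1, (1,2]:2, (2,4]:3}
}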
// Copy returns a deep copy of the Histogram.
func (h *FloatHistogram) Copy() *FloatHistogram {
	c := *h

	if h.PositiveSpans != nil {
		c.PositiveSpans = make([]Span, len(h.PositiveSpans))
		copy(c.PositiveSpans, h.PositiveSpans)
	}
	if h.NegativeSpans != nil {
		c.NegativeSpans = make([]Span, len(h.NegativeSpans))
		copy(c.NegativeSpans, h.NegativeSpans)
	}
	if h.PositiveBuckets != nil {
		c.PositiveBuckets = make([]float64, len(h.PositiveBuckets))
		copy(c.PositiveBuckets, h.PositiveBuckets)
	}
	if h.NegativeBuckets != nil {
		c.NegativeBuckets = make([]float64, len(h.NegativeBuckets))
		copy(c.NegativeBuckets, h.NegativeBuckets)
	}

	return &c
}

// CopyToSchema works like Copy, but the returned deep copy has the provided
// target schema, which must be ≤ the original schema (i.e. it must have a lower
// resolution).
func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram {
	if targetSchema == h.Schema {
		// Fast path.
		return h.Copy()
	}
	if targetSchema > h.Schema {
		panic(fmt.Errorf("cannot copy from schema %d to %d", h.Schema, targetSchema))
	}
	c := FloatHistogram{
		Schema:        targetSchema,
		ZeroThreshold: h.ZeroThreshold,
		ZeroCount:     h.ZeroCount,
		Count:         h.Count,
		Sum:           h.Sum,
	}

	// TODO(beorn7): This is a straight-forward implementation using merging
	// iterators for the original buckets and then adding one merged bucket
	// after another to the newly created FloatHistogram. It's well possible
	// that a more involved implementation performs much better, which we
	// could do if this code path turns out to be performance-critical.
	var iInSpan, index int32
	for iSpan, iBucket, it := -1, -1, h.floatBucketIterator(true, 0, targetSchema); it.Next(); {
		b := it.At()
		c.PositiveSpans, c.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket(
			b, c.PositiveSpans, c.PositiveBuckets, iSpan, iBucket, iInSpan, index,
		)
		index = b.Index
	}
	for iSpan, iBucket, it := -1, -1, h.floatBucketIterator(false, 0, targetSchema); it.Next(); {
		b := it.At()
		c.NegativeSpans, c.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket(
			b, c.NegativeSpans, c.NegativeBuckets, iSpan, iBucket, iInSpan, index,
		)
		index = b.Index
	}

	return &c
}
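For example, going from schema 1 to schema 0 halves the resolution, so adjacent pairs of schema-1 buckets merge into one schema-0 bucket. A sketch under the same import-path assumption as above:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	// Schema 1: boundaries at 1, sqrt(2), 2, 2*sqrt(2), 4, ...
	h := &histogram.FloatHistogram{
		Schema:          1,
		Count:           4,
		Sum:             10,
		PositiveSpans:   []histogram.Span{{Offset: 1, Length: 4}},
		PositiveBuckets: []float64{1, 1, 1, 1},
	}
	// Schema 0 has half the buckets; counts of merged buckets are summed.
	fmt.Println(h.CopyToSchema(0).String())
	// {count:4, sum:10, (1,2]:2, (2,4]:2}
}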
// String returns a string representation of the Histogram.
func (h *FloatHistogram) String() string {
	var sb strings.Builder
	fmt.Fprintf(&sb, "{count:%g, sum:%g", h.Count, h.Sum)

	var nBuckets []Bucket[float64]
	for it := h.NegativeBucketIterator(); it.Next(); {
		bucket := it.At()
		if bucket.Count != 0 {
			nBuckets = append(nBuckets, it.At())
		}
	}
	for i := len(nBuckets) - 1; i >= 0; i-- {
		fmt.Fprintf(&sb, ", %s", nBuckets[i].String())
	}

	if h.ZeroCount != 0 {
		fmt.Fprintf(&sb, ", %s", h.ZeroBucket().String())
	}

	for it := h.PositiveBucketIterator(); it.Next(); {
		bucket := it.At()
		if bucket.Count != 0 {
			fmt.Fprintf(&sb, ", %s", bucket.String())
		}
	}

	sb.WriteRune('}')
	return sb.String()
}

// ZeroBucket returns the zero bucket.
func (h *FloatHistogram) ZeroBucket() Bucket[float64] {
	return Bucket[float64]{
		Lower:          -h.ZeroThreshold,
		Upper:          h.ZeroThreshold,
		LowerInclusive: true,
		UpperInclusive: true,
		Count:          h.ZeroCount,
	}
}

// Scale scales the FloatHistogram by the provided factor, i.e. it scales all
// bucket counts including the zero bucket and the count and the sum of
// observations. The bucket layout stays the same. This method changes the
// receiving histogram directly (rather than acting on a copy). It returns a
// pointer to the receiving histogram for convenience.
func (h *FloatHistogram) Scale(factor float64) *FloatHistogram {
	h.ZeroCount *= factor
	h.Count *= factor
	h.Sum *= factor
	for i := range h.PositiveBuckets {
		h.PositiveBuckets[i] *= factor
	}
	for i := range h.NegativeBuckets {
		h.NegativeBuckets[i] *= factor
	}
	return h
}

// Add adds the provided other histogram to the receiving histogram. Count, Sum,
// and buckets from the other histogram are added to the corresponding
// components of the receiving histogram. Buckets in the other histogram that do
// not exist in the receiving histogram are inserted into the latter. The
// resulting histogram might have buckets with a population of zero or directly
// adjacent spans (offset=0). To normalize those, call the Compact method.
//
// The method reconciles differences in the zero threshold and in the schema,
// but the schema of the other histogram must be ≥ the schema of the receiving
// histogram (i.e. must have an equal or higher resolution). This means that the
// schema of the receiving histogram won't change. Its zero threshold, however,
// will change if needed. The other histogram will not be modified in any case.
//
// This method returns a pointer to the receiving histogram for convenience.
func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram {
	otherZeroCount := h.reconcileZeroBuckets(other)
	h.ZeroCount += otherZeroCount
	h.Count += other.Count
	h.Sum += other.Sum

	// TODO(beorn7): If needed, this can be optimized by inspecting the
	// spans in other and creating missing buckets in h in batches.
	var iInSpan, index int32
	for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(true, h.ZeroThreshold, h.Schema); it.Next(); {
		b := it.At()
		h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket(
			b, h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan, index,
		)
		index = b.Index
	}
	for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(false, h.ZeroThreshold, h.Schema); it.Next(); {
		b := it.At()
		h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket(
			b, h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan, index,
		)
		index = b.Index
	}
	return h
}

// Sub works like Add but subtracts the other histogram.
func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram {
	otherZeroCount := h.reconcileZeroBuckets(other)
	h.ZeroCount -= otherZeroCount
	h.Count -= other.Count
	h.Sum -= other.Sum

	// TODO(beorn7): If needed, this can be optimized by inspecting the
	// spans in other and creating missing buckets in h in batches.
	var iInSpan, index int32
	for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(true, h.ZeroThreshold, h.Schema); it.Next(); {
		b := it.At()
		b.Count *= -1
		h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan = addBucket(
			b, h.PositiveSpans, h.PositiveBuckets, iSpan, iBucket, iInSpan, index,
		)
		index = b.Index
	}
	for iSpan, iBucket, it := -1, -1, other.floatBucketIterator(false, h.ZeroThreshold, h.Schema); it.Next(); {
		b := it.At()
		b.Count *= -1
		h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan = addBucket(
			b, h.NegativeSpans, h.NegativeBuckets, iSpan, iBucket, iInSpan, index,
		)
		index = b.Index
	}
	return h
}
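A small sketch of Add under the same import-path assumption; note that it mutates its receiver:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h1 := &histogram.FloatHistogram{
		Schema: 0, Count: 2, Sum: 3,
		PositiveSpans:   []histogram.Span{{Offset: 1, Length: 1}},
		PositiveBuckets: []float64{2},
	}
	h2 := &histogram.FloatHistogram{
		Schema: 0, Count: 1, Sum: 4,
		PositiveSpans:   []histogram.Span{{Offset: 2, Length: 1}},
		PositiveBuckets: []float64{1},
	}
	// h1 gains h2's (2,4] bucket; its own (1,2] bucket is kept.
	fmt.Println(h1.Add(h2).String())
	// {count:3, sum:7, (1,2]:2, (2,4]:1}
}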
// addBucket takes the "coordinates" of the last bucket that was handled and
// adds the provided bucket after it. If a corresponding bucket exists, the
// count is added. If not, the bucket is inserted. The updated slices and the
// coordinates of the inserted or added-to bucket are returned.
func addBucket(
	b Bucket[float64],
	spans []Span, buckets []float64,
	iSpan, iBucket int,
	iInSpan, index int32,
) (
	newSpans []Span, newBuckets []float64,
	newISpan, newIBucket int, newIInSpan int32,
) {
	if iSpan == -1 {
		// First add, check if it is before all spans.
		if len(spans) == 0 || spans[0].Offset > b.Index {
			// Add bucket before all others.
			buckets = append(buckets, 0)
			copy(buckets[1:], buckets)
			buckets[0] = b.Count
			if len(spans) > 0 && spans[0].Offset == b.Index+1 {
				spans[0].Length++
				spans[0].Offset--
				return spans, buckets, 0, 0, 0
			}
			spans = append(spans, Span{})
			copy(spans[1:], spans)
			spans[0] = Span{Offset: b.Index, Length: 1}
			if len(spans) > 1 {
				// Convert the absolute offset in the formerly
				// first span to a relative offset.
				spans[1].Offset -= b.Index + 1
			}
			return spans, buckets, 0, 0, 0
		}
		if spans[0].Offset == b.Index {
			// Just add to first bucket.
			buckets[0] += b.Count
			return spans, buckets, 0, 0, 0
		}
		// We are behind the first bucket, so set everything to the
		// first bucket and continue normally.
		iSpan, iBucket, iInSpan = 0, 0, 0
		index = spans[0].Offset
	}
	deltaIndex := b.Index - index
	for {
		remainingInSpan := int32(spans[iSpan].Length) - iInSpan
		if deltaIndex < remainingInSpan {
			// Bucket is in current span.
			iBucket += int(deltaIndex)
			iInSpan += deltaIndex
			buckets[iBucket] += b.Count
			return spans, buckets, iSpan, iBucket, iInSpan
		}
		deltaIndex -= remainingInSpan
		iBucket += int(remainingInSpan)
		iSpan++
		if iSpan == len(spans) || deltaIndex < spans[iSpan].Offset {
			// Bucket is in gap behind previous span (or there are no further spans).
			buckets = append(buckets, 0)
			copy(buckets[iBucket+1:], buckets[iBucket:])
			buckets[iBucket] = b.Count
			if deltaIndex == 0 {
				// Directly after previous span, extend previous span.
				if iSpan < len(spans) {
					spans[iSpan].Offset--
				}
				iSpan--
				iInSpan = int32(spans[iSpan].Length)
				spans[iSpan].Length++
				return spans, buckets, iSpan, iBucket, iInSpan
			}
			if iSpan < len(spans) && deltaIndex == spans[iSpan].Offset-1 {
				// Directly before next span, extend next span.
				iInSpan = 0
				spans[iSpan].Offset--
				spans[iSpan].Length++
				return spans, buckets, iSpan, iBucket, iInSpan
			}
			// No next span, or next span is not directly adjacent to new bucket.
			// Add new span.
			iInSpan = 0
			if iSpan < len(spans) {
				spans[iSpan].Offset -= deltaIndex + 1
			}
			spans = append(spans, Span{})
			copy(spans[iSpan+1:], spans[iSpan:])
			spans[iSpan] = Span{Length: 1, Offset: deltaIndex}
			return spans, buckets, iSpan, iBucket, iInSpan
		}
		// Try start of next span.
		deltaIndex -= spans[iSpan].Offset
		iInSpan = 0
	}
}

// Compact eliminates empty buckets at the beginning and end of each span, then
// merges spans that are consecutive or at most maxEmptyBuckets apart, and
// finally splits spans that contain more consecutive empty buckets than
// maxEmptyBuckets. (The actual implementation might do something more efficient
// but with the same result.) The compaction happens "in place" in the
// receiving histogram, but a pointer to it is returned for convenience.
//
// The ideal value for maxEmptyBuckets depends on circumstances. The motivation
// to set maxEmptyBuckets > 0 is the assumption that it is less overhead to
// represent very few empty buckets explicitly within one span than cutting the
// one span into two to treat the empty buckets as a gap between the two spans,
// both in terms of storage requirement as well as in terms of encoding and
// decoding effort. However, the tradeoffs are subtle. For one, they are
// different in the exposition format vs. in a TSDB chunk vs. for the in-memory
// representation as Go types. In the TSDB, as an additional aspect, the span
// layout is only stored once per chunk, while many histograms with that same
// chunk layout are then only stored with their buckets (so that even a single
// empty bucket will be stored many times).
//
// For the Go types, an additional Span takes 8 bytes. Similarly, an additional
// bucket takes 8 bytes. Therefore, with a single separating empty bucket, both
// options have the same storage requirement, but the single-span solution is
// easier to iterate through. Still, the safest bet is to use maxEmptyBuckets==0
// and only use a larger number if you know what you are doing.
func (h *FloatHistogram) Compact(maxEmptyBuckets int) *FloatHistogram {
	h.PositiveBuckets, h.PositiveSpans = compactBuckets(
		h.PositiveBuckets, h.PositiveSpans, maxEmptyBuckets, false,
	)
	h.NegativeBuckets, h.NegativeSpans = compactBuckets(
		h.NegativeBuckets, h.NegativeSpans, maxEmptyBuckets, false,
	)
	return h
}
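For instance, two populated buckets separated by a single empty one stay in one span under Compact(1) but are split into two spans by Compact(0). A sketch under the same import-path assumption:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.FloatHistogram{
		Schema:          0,
		Count:           3,
		PositiveSpans:   []histogram.Span{{Offset: 1, Length: 3}},
		PositiveBuckets: []float64{2, 0, 1}, // middle bucket is empty
	}
	h.Compact(0) // cut the empty bucket out, splitting the span
	fmt.Printf("%v %v\n", h.PositiveSpans, h.PositiveBuckets)
	// [{1 1} {1 1}] [2 1]
}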
// DetectReset returns true if the receiving histogram is missing any buckets
// that have a non-zero population in the provided previous histogram. It also
// returns true if any count (in any bucket, in the zero count, or in the count
// of observations, but NOT the sum of observations) is smaller in the receiving
// histogram compared to the previous histogram. Otherwise, it returns false.
//
// Special behavior in case the Schema or the ZeroThreshold are not the same in
// both histograms:
//
// - A decrease of the ZeroThreshold or an increase of the Schema (i.e. an
// increase of resolution) can only happen together with a reset. Thus, the
// method returns true in either case.
//
// - Upon an increase of the ZeroThreshold, the buckets in the previous
// histogram that fall within the new ZeroThreshold are added to the ZeroCount
// of the previous histogram (without mutating the provided previous
// histogram). The scenario that a populated bucket of the previous histogram
// is partially within, partially outside of the new ZeroThreshold, can only
// happen together with a counter reset and therefore shortcuts to returning
// true.
//
// - Upon a decrease of the Schema, the buckets of the previous histogram are
// merged so that they match the new, lower-resolution schema (again without
// mutating the provided previous histogram).
//
// Note that this kind of reset detection is quite expensive. Ideally, resets
// are detected at ingest time and stored in the TSDB, so that the reset
// information can be read directly from there rather than be detected each time
// again.
func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool {
	if h.Count < previous.Count {
		return true
	}
	if h.Schema > previous.Schema {
		return true
	}
	if h.ZeroThreshold < previous.ZeroThreshold {
		// ZeroThreshold decreased.
		return true
	}
	previousZeroCount, newThreshold := previous.zeroCountForLargerThreshold(h.ZeroThreshold)
	if newThreshold != h.ZeroThreshold {
		// ZeroThreshold is within a populated bucket in previous
		// histogram.
		return true
	}
	if h.ZeroCount < previousZeroCount {
		return true
	}
	currIt := h.floatBucketIterator(true, h.ZeroThreshold, h.Schema)
	prevIt := previous.floatBucketIterator(true, h.ZeroThreshold, h.Schema)
	if detectReset(currIt, prevIt) {
		return true
	}
	currIt = h.floatBucketIterator(false, h.ZeroThreshold, h.Schema)
	prevIt = previous.floatBucketIterator(false, h.ZeroThreshold, h.Schema)
	return detectReset(currIt, prevIt)
}

func detectReset(currIt, prevIt BucketIterator[float64]) bool {
	if !prevIt.Next() {
		return false // If no buckets in previous histogram, nothing can be reset.
	}
	prevBucket := prevIt.At()
	if !currIt.Next() {
		// No bucket in current, but at least one in previous
		// histogram. Check if any of those are non-zero, in which case
		// this is a reset.
		for {
			if prevBucket.Count != 0 {
				return true
			}
			if !prevIt.Next() {
				return false
			}
		}
	}
	currBucket := currIt.At()
	for {
		// Forward currIt until we find the bucket corresponding to prevBucket.
		for currBucket.Index < prevBucket.Index {
			if !currIt.Next() {
				// Reached end of currIt early, therefore
				// previous histogram has a bucket that the
				// current one does not have. Unless all
				// remaining buckets in the previous histogram
				// are unpopulated, this is a reset.
				for {
					if prevBucket.Count != 0 {
						return true
					}
					if !prevIt.Next() {
						return false
					}
				}
			}
			currBucket = currIt.At()
		}
		if currBucket.Index > prevBucket.Index {
			// Previous histogram has a bucket the current one does
			// not have. If it's populated, it's a reset.
			if prevBucket.Count != 0 {
				return true
			}
		} else {
			// We have reached corresponding buckets in both iterators.
			// We can finally compare the counts.
			if currBucket.Count < prevBucket.Count {
				return true
			}
		}
		if !prevIt.Next() {
			// Reached end of prevIt without finding offending buckets.
			return false
		}
		prevBucket = prevIt.At()
	}
}
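A quick sketch of the simplest trigger, a shrinking bucket count (same import-path assumption):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	prev := &histogram.FloatHistogram{
		Schema: 0, Count: 5,
		PositiveSpans:   []histogram.Span{{Offset: 1, Length: 1}},
		PositiveBuckets: []float64{5},
	}
	curr := &histogram.FloatHistogram{
		Schema: 0, Count: 3,
		PositiveSpans:   []histogram.Span{{Offset: 1, Length: 1}},
		PositiveBuckets: []float64{3},
	}
	fmt.Println(curr.DetectReset(prev)) // true: the count went down
}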
// PositiveBucketIterator returns a BucketIterator to iterate over all positive
// buckets in ascending order (starting next to the zero bucket and going up).
func (h *FloatHistogram) PositiveBucketIterator() BucketIterator[float64] {
	return h.floatBucketIterator(true, 0, h.Schema)
}

// NegativeBucketIterator returns a BucketIterator to iterate over all negative
// buckets in descending order (starting next to the zero bucket and going
// down).
func (h *FloatHistogram) NegativeBucketIterator() BucketIterator[float64] {
	return h.floatBucketIterator(false, 0, h.Schema)
}

// PositiveReverseBucketIterator returns a BucketIterator to iterate over all
// positive buckets in descending order (starting at the highest bucket and
// going down towards the zero bucket).
func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64] {
	return newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true)
}

// NegativeReverseBucketIterator returns a BucketIterator to iterate over all
// negative buckets in ascending order (starting at the lowest bucket and going
// up towards the zero bucket).
func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] {
	return newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false)
}

// AllBucketIterator returns a BucketIterator to iterate over all negative,
// zero, and positive buckets in ascending order (starting at the lowest bucket
// and going up). If the highest negative bucket or the lowest positive bucket
// overlap with the zero bucket, their upper or lower boundary, respectively, is
// set to the zero threshold.
func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] {
	return &allFloatBucketIterator{
		h:       h,
		negIter: h.NegativeReverseBucketIterator(),
		posIter: h.PositiveBucketIterator(),
		state:   -1,
	}
}
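A short sketch of the combined iteration order (same import-path assumption):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.FloatHistogram{
		Schema:          0,
		ZeroThreshold:   0.001,
		ZeroCount:       1,
		Count:           3,
		PositiveSpans:   []histogram.Span{{Offset: 1, Length: 1}},
		PositiveBuckets: []float64{2},
	}
	// Walks negative buckets (none here), then the zero bucket, then positive ones.
	for it := h.AllBucketIterator(); it.Next(); {
		fmt.Println(it.At().String())
	}
	// [-0.001,0.001]:1
	// (1,2]:2
}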
// zeroCountForLargerThreshold returns what the histogram's zero count would be
// if the ZeroThreshold had the provided larger (or equal) value. If the
// provided value is less than the histogram's ZeroThreshold, the method panics.
// If the largerThreshold ends up within a populated bucket of the histogram, it
// is adjusted upwards to the lower limit of that bucket (all in terms of
// absolute values) and that bucket's count is included in the returned
// count. The adjusted threshold is returned, too.
func (h *FloatHistogram) zeroCountForLargerThreshold(largerThreshold float64) (count, threshold float64) {
	// Fast path.
	if largerThreshold == h.ZeroThreshold {
		return h.ZeroCount, largerThreshold
	}
	if largerThreshold < h.ZeroThreshold {
		panic(fmt.Errorf("new threshold %f is less than old threshold %f", largerThreshold, h.ZeroThreshold))
	}
outer:
	for {
		count = h.ZeroCount
		i := h.PositiveBucketIterator()
		for i.Next() {
			b := i.At()
			if b.Lower >= largerThreshold {
				break
			}
			count += b.Count // Bucket to be merged into zero bucket.
			if b.Upper > largerThreshold {
				// New threshold ended up within a bucket. If it's
				// populated, we need to adjust largerThreshold before
				// we are done here.
				if b.Count != 0 {
					largerThreshold = b.Upper
				}
				break
			}
		}
		i = h.NegativeBucketIterator()
		for i.Next() {
			b := i.At()
			if b.Upper <= -largerThreshold {
				break
			}
			count += b.Count // Bucket to be merged into zero bucket.
			if b.Lower < -largerThreshold {
				// New threshold ended up within a bucket. If
				// it's populated, we need to adjust
				// largerThreshold and have to redo the whole
				// thing because the treatment of the positive
				// buckets is invalid now.
				if b.Count != 0 {
					largerThreshold = -b.Lower
					continue outer
				}
				break
			}
		}
		return count, largerThreshold
	}
}

// trimBucketsInZeroBucket removes all buckets that are within the zero
// bucket. It assumes that the zero threshold is at a bucket boundary and that
// the counts in the buckets to remove are already part of the zero count.
func (h *FloatHistogram) trimBucketsInZeroBucket() {
	i := h.PositiveBucketIterator()
	bucketsIdx := 0
	for i.Next() {
		b := i.At()
		if b.Lower >= h.ZeroThreshold {
			break
		}
		h.PositiveBuckets[bucketsIdx] = 0
		bucketsIdx++
	}
	i = h.NegativeBucketIterator()
	bucketsIdx = 0
	for i.Next() {
		b := i.At()
		if b.Upper <= -h.ZeroThreshold {
			break
		}
		h.NegativeBuckets[bucketsIdx] = 0
		bucketsIdx++
	}
	// We are abusing Compact to trim the buckets set to zero
	// above. Premature compacting could cause additional cost, but this
	// code path is probably rarely used anyway.
	h.Compact(0)
}

// reconcileZeroBuckets finds a zero bucket large enough to include the zero
// buckets of both histograms (the receiving histogram and the other histogram)
// with a zero threshold that is not within a populated bucket in either
// histogram. This method modifies the receiving histogram accordingly, but
// leaves the other histogram as is. Instead, it returns the zero count the
// other histogram would have if it were modified.
func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 {
	otherZeroCount := other.ZeroCount
	otherZeroThreshold := other.ZeroThreshold

	for otherZeroThreshold != h.ZeroThreshold {
		if h.ZeroThreshold > otherZeroThreshold {
			otherZeroCount, otherZeroThreshold = other.zeroCountForLargerThreshold(h.ZeroThreshold)
		}
		if otherZeroThreshold > h.ZeroThreshold {
			h.ZeroCount, h.ZeroThreshold = h.zeroCountForLargerThreshold(otherZeroThreshold)
			h.trimBucketsInZeroBucket()
		}
	}
	return otherZeroCount
}

// floatBucketIterator is a low-level constructor for bucket iterators.
//
// If positive is true, the returned iterator iterates through the positive
// buckets, otherwise through the negative buckets.
//
// If absoluteStartValue is < the lowest absolute value of any upper bucket
// boundary, the iterator starts with the first bucket. Otherwise, it will skip
// all buckets with an absolute value of their upper boundary ≤
// absoluteStartValue.
//
// targetSchema must be ≤ the schema of FloatHistogram (and of course within the
// legal values for schemas in general). The buckets are merged to match the
// targetSchema prior to iterating (without mutating FloatHistogram).
func (h *FloatHistogram) floatBucketIterator(
	positive bool, absoluteStartValue float64, targetSchema int32,
) *floatBucketIterator {
	if targetSchema > h.Schema {
		panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema))
	}
	i := &floatBucketIterator{
		baseBucketIterator: baseBucketIterator[float64, float64]{
			schema:   h.Schema,
			positive: positive,
		},
		targetSchema:       targetSchema,
		absoluteStartValue: absoluteStartValue,
	}
	if positive {
		i.spans = h.PositiveSpans
		i.buckets = h.PositiveBuckets
	} else {
		i.spans = h.NegativeSpans
		i.buckets = h.NegativeBuckets
	}
	return i
}

// newReverseFloatBucketIterator is a low-level constructor for reverse bucket iterators.
func newReverseFloatBucketIterator(
	spans []Span, buckets []float64, schema int32, positive bool,
) *reverseFloatBucketIterator {
	r := &reverseFloatBucketIterator{
		baseBucketIterator: baseBucketIterator[float64, float64]{
			schema:   schema,
			spans:    spans,
			buckets:  buckets,
			positive: positive,
		},
	}

	r.spansIdx = len(r.spans) - 1
	r.bucketsIdx = len(r.buckets) - 1
	if r.spansIdx >= 0 {
		r.idxInSpan = int32(r.spans[r.spansIdx].Length) - 1
	}
	r.currIdx = 0
	for _, s := range r.spans {
		r.currIdx += s.Offset + int32(s.Length)
	}

	return r
}

type floatBucketIterator struct {
	baseBucketIterator[float64, float64]

	targetSchema       int32   // targetSchema is the schema to merge to and must be ≤ schema.
	origIdx            int32   // The bucket index within the original schema.
	absoluteStartValue float64 // Never return buckets with an upper bound ≤ this value.
}

func (i *floatBucketIterator) Next() bool {
	if i.spansIdx >= len(i.spans) {
		return false
	}

	// Copy all of these into local variables so that we can forward to the
	// next bucket and then roll back if needed.
	origIdx, spansIdx, idxInSpan := i.origIdx, i.spansIdx, i.idxInSpan
	span := i.spans[spansIdx]
	firstPass := true
	i.currCount = 0

mergeLoop: // Merge together all buckets from the original schema that fall into one bucket in the targetSchema.
	for {
		if i.bucketsIdx == 0 {
			// Seed origIdx for the first bucket.
			origIdx = span.Offset
		} else {
			origIdx++
		}
		for idxInSpan >= span.Length {
			// We have exhausted the current span and have to find a new
			// one. We even handle pathologic spans of length 0 here.
			idxInSpan = 0
			spansIdx++
			if spansIdx >= len(i.spans) {
				if firstPass {
					return false
				}
				break mergeLoop
			}
			span = i.spans[spansIdx]
			origIdx += span.Offset
		}
		currIdx := i.targetIdx(origIdx)
		if firstPass {
			i.currIdx = currIdx
			firstPass = false
		} else if currIdx != i.currIdx {
			// Reached next bucket in targetSchema.
			// Do not actually forward to the next bucket, but break out.
			break mergeLoop
		}
		i.currCount += i.buckets[i.bucketsIdx]
		idxInSpan++
		i.bucketsIdx++
		i.origIdx, i.spansIdx, i.idxInSpan = origIdx, spansIdx, idxInSpan
		if i.schema == i.targetSchema {
			// Don't need to test the next bucket for mergeability
			// if we have no schema change anyway.
			break mergeLoop
		}
	}
	// Skip buckets before absoluteStartValue.
	// TODO(beorn7): Maybe do something more efficient than this recursive call.
	if getBound(i.currIdx, i.targetSchema) <= i.absoluteStartValue {
		return i.Next()
	}
	return true
}

// targetIdx returns the bucket index within i.targetSchema for the given bucket
// index within i.schema.
func (i *floatBucketIterator) targetIdx(idx int32) int32 {
	if i.schema == i.targetSchema {
		// Fast path for the common case. The below would yield the same
		// result, just with more effort.
		return idx
	}
	return ((idx - 1) >> (i.schema - i.targetSchema)) + 1
}

type reverseFloatBucketIterator struct {
	baseBucketIterator[float64, float64]
	idxInSpan int32 // Changed from uint32 to allow negative values for exhaustion detection.
}

func (i *reverseFloatBucketIterator) Next() bool {
	i.currIdx--
	if i.bucketsIdx < 0 {
		return false
	}

	for i.idxInSpan < 0 {
		// We have exhausted the current span and have to find a new
		// one. We'll even handle pathologic spans of length 0.
		i.spansIdx--
		i.idxInSpan = int32(i.spans[i.spansIdx].Length) - 1
		i.currIdx -= i.spans[i.spansIdx+1].Offset
	}

	i.currCount = i.buckets[i.bucketsIdx]
	i.bucketsIdx--
	i.idxInSpan--
	return true
}

type allFloatBucketIterator struct {
	h                *FloatHistogram
	negIter, posIter BucketIterator[float64]
	// -1 means we are iterating negative buckets.
	// 0 means it is time for the zero bucket.
	// 1 means we are iterating positive buckets.
	// Anything else means iteration is over.
	state      int8
	currBucket Bucket[float64]
}

func (i *allFloatBucketIterator) Next() bool {
	switch i.state {
	case -1:
		if i.negIter.Next() {
			i.currBucket = i.negIter.At()
			if i.currBucket.Upper > -i.h.ZeroThreshold {
				i.currBucket.Upper = -i.h.ZeroThreshold
			}
			return true
		}
		i.state = 0
		return i.Next()
	case 0:
		i.state = 1
		if i.h.ZeroCount > 0 {
			i.currBucket = Bucket[float64]{
				Lower:          -i.h.ZeroThreshold,
				Upper:          i.h.ZeroThreshold,
				LowerInclusive: true,
				UpperInclusive: true,
				Count:          i.h.ZeroCount,
				// Index is irrelevant for the zero bucket.
			}
			return true
		}
		return i.Next()
	case 1:
		if i.posIter.Next() {
			i.currBucket = i.posIter.At()
			if i.currBucket.Lower < i.h.ZeroThreshold {
				i.currBucket.Lower = i.h.ZeroThreshold
			}
			return true
		}
		i.state = 42
		return false
	}

	return false
}

func (i *allFloatBucketIterator) At() Bucket[float64] {
	return i.currBucket
}
536 vendor/github.com/prometheus/prometheus/model/histogram/generic.go generated vendored Normal file

@ -0,0 +1,536 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package histogram

import (
	"fmt"
	"math"
	"strings"
)

// BucketCount is a type constraint for the count in a bucket, which can be
// float64 (for type FloatHistogram) or uint64 (for type Histogram).
type BucketCount interface {
	float64 | uint64
}

// internalBucketCount is used internally by Histogram and FloatHistogram. The
// difference to the BucketCount above is that Histogram internally uses deltas
// between buckets rather than absolute counts (while FloatHistogram uses
// absolute counts directly). Go type parameters don't allow type
// specialization. Therefore, where special treatment of deltas between buckets
// vs. absolute counts is important, this information has to be provided as a
// separate boolean parameter "deltaBuckets".
type internalBucketCount interface {
	float64 | int64
}

// Bucket represents a bucket with lower and upper limit and the absolute count
// of samples in the bucket. It also specifies if each limit is inclusive or
// not. (Mathematically, inclusive limits create a closed interval, and
// non-inclusive limits an open interval.)
//
// To represent cumulative buckets, Lower is set to -Inf, and the Count is then
// cumulative (including the counts of all buckets for smaller values).
type Bucket[BC BucketCount] struct {
	Lower, Upper                   float64
	LowerInclusive, UpperInclusive bool
	Count                          BC

	// Index within schema. To easily compare buckets that share the same
	// schema and sign (positive or negative). Irrelevant for the zero bucket.
	Index int32
}

// String returns a string representation of a Bucket, using the usual
// mathematical notation of '['/']' for inclusive bounds and '('/')' for
// non-inclusive bounds.
func (b Bucket[BC]) String() string {
	var sb strings.Builder
	if b.LowerInclusive {
		sb.WriteRune('[')
	} else {
		sb.WriteRune('(')
	}
	fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper)
	if b.UpperInclusive {
		sb.WriteRune(']')
	} else {
		sb.WriteRune(')')
	}
	fmt.Fprintf(&sb, ":%v", b.Count)
	return sb.String()
}
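So a left-open bucket from 1 to 2 holding three observations renders as (1,2]:3, as in this tiny sketch (same import-path assumption as above):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	b := histogram.Bucket[float64]{
		Lower: 1, Upper: 2,
		LowerInclusive: false, UpperInclusive: true,
		Count: 3,
	}
	fmt.Println(b.String()) // (1,2]:3
}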
// BucketIterator iterates over the buckets of a Histogram, returning decoded
// buckets.
type BucketIterator[BC BucketCount] interface {
	// Next advances the iterator by one.
	Next() bool
	// At returns the current bucket.
	At() Bucket[BC]
}

// baseBucketIterator provides a struct that is shared by most BucketIterator
// implementations, together with an implementation of the At method. This
// iterator can be embedded in full implementations of BucketIterator to save on
// code replication.
type baseBucketIterator[BC BucketCount, IBC internalBucketCount] struct {
	schema  int32
	spans   []Span
	buckets []IBC

	positive bool // Whether this is for positive buckets.

	spansIdx   int    // Current span within spans slice.
	idxInSpan  uint32 // Index in the current span. 0 <= idxInSpan < span.Length.
	bucketsIdx int    // Current bucket within buckets slice.

	currCount IBC   // Count in the current bucket.
	currIdx   int32 // The actual bucket index.
}

func (b baseBucketIterator[BC, IBC]) At() Bucket[BC] {
	bucket := Bucket[BC]{
		Count: BC(b.currCount),
		Index: b.currIdx,
	}
	if b.positive {
		bucket.Upper = getBound(b.currIdx, b.schema)
		bucket.Lower = getBound(b.currIdx-1, b.schema)
	} else {
		bucket.Lower = -getBound(b.currIdx, b.schema)
		bucket.Upper = -getBound(b.currIdx-1, b.schema)
	}
	bucket.LowerInclusive = bucket.Lower < 0
	bucket.UpperInclusive = bucket.Upper > 0
	return bucket
}
// compactBuckets is a generic function used by both Histogram.Compact and
// FloatHistogram.Compact. Set deltaBuckets to true if the provided buckets are
// deltas. Set it to false if the buckets contain absolute counts.
func compactBuckets[IBC internalBucketCount](buckets []IBC, spans []Span, maxEmptyBuckets int, deltaBuckets bool) ([]IBC, []Span) {
	// Fast path: If there are no empty buckets AND no offset in any span is
	// <= maxEmptyBuckets AND no span has length 0, there is nothing to do and we can return
	// immediately. We check that first because it's cheap and presumably
	// common.
	nothingToDo := true
	var currentBucketAbsolute IBC
	for _, bucket := range buckets {
		if deltaBuckets {
			currentBucketAbsolute += bucket
		} else {
			currentBucketAbsolute = bucket
		}
		if currentBucketAbsolute == 0 {
			nothingToDo = false
			break
		}
	}
	if nothingToDo {
		for _, span := range spans {
			if int(span.Offset) <= maxEmptyBuckets || span.Length == 0 {
				nothingToDo = false
				break
			}
		}
		if nothingToDo {
			return buckets, spans
		}
	}

	var iBucket, iSpan int
	var posInSpan uint32
	currentBucketAbsolute = 0

	// Helper function.
	emptyBucketsHere := func() int {
		i := 0
		abs := currentBucketAbsolute
		for uint32(i)+posInSpan < spans[iSpan].Length && abs == 0 {
			i++
			if i+iBucket >= len(buckets) {
				break
			}
			abs = buckets[i+iBucket]
		}
		return i
	}

	// Merge spans with zero-offset to avoid special cases later.
	if len(spans) > 1 {
		for i, span := range spans[1:] {
			if span.Offset == 0 {
				spans[iSpan].Length += span.Length
				continue
			}
			iSpan++
			if i+1 != iSpan {
				spans[iSpan] = span
			}
		}
		spans = spans[:iSpan+1]
		iSpan = 0
	}

	// Merge spans with zero-length to avoid special cases later.
	for i, span := range spans {
		if span.Length == 0 {
			if i+1 < len(spans) {
				spans[i+1].Offset += span.Offset
			}
			continue
		}
		if i != iSpan {
			spans[iSpan] = span
		}
		iSpan++
	}
	spans = spans[:iSpan]
	iSpan = 0

	// Cut out empty buckets from start and end of spans, no matter
	// what. Also cut out empty buckets from the middle of a span but only
	// if there are more than maxEmptyBuckets consecutive empty buckets.
	for iBucket < len(buckets) {
		if deltaBuckets {
			currentBucketAbsolute += buckets[iBucket]
		} else {
			currentBucketAbsolute = buckets[iBucket]
		}
		if nEmpty := emptyBucketsHere(); nEmpty > 0 {
			if posInSpan > 0 &&
				nEmpty < int(spans[iSpan].Length-posInSpan) &&
				nEmpty <= maxEmptyBuckets {
				// The empty buckets are in the middle of a
				// span, and there are few enough to not bother.
				// Just fast-forward.
				iBucket += nEmpty
				if deltaBuckets {
					currentBucketAbsolute = 0
				}
				posInSpan += uint32(nEmpty)
				continue
			}
			// In all other cases, we cut out the empty buckets.
			if deltaBuckets && iBucket+nEmpty < len(buckets) {
				currentBucketAbsolute = -buckets[iBucket]
				buckets[iBucket+nEmpty] += buckets[iBucket]
			}
			buckets = append(buckets[:iBucket], buckets[iBucket+nEmpty:]...)
			if posInSpan == 0 {
				// Start of span.
				if nEmpty == int(spans[iSpan].Length) {
					// The whole span is empty.
					offset := spans[iSpan].Offset
					spans = append(spans[:iSpan], spans[iSpan+1:]...)
					if len(spans) > iSpan {
						spans[iSpan].Offset += offset + int32(nEmpty)
					}
					continue
				}
				spans[iSpan].Length -= uint32(nEmpty)
				spans[iSpan].Offset += int32(nEmpty)
				continue
			}
			// It's in the middle or in the end of the span.
			// Split the current span.
			newSpan := Span{
				Offset: int32(nEmpty),
				Length: spans[iSpan].Length - posInSpan - uint32(nEmpty),
			}
			spans[iSpan].Length = posInSpan
			// In any case, we have to split to the next span.
			iSpan++
			posInSpan = 0
			if newSpan.Length == 0 {
				// The span is empty, so we were already at the end of a span.
				// We don't have to insert the new span, just adjust the next
				// span's offset, if there is one.
				if iSpan < len(spans) {
					spans[iSpan].Offset += int32(nEmpty)
				}
				continue
			}
			// Insert the new span.
			spans = append(spans, Span{})
			if iSpan+1 < len(spans) {
				copy(spans[iSpan+1:], spans[iSpan:])
			}
			spans[iSpan] = newSpan
			continue
		}
		iBucket++
		posInSpan++
		if posInSpan >= spans[iSpan].Length {
			posInSpan = 0
			iSpan++
		}
	}
	if maxEmptyBuckets == 0 || len(buckets) == 0 {
		return buckets, spans
	}

	// Finally, check if any offsets between spans are small enough to merge
	// the spans.
	iBucket = int(spans[0].Length)
	if deltaBuckets {
		currentBucketAbsolute = 0
		for _, bucket := range buckets[:iBucket] {
			currentBucketAbsolute += bucket
		}
	}
	iSpan = 1
	for iSpan < len(spans) {
		if int(spans[iSpan].Offset) > maxEmptyBuckets {
			l := int(spans[iSpan].Length)
			if deltaBuckets {
				for _, bucket := range buckets[iBucket : iBucket+l] {
					currentBucketAbsolute += bucket
				}
			}
			iBucket += l
			iSpan++
			continue
		}
		// Merge span with previous one and insert empty buckets.
		offset := int(spans[iSpan].Offset)
		spans[iSpan-1].Length += uint32(offset) + spans[iSpan].Length
		spans = append(spans[:iSpan], spans[iSpan+1:]...)
		newBuckets := make([]IBC, len(buckets)+offset)
		copy(newBuckets, buckets[:iBucket])
		copy(newBuckets[iBucket+offset:], buckets[iBucket:])
		if deltaBuckets {
			newBuckets[iBucket] = -currentBucketAbsolute
			newBuckets[iBucket+offset] += currentBucketAbsolute
		}
		iBucket += offset
		buckets = newBuckets
		currentBucketAbsolute = buckets[iBucket]
		// Note that with many merges, it would be more efficient to
		// first record all the chunks of empty buckets to insert and
		// then do it in one go through all the buckets.
	}

	return buckets, spans
}
func getBound(idx, schema int32) float64 {
	// Here is a bit of context about the behavior for the last bucket counting
	// regular numbers (called simply "last bucket" below) and the bucket
	// counting observations of ±Inf (called "inf bucket" below, with an idx
	// one higher than that of the "last bucket"):
	//
	// If we apply the usual formula to the last bucket, its upper bound
	// would be calculated as +Inf. The reason is that the max possible
	// regular float64 number (math.MaxFloat64) doesn't coincide with one of
	// the calculated bucket boundaries. So the calculated boundary has to
	// be larger than math.MaxFloat64, and the only float64 larger than
	// math.MaxFloat64 is +Inf. However, we want to count actual
	// observations of ±Inf in the inf bucket. Therefore, we have to treat
	// the upper bound of the last bucket specially and set it to
	// math.MaxFloat64. (The upper bound of the inf bucket, with its idx
	// being one higher than that of the last bucket, naturally comes out as
	// +Inf by the usual formula. So that's fine.)
	//
	// math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of
	// 1024. If there were a float64 number following math.MaxFloat64, it
	// would have a frac of 1.0 and an exp of 1024, or equivalently a frac
	// of 0.5 and an exp of 1025. However, since frac must be smaller than
	// 1, and exp must be smaller than 1025, either representation overflows
	// a float64. (Which, in turn, is the reason that math.MaxFloat64 is the
	// largest possible float64. Q.E.D.) However, the formula for
	// calculating the upper bound from the idx and schema of the last
	// bucket results in precisely that. It is either frac=1.0 & exp=1024
	// (for schema < 0) or frac=0.5 & exp=1025 (for schema >= 0). (This is,
	// by the way, a power of two where the exponent itself is a power of
	// two, 2¹⁰ in fact, which coincides with a bucket boundary in all
	// schemas.) So these are the special cases we have to catch below.
	if schema < 0 {
		exp := int(idx) << -schema
		if exp == 1024 {
			// This is the last bucket before the overflow bucket
			// (for ±Inf observations). Return math.MaxFloat64 as
			// explained above.
			return math.MaxFloat64
		}
		return math.Ldexp(1, exp)
	}

	fracIdx := idx & ((1 << schema) - 1)
	frac := exponentialBounds[schema][fracIdx]
	exp := (int(idx) >> schema) + 1
	if frac == 0.5 && exp == 1025 {
		// This is the last bucket before the overflow bucket (for ±Inf
		// observations). Return math.MaxFloat64 as explained above.
		return math.MaxFloat64
	}
	return math.Ldexp(frac, exp)
}
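Away from those two special cases, the upper bound of bucket idx is simply 2^(idx * 2^-schema). A sketch of that plain formula; upperBound here is a hypothetical helper, not the vendored getBound:

package main

import (
	"fmt"
	"math"
)

// upperBound mirrors the documented formula without the
// math.MaxFloat64 special-casing done by getBound.
func upperBound(idx, schema int32) float64 {
	return math.Pow(2, float64(idx)*math.Pow(2, -float64(schema)))
}

func main() {
	fmt.Println(upperBound(1, 0)) // 2
	fmt.Println(upperBound(2, 0)) // 4
	fmt.Println(upperBound(1, 1)) // 1.4142135623730951 (sqrt 2)
	fmt.Println(upperBound(1, 2)) // 1.189207115002721 (2^(1/4))
}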
// exponentialBounds is a precalculated table of bucket bounds in the interval
// [0.5,1) in schema 0 to 8.
var exponentialBounds = [][]float64{
	// Schema 0:
	{0.5},
	// Schema 1:
	{0.5, 0.7071067811865475},
	// Schema 2:
	{0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144},
	// Schema 3:
	{
		0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048,
		0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711,
	},
	// Schema 4:
	{
		0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458,
		0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463,
		0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627,
		0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735,
	},
	// Schema 5:
	{
		0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117,
		0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887,
		0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666,
		0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159,
		0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112,
		0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823,
		0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533,
		0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999,
	},
	// Schema 6:
	{
		0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142,
		0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598,
		0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209,
		0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406,
		0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349,
		0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891,
		0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515,
		0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555,
		0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234,
		0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269,
		0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334,
		0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681,
		0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529,
		0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991,
		0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827,
		0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752,
	},
	// Schema 7:
	{
		0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764,
		0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894,
		0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309,
		0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545,
		0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393,
		0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595,
		0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754,
		0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704,
		0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907,
		0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665,
		0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253,
		0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329,
		0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032,
		0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728,
		0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265,
		0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076,
		0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491,
		0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908,
		0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126,
		0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777,
		0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764,
		0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465,
		0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821,
		0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981,
		0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312,
		0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842,
		0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671,
		0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263,
		0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943,
		0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368,
		0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164,
		0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328,
	},
	// Schema 8:
	{
		0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088,
		0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869,
		0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205,
		0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158,
		0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313,
		0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321,
		0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954,
		0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847,
		0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111,
		0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088,
		0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098,
		0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026,
		0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894,
		0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493,
		0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185,
		0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968,
		0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903,
		0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005,
		0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725,
		0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082,
		0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581,
		0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031,
		0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346,
		0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447,
		0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385,
		0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788,
		0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727,
		0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171,
		0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058,
		0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119,
		0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999,
		0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352,
		0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471,
		0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126,
		0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218,
		0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837,
		0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984,
		0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031,
		0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071,
		0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282,
		0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442,
		0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707,
		0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818,
		0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853,
		0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642,
		0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003,
		0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079,
		0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391,
		0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661,
		0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629,
		0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553,
		0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389,
		0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771,
		0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002,
		0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155,
		0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483,
		0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253,
		0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191,
		0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693,
		0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947,
		0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133,
		0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889,
		0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168,
		0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698,
	},
}
448 vendor/github.com/prometheus/prometheus/model/histogram/histogram.go generated vendored Normal file
@@ -0,0 +1,448 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package histogram

import (
	"fmt"
	"math"
	"strings"
)

// Histogram encodes a sparse, high-resolution histogram. See the design
// document for full details:
// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit#
//
// The most tricky bit is how bucket indices represent real bucket boundaries.
// An example for schema 0 (by which each bucket is twice as wide as the
// previous bucket):
//
//	Bucket boundaries →              [-2,-1) [-1,-0.5) [-0.5,-0.25) ... [-0.001,0.001] ... (0.25,0.5] (0.5,1] (1,2] ....
//	                                    ↑        ↑          ↑                  ↑                ↑         ↑      ↑
//	Zero bucket (width e.g. 0.001) →    |        |          |                  ZB               |         |      |
//	Positive bucket indices →           |        |          |                             ...  -1         0      1    2    3
//	Negative bucket indices →  3   2    1        0         -1  ...
//
// Which bucket indices are actually used is determined by the spans.
type Histogram struct {
	// Currently valid schema numbers are -4 <= n <= 8. They are all for
	// base-2 bucket schemas, where 1 is a bucket boundary in each case, and
	// then each power of two is divided into 2^n logarithmic buckets. Or
	// in other words, each bucket boundary is the previous boundary times
	// 2^(2^-n).
	Schema int32
	// Width of the zero bucket.
	ZeroThreshold float64
	// Observations falling into the zero bucket.
	ZeroCount uint64
	// Total number of observations.
	Count uint64
	// Sum of observations. This is also used as the stale marker.
	Sum float64
	// Spans for positive and negative buckets (see Span below).
	PositiveSpans, NegativeSpans []Span
	// Observation counts in buckets. The first element is an absolute
	// count. All following ones are deltas relative to the previous
	// element.
	PositiveBuckets, NegativeBuckets []int64
}
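
// Editor's sketch — illustrative, not part of this vendored file. It shows
// how a bucket's upper boundary follows from Schema and a bucket index, per
// the comment on Schema above. The package itself resolves boundaries for
// schemas 1 through 8 via the precomputed bounds tables shown earlier; the
// closed form below is for intuition only.
func naiveUpperBound(index, schema int32) float64 {
	// Each boundary is the previous one times 2^(2^-schema), so
	// bound(index) = 2^(index * 2^-schema). For schema 0 and index 1 this
	// yields 2.0, matching the (1,2] bucket in the diagram above.
	return math.Exp2(float64(index) * math.Exp2(-float64(schema)))
}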

// A Span defines a continuous sequence of buckets.
type Span struct {
	// Gap to previous span (always positive), or starting index for the 1st
	// span (which can be negative).
	Offset int32
	// Length of the span.
	Length uint32
}

// Copy returns a deep copy of the Histogram.
func (h *Histogram) Copy() *Histogram {
	c := *h

	if len(h.PositiveSpans) != 0 {
		c.PositiveSpans = make([]Span, len(h.PositiveSpans))
		copy(c.PositiveSpans, h.PositiveSpans)
	}
	if len(h.NegativeSpans) != 0 {
		c.NegativeSpans = make([]Span, len(h.NegativeSpans))
		copy(c.NegativeSpans, h.NegativeSpans)
	}
	if len(h.PositiveBuckets) != 0 {
		c.PositiveBuckets = make([]int64, len(h.PositiveBuckets))
		copy(c.PositiveBuckets, h.PositiveBuckets)
	}
	if len(h.NegativeBuckets) != 0 {
		c.NegativeBuckets = make([]int64, len(h.NegativeBuckets))
		copy(c.NegativeBuckets, h.NegativeBuckets)
	}

	return &c
}

// String returns a string representation of the Histogram.
func (h *Histogram) String() string {
	var sb strings.Builder
	fmt.Fprintf(&sb, "{count:%d, sum:%g", h.Count, h.Sum)

	var nBuckets []Bucket[uint64]
	for it := h.NegativeBucketIterator(); it.Next(); {
		bucket := it.At()
		if bucket.Count != 0 {
			nBuckets = append(nBuckets, it.At())
		}
	}
	for i := len(nBuckets) - 1; i >= 0; i-- {
		fmt.Fprintf(&sb, ", %s", nBuckets[i].String())
	}

	if h.ZeroCount != 0 {
		fmt.Fprintf(&sb, ", %s", h.ZeroBucket().String())
	}

	for it := h.PositiveBucketIterator(); it.Next(); {
		bucket := it.At()
		if bucket.Count != 0 {
			fmt.Fprintf(&sb, ", %s", bucket.String())
		}
	}

	sb.WriteRune('}')
	return sb.String()
}

// ZeroBucket returns the zero bucket.
func (h *Histogram) ZeroBucket() Bucket[uint64] {
	return Bucket[uint64]{
		Lower:          -h.ZeroThreshold,
		Upper:          h.ZeroThreshold,
		LowerInclusive: true,
		UpperInclusive: true,
		Count:          h.ZeroCount,
	}
}

// PositiveBucketIterator returns a BucketIterator to iterate over all positive
// buckets in ascending order (starting next to the zero bucket and going up).
func (h *Histogram) PositiveBucketIterator() BucketIterator[uint64] {
	return newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true)
}

// NegativeBucketIterator returns a BucketIterator to iterate over all negative
// buckets in descending order (starting next to the zero bucket and going down).
func (h *Histogram) NegativeBucketIterator() BucketIterator[uint64] {
	return newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false)
}

// CumulativeBucketIterator returns a BucketIterator to iterate over a
// cumulative view of the buckets. This method currently only supports
// Histograms without negative buckets and panics if the Histogram has negative
// buckets. It is currently only used for testing.
func (h *Histogram) CumulativeBucketIterator() BucketIterator[uint64] {
	if len(h.NegativeBuckets) > 0 {
		panic("CumulativeBucketIterator called on Histogram with negative buckets")
	}
	return &cumulativeBucketIterator{h: h, posSpansIdx: -1}
}

// Equals returns true if the given histogram matches exactly.
// Exact match is when there are no new buckets (even empty) and no missing buckets,
// and all the bucket values match. Spans can have different empty length spans in between,
// but they must represent the same bucket layout to match.
func (h *Histogram) Equals(h2 *Histogram) bool {
	if h2 == nil {
		return false
	}

	if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold ||
		h.ZeroCount != h2.ZeroCount || h.Count != h2.Count || h.Sum != h2.Sum {
		return false
	}

	if !spansMatch(h.PositiveSpans, h2.PositiveSpans) {
		return false
	}
	if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
		return false
	}

	if !bucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) {
		return false
	}
	if !bucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) {
		return false
	}

	return true
}

// spansMatch returns true if both spans represent the same bucket layout
// after combining zero length spans with the next non-zero length span.
func spansMatch(s1, s2 []Span) bool {
	if len(s1) == 0 && len(s2) == 0 {
		return true
	}

	s1idx, s2idx := 0, 0
	for {
		if s1idx >= len(s1) {
			return allEmptySpans(s2[s2idx:])
		}
		if s2idx >= len(s2) {
			return allEmptySpans(s1[s1idx:])
		}

		currS1, currS2 := s1[s1idx], s2[s2idx]
		s1idx++
		s2idx++
		if currS1.Length == 0 {
			// This span is zero length, so we add consecutive such spans
			// until we find a non-zero span.
			for ; s1idx < len(s1) && s1[s1idx].Length == 0; s1idx++ {
				currS1.Offset += s1[s1idx].Offset
			}
			if s1idx < len(s1) {
				currS1.Offset += s1[s1idx].Offset
				currS1.Length = s1[s1idx].Length
				s1idx++
			}
		}
		if currS2.Length == 0 {
			// This span is zero length, so we add consecutive such spans
			// until we find a non-zero span.
			for ; s2idx < len(s2) && s2[s2idx].Length == 0; s2idx++ {
				currS2.Offset += s2[s2idx].Offset
			}
			if s2idx < len(s2) {
				currS2.Offset += s2[s2idx].Offset
				currS2.Length = s2[s2idx].Length
				s2idx++
			}
		}

		if currS1.Length == 0 && currS2.Length == 0 {
			// The last spans of both sets are zero length. Previous spans match.
			return true
		}

		if currS1.Offset != currS2.Offset || currS1.Length != currS2.Length {
			return false
		}
	}
}

func allEmptySpans(s []Span) bool {
	for _, ss := range s {
		if ss.Length > 0 {
			return false
		}
	}
	return true
}

func bucketsMatch(b1, b2 []int64) bool {
	if len(b1) != len(b2) {
		return false
	}
	for i, b := range b1 {
		if b != b2[i] {
			return false
		}
	}
	return true
}
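
// Editor's example — illustrative, not part of this vendored file. Two span
// layouts that spansMatch treats as equal: zero-length spans only shift
// offsets without contributing buckets, so both layouts below describe
// buckets at indices 1, 2 and 3.
var (
	exampleLayoutA = []Span{{Offset: 1, Length: 3}}
	exampleLayoutB = []Span{{Offset: 1, Length: 0}, {Offset: 0, Length: 3}}
)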

// Compact works like FloatHistogram.Compact. See there for detailed
// explanations.
func (h *Histogram) Compact(maxEmptyBuckets int) *Histogram {
	h.PositiveBuckets, h.PositiveSpans = compactBuckets(
		h.PositiveBuckets, h.PositiveSpans, maxEmptyBuckets, true,
	)
	h.NegativeBuckets, h.NegativeSpans = compactBuckets(
		h.NegativeBuckets, h.NegativeSpans, maxEmptyBuckets, true,
	)
	return h
}

// ToFloat returns a FloatHistogram representation of the Histogram. It is a
// deep copy (e.g. spans are not shared).
func (h *Histogram) ToFloat() *FloatHistogram {
	var (
		positiveSpans, negativeSpans     []Span
		positiveBuckets, negativeBuckets []float64
	)
	if len(h.PositiveSpans) != 0 {
		positiveSpans = make([]Span, len(h.PositiveSpans))
		copy(positiveSpans, h.PositiveSpans)
	}
	if len(h.NegativeSpans) != 0 {
		negativeSpans = make([]Span, len(h.NegativeSpans))
		copy(negativeSpans, h.NegativeSpans)
	}
	if len(h.PositiveBuckets) != 0 {
		positiveBuckets = make([]float64, len(h.PositiveBuckets))
		var current float64
		for i, b := range h.PositiveBuckets {
			current += float64(b)
			positiveBuckets[i] = current
		}
	}
	if len(h.NegativeBuckets) != 0 {
		negativeBuckets = make([]float64, len(h.NegativeBuckets))
		var current float64
		for i, b := range h.NegativeBuckets {
			current += float64(b)
			negativeBuckets[i] = current
		}
	}

	return &FloatHistogram{
		Schema:          h.Schema,
		ZeroThreshold:   h.ZeroThreshold,
		ZeroCount:       float64(h.ZeroCount),
		Count:           float64(h.Count),
		Sum:             h.Sum,
		PositiveSpans:   positiveSpans,
		NegativeSpans:   negativeSpans,
		PositiveBuckets: positiveBuckets,
		NegativeBuckets: negativeBuckets,
	}
}
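
// Editor's example — illustrative, not part of this vendored file. The
// delta-to-absolute conversion performed by ToFloat: integer histograms store
// bucket counts as deltas, float histograms store them directly.
func exampleDeltaDecode() []float64 {
	deltas := []int64{2, -1, 0} // as stored in Histogram.PositiveBuckets
	abs := make([]float64, len(deltas))
	var current float64
	for i, d := range deltas {
		current += float64(d)
		abs[i] = current // yields [2, 1, 1], the FloatHistogram representation
	}
	return abs
}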

type regularBucketIterator struct {
	baseBucketIterator[uint64, int64]
}

func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool) *regularBucketIterator {
	i := baseBucketIterator[uint64, int64]{
		schema:   schema,
		spans:    spans,
		buckets:  buckets,
		positive: positive,
	}
	return &regularBucketIterator{i}
}

func (r *regularBucketIterator) Next() bool {
	if r.spansIdx >= len(r.spans) {
		return false
	}
	span := r.spans[r.spansIdx]
	// Seed currIdx for the first bucket.
	if r.bucketsIdx == 0 {
		r.currIdx = span.Offset
	} else {
		r.currIdx++
	}
	for r.idxInSpan >= span.Length {
		// We have exhausted the current span and have to find a new
		// one. We'll even handle pathologic spans of length 0.
		r.idxInSpan = 0
		r.spansIdx++
		if r.spansIdx >= len(r.spans) {
			return false
		}
		span = r.spans[r.spansIdx]
		r.currIdx += span.Offset
	}

	r.currCount += r.buckets[r.bucketsIdx]
	r.idxInSpan++
	r.bucketsIdx++
	return true
}

type cumulativeBucketIterator struct {
	h *Histogram

	posSpansIdx   int    // Index in h.PositiveSpans we are in. -1 means 0 bucket.
	posBucketsIdx int    // Index in h.PositiveBuckets.
	idxInSpan     uint32 // Index in the current span. 0 <= idxInSpan < span.Length.

	initialized         bool
	currIdx             int32   // The actual bucket index after decoding from spans.
	currUpper           float64 // The upper boundary of the current bucket.
	currCount           int64   // Current non-cumulative count for the current bucket. Does not apply for empty bucket.
	currCumulativeCount uint64  // Current "cumulative" count for the current bucket.

	// Between 2 spans there could be some empty buckets which
	// still need to be counted for cumulative buckets.
	// When we hit the end of a span, we use this to iterate
	// through the empty buckets.
	emptyBucketCount int32
}

func (c *cumulativeBucketIterator) Next() bool {
	if c.posSpansIdx == -1 {
		// Zero bucket.
		c.posSpansIdx++
		if c.h.ZeroCount == 0 {
			return c.Next()
		}

		c.currUpper = c.h.ZeroThreshold
		c.currCount = int64(c.h.ZeroCount)
		c.currCumulativeCount = uint64(c.currCount)
		return true
	}

	if c.posSpansIdx >= len(c.h.PositiveSpans) {
		return false
	}

	if c.emptyBucketCount > 0 {
		// We are traversing through empty buckets at the moment.
		c.currUpper = getBound(c.currIdx, c.h.Schema)
		c.currIdx++
		c.emptyBucketCount--
		return true
	}

	span := c.h.PositiveSpans[c.posSpansIdx]
	if c.posSpansIdx == 0 && !c.initialized {
		// Initializing.
		c.currIdx = span.Offset
		// The first bucket is an absolute value and not a delta with Zero bucket.
		c.currCount = 0
		c.initialized = true
	}

	c.currCount += c.h.PositiveBuckets[c.posBucketsIdx]
	c.currCumulativeCount += uint64(c.currCount)
	c.currUpper = getBound(c.currIdx, c.h.Schema)

	c.posBucketsIdx++
	c.idxInSpan++
	c.currIdx++
	if c.idxInSpan >= span.Length {
		// Move to the next span. This one is done.
		c.posSpansIdx++
		c.idxInSpan = 0
		if c.posSpansIdx < len(c.h.PositiveSpans) {
			c.emptyBucketCount = c.h.PositiveSpans[c.posSpansIdx].Offset
		}
	}

	return true
}

func (c *cumulativeBucketIterator) At() Bucket[uint64] {
	return Bucket[uint64]{
		Upper:          c.currUpper,
		Lower:          math.Inf(-1),
		UpperInclusive: true,
		LowerInclusive: true,
		Count:          c.currCumulativeCount,
		Index:          c.currIdx - 1,
	}
}
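
// Editor's sketch — illustrative, not part of this vendored file. Emulating
// classic le-style cumulative buckets from a positive-only Histogram via the
// iterator above.
func exampleCumulative(h *Histogram) {
	for it := h.CumulativeBucketIterator(); it.Next(); {
		b := it.At()
		fmt.Printf("le=%g count=%d\n", b.Upper, b.Count)
	}
}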
37 vendor/github.com/prometheus/prometheus/model/textparse/interface.go generated vendored
@@ -17,16 +17,23 @@ import (
 	"mime"
 
 	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 )
 
 // Parser parses samples from a byte slice of samples in the official
 // Prometheus and OpenMetrics text exposition formats.
 type Parser interface {
-	// Series returns the bytes of the series, the timestamp if set, and the value
-	// of the current sample.
+	// Series returns the bytes of a series with a simple float64 as a
+	// value, the timestamp if set, and the value of the current sample.
 	Series() ([]byte, *int64, float64)
 
+	// Histogram returns the bytes of a series with a sparse histogram as a
+	// value, the timestamp if set, and the histogram in the current sample.
+	// Depending on the parsed input, the function returns an (integer) Histogram
+	// or a FloatHistogram, with the respective other return value being nil.
+	Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram)
+
 	// Help returns the metric name and help text in the current entry.
 	// Must only be called after Next returned a help entry.
 	// The returned byte slices become invalid after the next call to Next.
@@ -70,22 +77,30 @@ func New(b []byte, contentType string) (Parser, error) {
 	}
 
 	mediaType, _, err := mime.ParseMediaType(contentType)
-	if err == nil && mediaType == "application/openmetrics-text" {
-		return NewOpenMetricsParser(b), nil
+	if err != nil {
+		return NewPromParser(b), err
 	}
+	switch mediaType {
+	case "application/openmetrics-text":
+		return NewOpenMetricsParser(b), nil
+	case "application/vnd.google.protobuf":
+		return NewProtobufParser(b), nil
+	default:
+		return NewPromParser(b), nil
+	}
-	return NewPromParser(b), err
 }
 
 // Entry represents the type of a parsed entry.
 type Entry int
 
 const (
-	EntryInvalid Entry = -1
-	EntryType    Entry = 0
-	EntryHelp    Entry = 1
-	EntrySeries  Entry = 2
-	EntryComment Entry = 3
-	EntryUnit    Entry = 4
+	EntryInvalid   Entry = -1
+	EntryType      Entry = 0
+	EntryHelp      Entry = 1
+	EntrySeries    Entry = 2 // A series with a simple float64 as value.
+	EntryComment   Entry = 3
+	EntryUnit      Entry = 4
+	EntryHistogram Entry = 5 // A series with a sparse histogram as a value.
 )
 
 // MetricType represents metric type values.
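Illustrative usage (editor's sketch, not part of this commit): with the switch above, a scrape consumer can hand the body and the negotiated Content-Type straight to New and receive the matching parser:

	p, err := textparse.New(body, resp.Header.Get("Content-Type"))
	// p is an OpenMetrics, protobuf, or plain Prometheus text parser.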
7 vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go generated vendored
@@ -27,6 +27,7 @@ import (
 	"unicode/utf8"
 
 	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/value"
 )
@@ -112,6 +113,12 @@ func (p *OpenMetricsParser) Series() ([]byte, *int64, float64) {
 	return p.series, nil, p.val
 }
 
+// Histogram always returns (nil, nil, nil, nil) because OpenMetrics does not support
+// sparse histograms.
+func (p *OpenMetricsParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) {
+	return nil, nil, nil, nil
+}
+
 // Help returns the metric name and help text in the current entry.
 // Must only be called after Next returned a help entry.
 // The returned byte slices become invalid after the next call to Next.
7 vendor/github.com/prometheus/prometheus/model/textparse/promparse.go generated vendored
@@ -28,6 +28,7 @@ import (
 	"unsafe"
 
 	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/value"
 )
@@ -167,6 +168,12 @@ func (p *PromParser) Series() ([]byte, *int64, float64) {
 	return p.series, nil, p.val
 }
 
+// Histogram always returns (nil, nil, nil, nil) because the Prometheus text format
+// does not support sparse histograms.
+func (p *PromParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) {
+	return nil, nil, nil, nil
+}
+
 // Help returns the metric name and help text in the current entry.
 // Must only be called after Next returned a help entry.
 // The returned byte slices become invalid after the next call to Next.
518 vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go generated vendored Normal file
@@ -0,0 +1,518 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package textparse

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"math"
	"sort"
	"strings"
	"unicode/utf8"

	"github.com/gogo/protobuf/proto"
	"github.com/pkg/errors"
	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"

	dto "github.com/prometheus/prometheus/prompb/io/prometheus/client"
)

// ProtobufParser is a very inefficient way of unmarshaling the old Prometheus
// protobuf format and then presenting it as if it were parsed by a
// Prometheus-2-style text parser. This is only done so that we can easily plug
// in the protobuf format into Prometheus 2. For future use (with the final
// format that will be used for native histograms), we have to revisit the
// parsing. A lot of the efficiency tricks of the Prometheus-2-style parsing
// could be used in a similar fashion (byte-slice pointers into the raw
// payload), which requires some hand-coded protobuf handling. But the current
// parsers all expect the full series name (metric name plus label pairs) as one
// string, which is not how things are represented in the protobuf format. If
// the re-arrangement work is actually causing problems (which has to be seen),
// that expectation needs to be changed.
type ProtobufParser struct {
	in        []byte // The input to parse.
	inPos     int    // Position within the input.
	metricPos int    // Position within Metric slice.
	// fieldPos is the position within a Summary or (legacy) Histogram. -2
	// is the count. -1 is the sum. Otherwise it is the index within
	// quantiles/buckets.
	fieldPos   int
	fieldsDone bool // true if no more fields of a Summary or (legacy) Histogram to be processed.
	// state is marked by the entry we are processing. EntryInvalid implies
	// that we have to decode the next MetricFamily.
	state Entry

	mf *dto.MetricFamily

	// The following are just shenanigans to satisfy the Parser interface.
	metricBytes *bytes.Buffer // A somewhat fluid representation of the current metric.
}

// NewProtobufParser returns a parser for the payload in the byte slice.
func NewProtobufParser(b []byte) Parser {
	return &ProtobufParser{
		in:          b,
		state:       EntryInvalid,
		mf:          &dto.MetricFamily{},
		metricBytes: &bytes.Buffer{},
	}
}

// Series returns the bytes of a series with a simple float64 as a
// value, the timestamp if set, and the value of the current sample.
func (p *ProtobufParser) Series() ([]byte, *int64, float64) {
	var (
		m  = p.mf.GetMetric()[p.metricPos]
		ts = m.GetTimestampMs()
		v  float64
	)
	switch p.mf.GetType() {
	case dto.MetricType_COUNTER:
		v = m.GetCounter().GetValue()
	case dto.MetricType_GAUGE:
		v = m.GetGauge().GetValue()
	case dto.MetricType_UNTYPED:
		v = m.GetUntyped().GetValue()
	case dto.MetricType_SUMMARY:
		s := m.GetSummary()
		switch p.fieldPos {
		case -2:
			v = float64(s.GetSampleCount())
		case -1:
			v = s.GetSampleSum()
			// Need to detect summaries without quantile here.
			if len(s.GetQuantile()) == 0 {
				p.fieldsDone = true
			}
		default:
			v = s.GetQuantile()[p.fieldPos].GetValue()
		}
	case dto.MetricType_HISTOGRAM:
		// This should only happen for a legacy histogram.
		h := m.GetHistogram()
		switch p.fieldPos {
		case -2:
			v = float64(h.GetSampleCount())
		case -1:
			v = h.GetSampleSum()
		default:
			bb := h.GetBucket()
			if p.fieldPos >= len(bb) {
				v = float64(h.GetSampleCount())
			} else {
				v = float64(bb[p.fieldPos].GetCumulativeCount())
			}
		}
	default:
		panic("encountered unexpected metric type, this is a bug")
	}
	if ts != 0 {
		return p.metricBytes.Bytes(), &ts, v
	}
	// Nasty hack: Assume that ts==0 means no timestamp. That's not true in
	// general, but proto3 has no distinction between unset and
	// default. Need to avoid in the final format.
	return p.metricBytes.Bytes(), nil, v
}

// Histogram returns the bytes of a series with a native histogram as a value,
// the timestamp if set, and the native histogram in the current sample.
//
// The Compact method is called before returning the Histogram (or FloatHistogram).
//
// If the SampleCountFloat or the ZeroCountFloat in the proto message is > 0,
// the histogram is parsed and returned as a FloatHistogram and nil is returned
// as the (integer) Histogram return value. Otherwise, it is parsed and returned
// as an (integer) Histogram and nil is returned as the FloatHistogram return
// value.
func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) {
	var (
		m  = p.mf.GetMetric()[p.metricPos]
		ts = m.GetTimestampMs()
		h  = m.GetHistogram()
	)
	if h.GetSampleCountFloat() > 0 || h.GetZeroCountFloat() > 0 {
		// It is a float histogram.
		fh := histogram.FloatHistogram{
			Count:           h.GetSampleCountFloat(),
			Sum:             h.GetSampleSum(),
			ZeroThreshold:   h.GetZeroThreshold(),
			ZeroCount:       h.GetZeroCountFloat(),
			Schema:          h.GetSchema(),
			PositiveSpans:   make([]histogram.Span, len(h.GetPositiveSpan())),
			PositiveBuckets: h.GetPositiveCount(),
			NegativeSpans:   make([]histogram.Span, len(h.GetNegativeSpan())),
			NegativeBuckets: h.GetNegativeCount(),
		}
		for i, span := range h.GetPositiveSpan() {
			fh.PositiveSpans[i].Offset = span.GetOffset()
			fh.PositiveSpans[i].Length = span.GetLength()
		}
		for i, span := range h.GetNegativeSpan() {
			fh.NegativeSpans[i].Offset = span.GetOffset()
			fh.NegativeSpans[i].Length = span.GetLength()
		}
		fh.Compact(0)
		if ts != 0 {
			return p.metricBytes.Bytes(), &ts, nil, &fh
		}
		// Nasty hack: Assume that ts==0 means no timestamp. That's not true in
		// general, but proto3 has no distinction between unset and
		// default. Need to avoid in the final format.
		return p.metricBytes.Bytes(), nil, nil, &fh
	}

	sh := histogram.Histogram{
		Count:           h.GetSampleCount(),
		Sum:             h.GetSampleSum(),
		ZeroThreshold:   h.GetZeroThreshold(),
		ZeroCount:       h.GetZeroCount(),
		Schema:          h.GetSchema(),
		PositiveSpans:   make([]histogram.Span, len(h.GetPositiveSpan())),
		PositiveBuckets: h.GetPositiveDelta(),
		NegativeSpans:   make([]histogram.Span, len(h.GetNegativeSpan())),
		NegativeBuckets: h.GetNegativeDelta(),
	}
	for i, span := range h.GetPositiveSpan() {
		sh.PositiveSpans[i].Offset = span.GetOffset()
		sh.PositiveSpans[i].Length = span.GetLength()
	}
	for i, span := range h.GetNegativeSpan() {
		sh.NegativeSpans[i].Offset = span.GetOffset()
		sh.NegativeSpans[i].Length = span.GetLength()
	}
	sh.Compact(0)
	if ts != 0 {
		return p.metricBytes.Bytes(), &ts, &sh, nil
	}
	return p.metricBytes.Bytes(), nil, &sh, nil
}

// Help returns the metric name and help text in the current entry.
// Must only be called after Next returned a help entry.
// The returned byte slices become invalid after the next call to Next.
func (p *ProtobufParser) Help() ([]byte, []byte) {
	return p.metricBytes.Bytes(), []byte(p.mf.GetHelp())
}

// Type returns the metric name and type in the current entry.
// Must only be called after Next returned a type entry.
// The returned byte slices become invalid after the next call to Next.
func (p *ProtobufParser) Type() ([]byte, MetricType) {
	n := p.metricBytes.Bytes()
	switch p.mf.GetType() {
	case dto.MetricType_COUNTER:
		return n, MetricTypeCounter
	case dto.MetricType_GAUGE:
		return n, MetricTypeGauge
	case dto.MetricType_HISTOGRAM:
		return n, MetricTypeHistogram
	case dto.MetricType_SUMMARY:
		return n, MetricTypeSummary
	}
	return n, MetricTypeUnknown
}

// Unit always returns (nil, nil) because units aren't supported by the protobuf
// format.
func (p *ProtobufParser) Unit() ([]byte, []byte) {
	return nil, nil
}

// Comment always returns nil because comments aren't supported by the protobuf
// format.
func (p *ProtobufParser) Comment() []byte {
	return nil
}

// Metric writes the labels of the current sample into the passed labels.
// It returns the string from which the metric was parsed.
func (p *ProtobufParser) Metric(l *labels.Labels) string {
	*l = append(*l, labels.Label{
		Name:  labels.MetricName,
		Value: p.getMagicName(),
	})

	for _, lp := range p.mf.GetMetric()[p.metricPos].GetLabel() {
		*l = append(*l, labels.Label{
			Name:  lp.GetName(),
			Value: lp.GetValue(),
		})
	}
	if needed, name, value := p.getMagicLabel(); needed {
		*l = append(*l, labels.Label{Name: name, Value: value})
	}

	// Sort labels to maintain the sorted labels invariant.
	sort.Sort(*l)

	return p.metricBytes.String()
}

// Exemplar writes the exemplar of the current sample into the passed
// exemplar. It returns whether an exemplar exists. In case of a native
// histogram, the legacy bucket section is still used for exemplars. To ingest
// all exemplars, call the Exemplar method repeatedly until it returns false.
func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool {
	m := p.mf.GetMetric()[p.metricPos]
	var exProto *dto.Exemplar
	switch p.mf.GetType() {
	case dto.MetricType_COUNTER:
		exProto = m.GetCounter().GetExemplar()
	case dto.MetricType_HISTOGRAM:
		bb := m.GetHistogram().GetBucket()
		if p.fieldPos < 0 {
			if p.state == EntrySeries {
				return false // At _count or _sum.
			}
			p.fieldPos = 0 // Start at 1st bucket for native histograms.
		}
		for p.fieldPos < len(bb) {
			exProto = bb[p.fieldPos].GetExemplar()
			if p.state == EntrySeries {
				break
			}
			p.fieldPos++
			if exProto != nil {
				break
			}
		}
	default:
		return false
	}
	if exProto == nil {
		return false
	}
	ex.Value = exProto.GetValue()
	if ts := exProto.GetTimestamp(); ts != nil {
		ex.HasTs = true
		ex.Ts = ts.GetSeconds()*1000 + int64(ts.GetNanos()/1_000_000)
	}
	for _, lp := range exProto.GetLabel() {
		ex.Labels = append(ex.Labels, labels.Label{
			Name:  lp.GetName(),
			Value: lp.GetValue(),
		})
	}
	return true
}

// Next advances the parser to the next "sample" (emulating the behavior of a
// text format parser). It returns (EntryInvalid, io.EOF) if no samples were
// read.
func (p *ProtobufParser) Next() (Entry, error) {
	switch p.state {
	case EntryInvalid:
		p.metricPos = 0
		p.fieldPos = -2
		n, err := readDelimited(p.in[p.inPos:], p.mf)
		p.inPos += n
		if err != nil {
			return p.state, err
		}

		// Skip empty metric families.
		if len(p.mf.GetMetric()) == 0 {
			return p.Next()
		}

		// We are at the beginning of a metric family. Put only the name
		// into metricBytes and validate only name and help for now.
		name := p.mf.GetName()
		if !model.IsValidMetricName(model.LabelValue(name)) {
			return EntryInvalid, errors.Errorf("invalid metric name: %s", name)
		}
		if help := p.mf.GetHelp(); !utf8.ValidString(help) {
			return EntryInvalid, errors.Errorf("invalid help for metric %q: %s", name, help)
		}
		p.metricBytes.Reset()
		p.metricBytes.WriteString(name)

		p.state = EntryHelp
	case EntryHelp:
		p.state = EntryType
	case EntryType:
		if p.mf.GetType() == dto.MetricType_HISTOGRAM &&
			isNativeHistogram(p.mf.GetMetric()[0].GetHistogram()) {
			p.state = EntryHistogram
		} else {
			p.state = EntrySeries
		}
		if err := p.updateMetricBytes(); err != nil {
			return EntryInvalid, err
		}
	case EntryHistogram, EntrySeries:
		if p.state == EntrySeries && !p.fieldsDone &&
			(p.mf.GetType() == dto.MetricType_SUMMARY || p.mf.GetType() == dto.MetricType_HISTOGRAM) {
			p.fieldPos++
		} else {
			p.metricPos++
			p.fieldPos = -2
			p.fieldsDone = false
		}
		if p.metricPos >= len(p.mf.GetMetric()) {
			p.state = EntryInvalid
			return p.Next()
		}
		if err := p.updateMetricBytes(); err != nil {
			return EntryInvalid, err
		}
	default:
		return EntryInvalid, errors.Errorf("invalid protobuf parsing state: %d", p.state)
	}
	return p.state, nil
}
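
// Editor's sketch — illustrative, not part of this vendored file. A typical
// consumption loop over the state machine above:
func exampleDrain(p Parser) error {
	for {
		e, err := p.Next()
		if err == io.EOF {
			return nil // readDelimited signals end of input with io.EOF
		}
		if err != nil {
			return err
		}
		switch e {
		case EntrySeries:
			// handle p.Series(), p.Metric(...), p.Exemplar(...)
		case EntryHistogram:
			// handle p.Histogram()
		}
	}
}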

func (p *ProtobufParser) updateMetricBytes() error {
	b := p.metricBytes
	b.Reset()
	b.WriteString(p.getMagicName())
	for _, lp := range p.mf.GetMetric()[p.metricPos].GetLabel() {
		b.WriteByte(model.SeparatorByte)
		n := lp.GetName()
		if !model.LabelName(n).IsValid() {
			return errors.Errorf("invalid label name: %s", n)
		}
		b.WriteString(n)
		b.WriteByte(model.SeparatorByte)
		v := lp.GetValue()
		if !utf8.ValidString(v) {
			return errors.Errorf("invalid label value: %s", v)
		}
		b.WriteString(v)
	}
	if needed, n, v := p.getMagicLabel(); needed {
		b.WriteByte(model.SeparatorByte)
		b.WriteString(n)
		b.WriteByte(model.SeparatorByte)
		b.WriteString(v)
	}
	return nil
}

// getMagicName usually just returns p.mf.GetName() but adds a magic suffix
// ("_count", "_sum", "_bucket") if needed according to the current parser
// state.
func (p *ProtobufParser) getMagicName() string {
	t := p.mf.GetType()
	if p.state == EntryHistogram || (t != dto.MetricType_HISTOGRAM && t != dto.MetricType_SUMMARY) {
		return p.mf.GetName()
	}
	if p.fieldPos == -2 {
		return p.mf.GetName() + "_count"
	}
	if p.fieldPos == -1 {
		return p.mf.GetName() + "_sum"
	}
	if t == dto.MetricType_HISTOGRAM {
		return p.mf.GetName() + "_bucket"
	}
	return p.mf.GetName()
}
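
// Editor's example — illustrative, not part of this vendored file. For a
// hypothetical summary family "rpc_duration_seconds", fieldPos -2 yields
// "rpc_duration_seconds_count", -1 yields "rpc_duration_seconds_sum", and a
// quantile index yields the bare "rpc_duration_seconds" (the quantile itself
// is added as a label by getMagicLabel below).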

// getMagicLabel returns if a magic label ("quantile" or "le") is needed and, if
// so, its name and value. It also sets p.fieldsDone if applicable.
func (p *ProtobufParser) getMagicLabel() (bool, string, string) {
	if p.state == EntryHistogram || p.fieldPos < 0 {
		return false, "", ""
	}
	switch p.mf.GetType() {
	case dto.MetricType_SUMMARY:
		qq := p.mf.GetMetric()[p.metricPos].GetSummary().GetQuantile()
		q := qq[p.fieldPos]
		p.fieldsDone = p.fieldPos == len(qq)-1
		return true, model.QuantileLabel, formatOpenMetricsFloat(q.GetQuantile())
	case dto.MetricType_HISTOGRAM:
		bb := p.mf.GetMetric()[p.metricPos].GetHistogram().GetBucket()
		if p.fieldPos >= len(bb) {
			p.fieldsDone = true
			return true, model.BucketLabel, "+Inf"
		}
		b := bb[p.fieldPos]
		p.fieldsDone = math.IsInf(b.GetUpperBound(), +1)
		return true, model.BucketLabel, formatOpenMetricsFloat(b.GetUpperBound())
	}
	return false, "", ""
}

var errInvalidVarint = errors.New("protobufparse: invalid varint encountered")

// readDelimited is essentially doing what the function of the same name in
// github.com/matttproud/golang_protobuf_extensions/pbutil is doing, but it is
// specific to a MetricFamily, utilizes the more efficient gogo-protobuf
// unmarshaling, and acts on a byte slice directly without any additional
// staging buffers.
func readDelimited(b []byte, mf *dto.MetricFamily) (n int, err error) {
	if len(b) == 0 {
		return 0, io.EOF
	}
	messageLength, varIntLength := proto.DecodeVarint(b)
	if varIntLength == 0 || varIntLength > binary.MaxVarintLen32 {
		return 0, errInvalidVarint
	}
	totalLength := varIntLength + int(messageLength)
	if totalLength > len(b) {
		return 0, errors.Errorf("protobufparse: insufficient length of buffer, expected at least %d bytes, got %d bytes", totalLength, len(b))
	}
	mf.Reset()
	return totalLength, mf.Unmarshal(b[varIntLength:totalLength])
}
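
// Editor's sketch — illustrative, not part of this vendored file. The inverse
// of readDelimited with the same gogo-protobuf API: a varint length prefix
// followed by the encoded MetricFamily, repeated once per frame.
func exampleEncodeDelimited(mf *dto.MetricFamily) ([]byte, error) {
	body, err := mf.Marshal()
	if err != nil {
		return nil, err
	}
	// Length prefix first, then the message bytes.
	return append(proto.EncodeVarint(uint64(len(body))), body...), nil
}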

// formatOpenMetricsFloat works like the usual Go string formatting of a float
// but appends ".0" if the resulting number would otherwise contain neither a
// "." nor an "e".
func formatOpenMetricsFloat(f float64) string {
	// A few common cases hardcoded.
	switch {
	case f == 1:
		return "1.0"
	case f == 0:
		return "0.0"
	case f == -1:
		return "-1.0"
	case math.IsNaN(f):
		return "NaN"
	case math.IsInf(f, +1):
		return "+Inf"
	case math.IsInf(f, -1):
		return "-Inf"
	}
	s := fmt.Sprint(f)
	if strings.ContainsAny(s, "e.") {
		return s
	}
	return s + ".0"
}

// isNativeHistogram returns false iff the provided histogram has no sparse
// buckets, a zero threshold of 0, and a zero count of 0. In principle, this
// could still be meant to be a native histogram (with a zero threshold of 0 and
// no observations yet), but for now, we'll treat this case as a conventional
// histogram.
//
// TODO(beorn7): In the final format, there should be an unambiguous way of
// deciding if a histogram should be ingested as a conventional one or a native
// one.
func isNativeHistogram(h *dto.Histogram) bool {
	return len(h.GetNegativeDelta()) > 0 ||
		len(h.GetPositiveDelta()) > 0 ||
		h.GetZeroCount() > 0 ||
		h.GetZeroThreshold() > 0
}
3 vendor/github.com/prometheus/prometheus/prompb/buf.yaml generated vendored
@@ -5,14 +5,17 @@ lint:
     ENUM_VALUE_PREFIX:
       - remote.proto
       - types.proto
+      - io/prometheus/client/metrics.proto
     ENUM_ZERO_VALUE_SUFFIX:
       - remote.proto
       - types.proto
+      - io/prometheus/client/metrics.proto
     PACKAGE_DIRECTORY_MATCH:
       - remote.proto
       - types.proto
     PACKAGE_VERSION_SUFFIX:
       - remote.proto
       - types.proto
+      - io/prometheus/client/metrics.proto
   deps:
     - buf.build/gogo/protobuf
3994 vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go generated vendored Normal file
File diff suppressed because it is too large
146 vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto generated vendored Normal file
@@ -0,0 +1,146 @@
// Copyright 2013 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// This is copied and lightly edited from
// github.com/prometheus/client_model/io/prometheus/client/metrics.proto
// and finally converted to proto3 syntax to make it usable for the
// gogo-protobuf approach taken within prometheus/prometheus.

syntax = "proto3";

package io.prometheus.client;
option go_package = "io_prometheus_client";

import "google/protobuf/timestamp.proto";
message LabelPair {
  string name  = 1;
  string value = 2;
}

enum MetricType {
  // COUNTER must use the Metric field "counter".
  COUNTER = 0;
  // GAUGE must use the Metric field "gauge".
  GAUGE = 1;
  // SUMMARY must use the Metric field "summary".
  SUMMARY = 2;
  // UNTYPED must use the Metric field "untyped".
  UNTYPED = 3;
  // HISTOGRAM must use the Metric field "histogram".
  HISTOGRAM = 4;
  // GAUGE_HISTOGRAM must use the Metric field "histogram".
  GAUGE_HISTOGRAM = 5;
}

message Gauge {
  double value = 1;
}

message Counter {
  double   value    = 1;
  Exemplar exemplar = 2;
}

message Quantile {
  double quantile = 1;
  double value    = 2;
}

message Summary {
  uint64 sample_count = 1;
  double sample_sum   = 2;
  repeated Quantile quantile = 3;
}

message Untyped {
  double value = 1;
}

message Histogram {
  uint64 sample_count       = 1;
  double sample_count_float = 4; // Overrides sample_count if > 0.
  double sample_sum         = 2;
  // Buckets for the conventional histogram.
  repeated Bucket bucket = 3; // Ordered in increasing order of upper_bound, +Inf bucket is optional.

  // Everything below here is for native histograms (also known as sparse histograms).
  // Native histograms are an experimental feature without stability guarantees.

  // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
  // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
  // then each power of two is divided into 2^n logarithmic buckets.
  // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n).
  // In the future, more bucket schemas may be added using numbers < -4 or > 8.
  sint32 schema           = 5;
  double zero_threshold   = 6; // Breadth of the zero bucket.
  uint64 zero_count       = 7; // Count in zero bucket.
  double zero_count_float = 8; // Overrides zero_count if > 0.

  // Negative buckets for the native histogram.
  repeated BucketSpan negative_span = 9;
  // Use either "negative_delta" or "negative_count", the former for
  // regular histograms with integer counts, the latter for float
  // histograms.
  repeated sint64 negative_delta = 10; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
  repeated double negative_count = 11; // Absolute count of each bucket.

  // Positive buckets for the native histogram.
  repeated BucketSpan positive_span = 12;
  // Use either "positive_delta" or "positive_count", the former for
  // regular histograms with integer counts, the latter for float
  // histograms.
  repeated sint64 positive_delta = 13; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
  repeated double positive_count = 14; // Absolute count of each bucket.
}

message Bucket {
  uint64 cumulative_count       = 1; // Cumulative in increasing order.
  double cumulative_count_float = 4; // Overrides cumulative_count if > 0.
  double upper_bound            = 2; // Inclusive.
  Exemplar exemplar             = 3;
}

// A BucketSpan defines a number of consecutive buckets in a native
// histogram with their offset. Logically, it would be more
// straightforward to include the bucket counts in the Span. However,
// the protobuf representation is more compact in the way the data is
// structured here (with all the buckets in a single array separate
// from the Spans).
message BucketSpan {
  sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative).
  uint32 length = 2; // Length of consecutive buckets.
}
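Editor's worked example (illustrative, not part of this file): buckets at indices 1, 2, 3 and 6 with absolute counts 3, 5, 0 and 2 encode as two spans plus integer deltas, using the Span type from the vendored histogram package above:

	spans := []histogram.Span{
		{Offset: 1, Length: 3}, // indices 1..3
		{Offset: 2, Length: 1}, // skip indices 4 and 5, then index 6
	}
	deltas := []int64{3, 2, -5, 2} // absolute counts 3, 5, 0, 2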

message Exemplar {
  repeated LabelPair label = 1;
  double value = 2;
  google.protobuf.Timestamp timestamp = 3; // OpenMetrics-style.
}

message Metric {
  repeated LabelPair label = 1;
  Gauge gauge = 2;
  Counter counter = 3;
  Summary summary = 4;
  Untyped untyped = 5;
  Histogram histogram = 7;
  int64 timestamp_ms = 6;
}

message MetricFamily {
  string name = 1;
  string help = 2;
  MetricType type = 3;
  repeated Metric metric = 4;
}
6 vendor/github.com/prometheus/prometheus/prompb/remote.pb.go generated vendored
@@ -34,8 +34,10 @@ const (
 	// Content-Type: "application/x-protobuf"
 	// Content-Encoding: "snappy"
 	ReadRequest_SAMPLES ReadRequest_ResponseType = 0
-	// Server will stream a delimited ChunkedReadResponse message that contains XOR encoded chunks for a single series.
-	// Each message is following varint size and fixed size bigendian uint32 for CRC32 Castagnoli checksum.
+	// Server will stream a delimited ChunkedReadResponse message that
+	// contains XOR or HISTOGRAM(!) encoded chunks for a single series.
+	// Each message is following varint size and fixed size bigendian
+	// uint32 for CRC32 Castagnoli checksum.
 	//
 	// Response headers:
 	// Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse"
6 vendor/github.com/prometheus/prometheus/prompb/remote.proto generated vendored
@@ -39,8 +39,10 @@ message ReadRequest {
     // Content-Type: "application/x-protobuf"
     // Content-Encoding: "snappy"
     SAMPLES = 0;
-    // Server will stream a delimited ChunkedReadResponse message that contains XOR encoded chunks for a single series.
-    // Each message is following varint size and fixed size bigendian uint32 for CRC32 Castagnoli checksum.
+    // Server will stream a delimited ChunkedReadResponse message that
+    // contains XOR or HISTOGRAM(!) encoded chunks for a single series.
+    // Each message is following varint size and fixed size bigendian
+    // uint32 for CRC32 Castagnoli checksum.
     //
     // Response headers:
     // Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse"
1534 vendor/github.com/prometheus/prometheus/prompb/types.pb.go generated vendored
File diff suppressed because it is too large
77 vendor/github.com/prometheus/prometheus/prompb/types.proto generated vendored
@@ -54,13 +54,79 @@ message Exemplar {
   int64 timestamp = 3;
 }
 
+// A native histogram, also known as a sparse histogram.
+// Original design doc:
+// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
+// The appendix of this design doc also explains the concept of float
+// histograms. This Histogram message can represent both, the usual
+// integer histogram as well as a float histogram.
+message Histogram {
+  enum ResetHint {
+    UNKNOWN = 0; // Need to test for a counter reset explicitly.
+    YES     = 1; // This is the 1st histogram after a counter reset.
+    NO      = 2; // There was no counter reset between this and the previous Histogram.
+    GAUGE   = 3; // This is a gauge histogram where counter resets don't happen.
+  }
+
+  oneof count { // Count of observations in the histogram.
+    uint64 count_int   = 1;
+    double count_float = 2;
+  }
+  double sum = 3; // Sum of observations in the histogram.
+  // The schema defines the bucket schema. Currently, valid numbers
+  // are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1
+  // is a bucket boundary in each case, and then each power of two is
+  // divided into 2^n logarithmic buckets. Or in other words, each
+  // bucket boundary is the previous boundary times 2^(2^-n). In the
+  // future, more bucket schemas may be added using numbers < -4 or >
+  // 8.
+  sint32 schema         = 4;
+  double zero_threshold = 5; // Breadth of the zero bucket.
+  oneof zero_count { // Count in zero bucket.
+    uint64 zero_count_int   = 6;
+    double zero_count_float = 7;
+  }
+
+  // Negative Buckets.
+  repeated BucketSpan negative_spans = 8;
+  // Use either "negative_deltas" or "negative_counts", the former for
+  // regular histograms with integer counts, the latter for float
+  // histograms.
+  repeated sint64 negative_deltas = 9;  // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+  repeated double negative_counts = 10; // Absolute count of each bucket.
+
+  // Positive Buckets.
+  repeated BucketSpan positive_spans = 11;
+  // Use either "positive_deltas" or "positive_counts", the former for
+  // regular histograms with integer counts, the latter for float
+  // histograms.
+  repeated sint64 positive_deltas = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+  repeated double positive_counts = 13; // Absolute count of each bucket.
+
+  ResetHint reset_hint = 14;
+  // timestamp is in ms format, see model/timestamp/timestamp.go for
+  // conversion from time.Time to Prometheus timestamp.
+  int64 timestamp = 15;
+}
+
+// A BucketSpan defines a number of consecutive buckets with their
+// offset. Logically, it would be more straightforward to include the
+// bucket counts in the Span. However, the protobuf representation is
+// more compact in the way the data is structured here (with all the
+// buckets in a single array separate from the Spans).
+message BucketSpan {
+  sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative).
+  uint32 length = 2; // Length of consecutive buckets.
+}
+
 // TimeSeries represents samples and labels for a single time series.
 message TimeSeries {
   // For a timeseries to be valid, and for the samples and exemplars
   // to be ingested by the remote system properly, the labels field is required.
-  repeated Label labels = 1 [(gogoproto.nullable) = false];
-  repeated Sample samples = 2 [(gogoproto.nullable) = false];
-  repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false];
+  repeated Label labels         = 1 [(gogoproto.nullable) = false];
+  repeated Sample samples       = 2 [(gogoproto.nullable) = false];
+  repeated Exemplar exemplars   = 3 [(gogoproto.nullable) = false];
+  repeated Histogram histograms = 4 [(gogoproto.nullable) = false];
 }
 
 message Label {
@ -103,8 +169,9 @@ message Chunk {

  // We require this to match chunkenc.Encoding.
  enum Encoding {
    UNKNOWN   = 0;
    XOR       = 1;
    HISTOGRAM = 2;
  }
  Encoding type = 3;
  bytes    data = 4;

3 vendor/github.com/prometheus/prometheus/scrape/manager.go generated vendored
@ -132,6 +132,9 @@ type Options struct {
	// Option to enable the experimental in-memory metadata storage and append
	// metadata to the WAL.
	EnableMetadataStorage bool
	// Option to enable protobuf negotiation with the client. Note that the client can already
	// send protobuf without needing to enable this.
	EnableProtobufNegotiation bool
	// Option to increase the interval used by scrape manager to throttle target groups updates.
	DiscoveryReloadInterval model.Duration

80 vendor/github.com/prometheus/prometheus/scrape/scrape.go generated vendored
@ -40,6 +40,7 @@ import (
	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/metadata"
	"github.com/prometheus/prometheus/model/relabel"

@ -242,6 +243,8 @@ type scrapePool struct {
	newLoop func(scrapeLoopOptions) loop

	noDefaultPort bool

	enableProtobufNegotiation bool
}

type labelLimits struct {

@ -283,15 +286,16 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed

	ctx, cancel := context.WithCancel(context.Background())
	sp := &scrapePool{
		cancel:                    cancel,
		appendable:                app,
		config:                    cfg,
		client:                    client,
		activeTargets:             map[uint64]*Target{},
		loops:                     map[uint64]loop{},
		logger:                    logger,
		httpOpts:                  options.HTTPClientOptions,
		noDefaultPort:             options.NoDefaultPort,
		enableProtobufNegotiation: options.EnableProtobufNegotiation,
	}
	sp.newLoop = func(opts scrapeLoopOptions) loop {
		// Update the targets retrieval function for metadata to a new scrape cache.

@ -432,8 +436,12 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {

		t := sp.activeTargets[fp]
		interval, timeout, err := t.intervalAndTimeout(interval, timeout)
		acceptHeader := scrapeAcceptHeader
		if sp.enableProtobufNegotiation {
			acceptHeader = scrapeAcceptHeaderWithProtobuf
		}
		var (
			s = &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit}
			s = &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit, acceptHeader: acceptHeader}
			newLoop = sp.newLoop(scrapeLoopOptions{
				target:  t,
				scraper: s,

@ -536,8 +544,11 @@ func (sp *scrapePool) sync(targets []*Target) {
			// for every target.
			var err error
			interval, timeout, err = t.intervalAndTimeout(interval, timeout)

			s := &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit}
			acceptHeader := scrapeAcceptHeader
			if sp.enableProtobufNegotiation {
				acceptHeader = scrapeAcceptHeaderWithProtobuf
			}
			s := &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit, acceptHeader: acceptHeader}
			l := sp.newLoop(scrapeLoopOptions{
				target:  t,
				scraper: s,

@ -756,11 +767,15 @@ type targetScraper struct {
	buf *bufio.Reader

	bodySizeLimit int64
	acceptHeader  string
}

var errBodySizeLimit = errors.New("body size limit exceeded")

const acceptHeader = `application/openmetrics-text;version=1.0.0,application/openmetrics-text;version=0.0.1;q=0.75,text/plain;version=0.0.4;q=0.5,*/*;q=0.1`
const (
	scrapeAcceptHeader             = `application/openmetrics-text;version=1.0.0,application/openmetrics-text;version=0.0.1;q=0.75,text/plain;version=0.0.4;q=0.5,*/*;q=0.1`
	scrapeAcceptHeaderWithProtobuf = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited,application/openmetrics-text;version=1.0.0;q=0.8,application/openmetrics-text;version=0.0.1;q=0.75,text/plain;version=0.0.4;q=0.5,*/*;q=0.1`
)
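For clarity, the choice between the two headers reduces to a one-liner; a hypothetical helper (not in the diff, assumed to sit next to the constants in the scrape package) mirroring what the reload() and sync() hunks above both inline:

package scrape

// chooseAcceptHeader shows the selection logic: enabling protobuf
// negotiation merely swaps which Accept header value the scrape
// request carries.
func chooseAcceptHeader(enableProtobufNegotiation bool) string {
	if enableProtobufNegotiation {
		return scrapeAcceptHeaderWithProtobuf
	}
	return scrapeAcceptHeader
}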

var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)

@ -770,7 +785,7 @@ func (s *targetScraper) scrape(ctx context.Context, w io.Writer) (string, error)
	if err != nil {
		return "", err
	}
	req.Header.Add("Accept", acceptHeader)
	req.Header.Add("Accept", s.acceptHeader)
	req.Header.Add("Accept-Encoding", "gzip")
	req.Header.Set("User-Agent", UserAgent)
	req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", strconv.FormatFloat(s.timeout.Seconds(), 'f', -1, 64))

@ -1510,8 +1525,12 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,
loop:
	for {
		var (
			et textparse.Entry
			sampleAdded bool
			sampleAdded, isHistogram bool
			met []byte
			parsedTimestamp *int64
			val float64
			h *histogram.Histogram
		)
		if et, err = p.Next(); err != nil {
			if err == io.EOF {

@ -1531,17 +1550,24 @@ loop:
			continue
		case textparse.EntryComment:
			continue
		case textparse.EntryHistogram:
			isHistogram = true
		default:
		}
		total++

		t := defTime
		met, tp, v := p.Series()
		if !sl.honorTimestamps {
			tp = nil
		if isHistogram {
			met, parsedTimestamp, h, _ = p.Histogram()
			// TODO: ingest float histograms in tsdb.
		} else {
			met, parsedTimestamp, val = p.Series()
		}
		if tp != nil {
			t = *tp
		if !sl.honorTimestamps {
			parsedTimestamp = nil
		}
		if parsedTimestamp != nil {
			t = *parsedTimestamp
		}

		// Zero metadata out for current iteration until it's resolved.

@ -1594,8 +1620,14 @@ loop:
			updateMetadata(lset, true)
		}

		ref, err = app.Append(ref, lset, t, v)
		sampleAdded, err = sl.checkAddError(ce, met, tp, err, &sampleLimitErr, &appErrs)
		if isHistogram {
			if h != nil {
				ref, err = app.AppendHistogram(ref, lset, t, h)
			}
		} else {
			ref, err = app.Append(ref, lset, t, val)
		}
		sampleAdded, err = sl.checkAddError(ce, met, parsedTimestamp, err, &sampleLimitErr, &appErrs)
		if err != nil {
			if err != storage.ErrNotFound {
				level.Debug(sl.l).Log("msg", "Unexpected error", "series", string(met), "err", err)

@ -1604,7 +1636,7 @@ loop:
		}

		if !ok {
			if tp == nil {
			if parsedTimestamp == nil {
				// Bypass staleness logic if there is an explicit timestamp.
				sl.cache.trackStaleness(hash, lset)
			}

185 vendor/github.com/prometheus/prometheus/storage/buffer.go generated vendored
@ -14,8 +14,10 @@
package storage

import (
	"fmt"
	"math"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

@ -25,8 +27,8 @@ type BufferedSeriesIterator struct {
	buf   *sampleRing
	delta int64

	lastTime  int64
	ok        bool
	valueType chunkenc.ValueType
}

// NewBuffer returns a new iterator that buffers the values within the time range

@ -39,6 +41,7 @@ func NewBuffer(delta int64) *BufferedSeriesIterator {
// NewBufferIterator returns a new iterator that buffers the values within the
// time range of the current element and the duration of delta before.
func NewBufferIterator(it chunkenc.Iterator, delta int64) *BufferedSeriesIterator {
	// TODO(codesome): based on encoding, allocate different buffer.
	bit := &BufferedSeriesIterator{
		buf:   newSampleRing(delta, 16),
		delta: delta,

@ -53,10 +56,9 @@ func NewBufferIterator(it chunkenc.Iterator, delta int64) *BufferedSeriesIterato
func (b *BufferedSeriesIterator) Reset(it chunkenc.Iterator) {
	b.it = it
	b.lastTime = math.MinInt64
	b.ok = true
	b.buf.reset()
	b.buf.delta = b.delta
	it.Next()
	b.valueType = it.Next()
}

// ReduceDelta lowers the buffered time delta, for the current SeriesIterator only.

@ -66,8 +68,9 @@ func (b *BufferedSeriesIterator) ReduceDelta(delta int64) bool {

// PeekBack returns the nth previous element of the iterator. If there is none buffered,
// ok is false.
func (b *BufferedSeriesIterator) PeekBack(n int) (t int64, v float64, ok bool) {
	return b.buf.nthLast(n)
func (b *BufferedSeriesIterator) PeekBack(n int) (t int64, v float64, h *histogram.Histogram, ok bool) {
	s, ok := b.buf.nthLast(n)
	return s.t, s.v, s.h, ok
}

// Buffer returns an iterator over the buffered data. Invalidates previously

@ -77,63 +80,96 @@ func (b *BufferedSeriesIterator) Buffer() chunkenc.Iterator {
}

// Seek advances the iterator to the element at time t or greater.
func (b *BufferedSeriesIterator) Seek(t int64) bool {
func (b *BufferedSeriesIterator) Seek(t int64) chunkenc.ValueType {
	t0 := t - b.buf.delta

	// If the delta would cause us to seek backwards, preserve the buffer
	// and just continue regular advancement while filling the buffer on the way.
	if b.ok && t0 > b.lastTime {
	if b.valueType != chunkenc.ValNone && t0 > b.lastTime {
		b.buf.reset()

		b.ok = b.it.Seek(t0)
		if !b.ok {
			return false
		b.valueType = b.it.Seek(t0)
		switch b.valueType {
		case chunkenc.ValNone:
			return chunkenc.ValNone
		case chunkenc.ValFloat:
			b.lastTime, _ = b.At()
		case chunkenc.ValHistogram:
			b.lastTime, _ = b.AtHistogram()
		case chunkenc.ValFloatHistogram:
			b.lastTime, _ = b.AtFloatHistogram()
		default:
			panic(fmt.Errorf("BufferedSeriesIterator: unknown value type %v", b.valueType))
		}
		b.lastTime, _ = b.At()
	}

	if b.lastTime >= t {
		return true
		return b.valueType
	}
	for b.Next() {
		if b.lastTime >= t {
			return true
	for {
		if b.valueType = b.Next(); b.valueType == chunkenc.ValNone || b.lastTime >= t {
			return b.valueType
		}
	}

	return false
}

// Next advances the iterator to the next element.
func (b *BufferedSeriesIterator) Next() bool {
	if !b.ok {
		return false
	}

func (b *BufferedSeriesIterator) Next() chunkenc.ValueType {
	// Add current element to buffer before advancing.
	b.buf.add(b.it.At())

	b.ok = b.it.Next()
	if b.ok {
		b.lastTime, _ = b.At()
	switch b.valueType {
	case chunkenc.ValNone:
		return chunkenc.ValNone
	case chunkenc.ValFloat:
		t, v := b.it.At()
		b.buf.add(sample{t: t, v: v})
	case chunkenc.ValHistogram:
		t, h := b.it.AtHistogram()
		b.buf.add(sample{t: t, h: h})
	case chunkenc.ValFloatHistogram:
		t, fh := b.it.AtFloatHistogram()
		b.buf.add(sample{t: t, fh: fh})
	default:
		panic(fmt.Errorf("BufferedSeriesIterator: unknown value type %v", b.valueType))
	}

	return b.ok
	b.valueType = b.it.Next()
	if b.valueType != chunkenc.ValNone {
		b.lastTime = b.AtT()
	}
	return b.valueType
}
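Since Next and Seek now return a chunkenc.ValueType rather than a bool, every consumer has to switch on the returned type. A self-contained sketch of the new canonical loop (illustrative only, not part of this diff; all the accessors it uses appear in the changes above):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// consume drains an iterator by switching on the value type that
// Next() now returns instead of a bool.
func consume(it chunkenc.Iterator) error {
	for {
		switch it.Next() {
		case chunkenc.ValNone:
			return it.Err() // done, or an iteration error
		case chunkenc.ValFloat:
			t, v := it.At()
			fmt.Println("float", t, v)
		case chunkenc.ValHistogram:
			t, h := it.AtHistogram()
			fmt.Println("histogram", t, h)
		case chunkenc.ValFloatHistogram:
			t, fh := it.AtFloatHistogram()
			fmt.Println("float histogram", t, fh)
		}
	}
}

func main() {
	_ = consume(chunkenc.NewNopIterator()) // empty iterator: returns immediately
}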

// At returns the current element of the iterator.
// At returns the current float element of the iterator.
func (b *BufferedSeriesIterator) At() (int64, float64) {
	return b.it.At()
}

// AtHistogram returns the current histogram element of the iterator.
func (b *BufferedSeriesIterator) AtHistogram() (int64, *histogram.Histogram) {
	return b.it.AtHistogram()
}

// AtFloatHistogram returns the current float-histogram element of the iterator.
func (b *BufferedSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
	return b.it.AtFloatHistogram()
}

// AtT returns the current timestamp of the iterator.
func (b *BufferedSeriesIterator) AtT() int64 {
	return b.it.AtT()
}

// Err returns the last encountered error.
func (b *BufferedSeriesIterator) Err() error {
	return b.it.Err()
}

// TODO(beorn7): Consider having different sample types for different value types.
type sample struct {
	t  int64
	v  float64
	h  *histogram.Histogram
	fh *histogram.FloatHistogram
}

func (s sample) T() int64 {

@ -144,6 +180,25 @@ func (s sample) V() float64 {
	return s.v
}

func (s sample) H() *histogram.Histogram {
	return s.h
}

func (s sample) FH() *histogram.FloatHistogram {
	return s.fh
}

func (s sample) Type() chunkenc.ValueType {
	switch {
	case s.h != nil:
		return chunkenc.ValHistogram
	case s.fh != nil:
		return chunkenc.ValFloatHistogram
	default:
		return chunkenc.ValFloat
	}
}

type sampleRing struct {
	delta int64

@ -176,17 +231,36 @@ func (r *sampleRing) iterator() chunkenc.Iterator {
}

type sampleRingIterator struct {
	r  *sampleRing
	i  int
	t  int64
	v  float64
	h  *histogram.Histogram
	fh *histogram.FloatHistogram
}

func (it *sampleRingIterator) Next() bool {
func (it *sampleRingIterator) Next() chunkenc.ValueType {
	it.i++
	return it.i < it.r.l
	if it.i >= it.r.l {
		return chunkenc.ValNone
	}
	s := it.r.at(it.i)
	it.t = s.t
	switch {
	case s.h != nil:
		it.h = s.h
		return chunkenc.ValHistogram
	case s.fh != nil:
		it.fh = s.fh
		return chunkenc.ValFloatHistogram
	default:
		it.v = s.v
		return chunkenc.ValFloat
	}
}

func (it *sampleRingIterator) Seek(int64) bool {
	return false
func (it *sampleRingIterator) Seek(int64) chunkenc.ValueType {
	return chunkenc.ValNone
}

func (it *sampleRingIterator) Err() error {

@ -194,18 +268,32 @@ func (it *sampleRingIterator) Err() error {
}

func (it *sampleRingIterator) At() (int64, float64) {
	return it.r.at(it.i)
	return it.t, it.v
}

func (r *sampleRing) at(i int) (int64, float64) {
func (it *sampleRingIterator) AtHistogram() (int64, *histogram.Histogram) {
	return it.t, it.h
}

func (it *sampleRingIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
	if it.fh == nil {
		return it.t, it.h.ToFloat()
	}
	return it.t, it.fh
}

func (it *sampleRingIterator) AtT() int64 {
	return it.t
}

func (r *sampleRing) at(i int) sample {
	j := (r.f + i) % len(r.buf)
	s := r.buf[j]
	return s.t, s.v
	return r.buf[j]
}

// add adds a sample to the ring buffer and frees all samples that fall
// out of the delta range.
func (r *sampleRing) add(t int64, v float64) {
func (r *sampleRing) add(s sample) {
	l := len(r.buf)
	// Grow the ring buffer if it fits no more elements.
	if l == r.l {

@ -224,11 +312,11 @@ func (r *sampleRing) add(t int64, v float64) {
		}
	}

	r.buf[r.i] = sample{t: t, v: v}
	r.buf[r.i] = s
	r.l++

	// Free head of the buffer of samples that just fell out of the range.
	tmin := t - r.delta
	tmin := s.t - r.delta
	for r.buf[r.f].t < tmin {
		r.f++
		if r.f >= l {

@ -264,12 +352,11 @@ func (r *sampleRing) reduceDelta(delta int64) bool {
}

// nthLast returns the nth most recent element added to the ring.
func (r *sampleRing) nthLast(n int) (int64, float64, bool) {
func (r *sampleRing) nthLast(n int) (sample, bool) {
	if n > r.l {
		return 0, 0, false
		return sample{}, false
	}
	t, v := r.at(r.l - n)
	return t, v, true
	return r.at(r.l - n), true
}

func (r *sampleRing) samples() []sample {

15 vendor/github.com/prometheus/prometheus/storage/fanout.go generated vendored
@ -21,6 +21,7 @@ import (
	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/metadata"
	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"

@ -173,6 +174,20 @@ func (f *fanoutAppender) AppendExemplar(ref SeriesRef, l labels.Labels, e exempl
	return ref, nil
}

func (f *fanoutAppender) AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (SeriesRef, error) {
	ref, err := f.primary.AppendHistogram(ref, l, t, h)
	if err != nil {
		return ref, err
	}

	for _, appender := range f.secondaries {
		if _, err := appender.AppendHistogram(ref, l, t, h); err != nil {
			return 0, err
		}
	}
	return ref, nil
}

func (f *fanoutAppender) UpdateMetadata(ref SeriesRef, l labels.Labels, m metadata.Metadata) (SeriesRef, error) {
	ref, err := f.primary.UpdateMetadata(ref, l, m)
	if err != nil {

37 vendor/github.com/prometheus/prometheus/storage/interface.go generated vendored
@ -19,6 +19,7 @@ import (
	"fmt"

	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/metadata"
	"github.com/prometheus/prometheus/tsdb/chunkenc"

@ -35,11 +36,16 @@ var (
	// ErrTooOldSample is when out of order support is enabled but the sample is outside the time window allowed.
	ErrTooOldSample = errors.New("too old sample")
	// ErrDuplicateSampleForTimestamp is when the sample has same timestamp but different value.
	ErrDuplicateSampleForTimestamp   = errors.New("duplicate sample for timestamp")
	ErrOutOfOrderExemplar            = errors.New("out of order exemplar")
	ErrDuplicateExemplar             = errors.New("duplicate exemplar")
	ErrExemplarLabelLength           = fmt.Errorf("label length for exemplar exceeds maximum of %d UTF-8 characters", exemplar.ExemplarMaxLabelSetLength)
	ErrExemplarsDisabled             = fmt.Errorf("exemplar storage is disabled or max exemplars is less than or equal to 0")
	ErrNativeHistogramsDisabled      = fmt.Errorf("native histograms are disabled")
	ErrHistogramCountNotBigEnough    = errors.New("histogram's observation count should be at least the number of observations found in the buckets")
	ErrHistogramNegativeBucketCount  = errors.New("histogram has a bucket whose observation count is negative")
	ErrHistogramSpanNegativeOffset   = errors.New("histogram has a span whose offset is negative")
	ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided")
)

// SeriesRef is a generic series reference. In prometheus it is either a

@ -207,6 +213,9 @@ func (f QueryableFunc) Querier(ctx context.Context, mint, maxt int64) (Querier,
// It must be completed with a call to Commit or Rollback and must not be reused afterwards.
//
// Operations on the Appender interface are not goroutine-safe.
//
// The type of samples (float64, histogram, etc) appended for a given series must remain same within an Appender.
// The behaviour is undefined if samples of different types are appended to the same series in a single Commit().
type Appender interface {
	// Append adds a sample pair for the given series.
	// An optional series reference can be provided to accelerate calls.

@ -227,7 +236,9 @@ type Appender interface {
	// Rollback rolls back all modifications made in the appender so far.
	// Appender has to be discarded after rollback.
	Rollback() error

	ExemplarAppender
	HistogramAppender
	MetadataUpdater
}

@ -257,6 +268,22 @@ type ExemplarAppender interface {
	AppendExemplar(ref SeriesRef, l labels.Labels, e exemplar.Exemplar) (SeriesRef, error)
}

// HistogramAppender provides an interface for appending histograms to the storage.
type HistogramAppender interface {
	// AppendHistogram adds a histogram for the given series labels. An
	// optional reference number can be provided to accelerate calls. A
	// reference number is returned which can be used to add further
	// histograms in the same or later transactions. Returned reference
	// numbers are ephemeral and may be rejected in calls to Append() at any
	// point. Adding the sample via Append() returns a new reference number.
	// If the reference is 0, it must not be used for caching.
	//
	// For efficiency reasons, the histogram is passed as a
	// pointer. AppendHistogram won't mutate the histogram, but in turn
	// depends on the caller to not mutate it either.
	AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (SeriesRef, error)
}
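The contract above (one sample type per series within an Appender, with the returned reference reused to speed up later calls) is easy to get wrong; here is a minimal hypothetical caller sketch, not part of the diff — app, lset, and the histograms are assumed inputs, and the helper name is invented:

package example

import (
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
)

// appendHistogramPair appends two histogram samples for one series,
// reusing the ref from the first call as the docs above suggest.
func appendHistogramPair(app storage.Appender, lset labels.Labels, t1, t2 int64, h1, h2 *histogram.Histogram) error {
	ref, err := app.AppendHistogram(0, lset, t1, h1) // ref 0: no cached reference yet
	if err != nil {
		return err
	}
	// Mixing float Append calls on this same series within the same
	// appender would be undefined behaviour per the contract above.
	if _, err := app.AppendHistogram(ref, lset, t2, h2); err != nil {
		return err
	}
	return app.Commit()
}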

// MetadataUpdater provides an interface for associating metadata to stored series.
type MetadataUpdater interface {
	// UpdateMetadata updates a metadata entry for the given series and labels.

94 vendor/github.com/prometheus/prometheus/storage/memoized_iterator.go generated vendored
@ -16,6 +16,7 @@ package storage
import (
	"math"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

@ -24,12 +25,18 @@ type MemoizedSeriesIterator struct {
	it    chunkenc.Iterator
	delta int64

	lastTime  int64
	ok        bool
	valueType chunkenc.ValueType

	// Keep track of the previously returned value.
	prevTime           int64
	prevValue          float64
	prevHistogram      *histogram.Histogram
	prevFloatHistogram *histogram.FloatHistogram
	// TODO(beorn7): MemoizedSeriesIterator is currently only used by the
	// PromQL engine, which only works with FloatHistograms. For better
	// performance, we could change MemoizedSeriesIterator to also only
	// handle FloatHistograms.
}

// NewMemoizedEmptyIterator is like NewMemoizedIterator but it's initialised with an empty iterator.

@ -53,70 +60,93 @@ func NewMemoizedIterator(it chunkenc.Iterator, delta int64) *MemoizedSeriesItera
func (b *MemoizedSeriesIterator) Reset(it chunkenc.Iterator) {
	b.it = it
	b.lastTime = math.MinInt64
	b.ok = true
	b.prevTime = math.MinInt64
	it.Next()
	b.valueType = it.Next()
}

// PeekPrev returns the previous element of the iterator. If there is none buffered,
// ok is false.
func (b *MemoizedSeriesIterator) PeekPrev() (t int64, v float64, ok bool) {
func (b *MemoizedSeriesIterator) PeekPrev() (t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, ok bool) {
	if b.prevTime == math.MinInt64 {
		return 0, 0, false
		return 0, 0, nil, nil, false
	}
	return b.prevTime, b.prevValue, true
	return b.prevTime, b.prevValue, b.prevHistogram, b.prevFloatHistogram, true
}

// Seek advances the iterator to the element at time t or greater.
func (b *MemoizedSeriesIterator) Seek(t int64) bool {
func (b *MemoizedSeriesIterator) Seek(t int64) chunkenc.ValueType {
	t0 := t - b.delta

	if b.ok && t0 > b.lastTime {
	if b.valueType != chunkenc.ValNone && t0 > b.lastTime {
		// Reset the previously stored element because the seek advanced
		// more than the delta.
		b.prevTime = math.MinInt64

		b.ok = b.it.Seek(t0)
		if !b.ok {
			return false
		b.valueType = b.it.Seek(t0)
		if b.valueType == chunkenc.ValNone {
			return chunkenc.ValNone
		}
		b.lastTime, _ = b.it.At()
		b.lastTime = b.it.AtT()
	}

	if b.lastTime >= t {
		return true
		return b.valueType
	}
	for b.Next() {
	for b.Next() != chunkenc.ValNone {
		if b.lastTime >= t {
			return true
			return b.valueType
		}
	}

	return false
	return chunkenc.ValNone
}

// Next advances the iterator to the next element.
func (b *MemoizedSeriesIterator) Next() bool {
	if !b.ok {
		return false
	}

func (b *MemoizedSeriesIterator) Next() chunkenc.ValueType {
	// Keep track of the previous element.
	b.prevTime, b.prevValue = b.it.At()

	b.ok = b.it.Next()
	if b.ok {
		b.lastTime, _ = b.it.At()
	switch b.valueType {
	case chunkenc.ValNone:
		return chunkenc.ValNone
	case chunkenc.ValFloat:
		b.prevTime, b.prevValue = b.it.At()
		b.prevHistogram = nil
		b.prevFloatHistogram = nil
	case chunkenc.ValHistogram:
		b.prevValue = 0
		b.prevTime, b.prevHistogram = b.it.AtHistogram()
		_, b.prevFloatHistogram = b.it.AtFloatHistogram()
	case chunkenc.ValFloatHistogram:
		b.prevValue = 0
		b.prevHistogram = nil
		b.prevTime, b.prevFloatHistogram = b.it.AtFloatHistogram()
	}

	return b.ok
	b.valueType = b.it.Next()
	if b.valueType != chunkenc.ValNone {
		b.lastTime = b.it.AtT()
	}
	return b.valueType
}

// At returns the current element of the iterator.
// At returns the current float element of the iterator.
func (b *MemoizedSeriesIterator) At() (int64, float64) {
	return b.it.At()
}

// AtHistogram returns the current histogram element of the iterator.
func (b *MemoizedSeriesIterator) AtHistogram() (int64, *histogram.Histogram) {
	return b.it.AtHistogram()
}

// AtFloatHistogram returns the current float-histogram element of the iterator.
func (b *MemoizedSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
	return b.it.AtFloatHistogram()
}

// AtT returns the current timestamp of the iterator.
func (b *MemoizedSeriesIterator) AtT() int64 {
	return b.it.AtT()
}

// Err returns the last encountered error.
func (b *MemoizedSeriesIterator) Err() error {
	return b.it.Err()

84 vendor/github.com/prometheus/prometheus/storage/merge.go generated vendored
@ -18,9 +18,11 @@ import (
	"container/heap"
	"fmt"
	"math"
	"sort"
	"sync"

	"golang.org/x/exp/slices"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/chunks"

@ -240,7 +242,7 @@ func (q *mergeGenericQuerier) LabelNames(matchers ...*labels.Matcher) ([]string,
	for name := range labelNamesMap {
		labelNames = append(labelNames, name)
	}
	sort.Strings(labelNames)
	slices.Sort(labelNames)
	return labelNames, warnings, nil
}

@ -441,7 +443,7 @@ type chainSampleIterator struct {
	h samplesIteratorHeap

	curr  chunkenc.Iterator
	lastt int64
	lastT int64
}

// NewChainSampleIterator returns a single iterator that iterates over the samples from the given iterators in a sorted

@ -451,60 +453,82 @@ func NewChainSampleIterator(iterators []chunkenc.Iterator) chunkenc.Iterator {
	return &chainSampleIterator{
		iterators: iterators,
		h:         nil,
		lastt:     math.MinInt64,
		lastT:     math.MinInt64,
	}
}

func (c *chainSampleIterator) Seek(t int64) bool {
	// No-op check
	if c.curr != nil && c.lastt >= t {
		return true
func (c *chainSampleIterator) Seek(t int64) chunkenc.ValueType {
	// No-op check.
	if c.curr != nil && c.lastT >= t {
		return c.curr.Seek(c.lastT)
	}

	c.h = samplesIteratorHeap{}
	for _, iter := range c.iterators {
		if iter.Seek(t) {
		if iter.Seek(t) != chunkenc.ValNone {
			heap.Push(&c.h, iter)
		}
	}
	if len(c.h) > 0 {
		c.curr = heap.Pop(&c.h).(chunkenc.Iterator)
		c.lastt, _ = c.curr.At()
		return true
		c.lastT = c.curr.AtT()
		return c.curr.Seek(c.lastT)
	}
	c.curr = nil
	return false
	return chunkenc.ValNone
}

func (c *chainSampleIterator) At() (t int64, v float64) {
	if c.curr == nil {
		panic("chainSampleIterator.At() called before first .Next() or after .Next() returned false.")
		panic("chainSampleIterator.At called before first .Next or after .Next returned false.")
	}
	return c.curr.At()
}

func (c *chainSampleIterator) Next() bool {
func (c *chainSampleIterator) AtHistogram() (int64, *histogram.Histogram) {
	if c.curr == nil {
		panic("chainSampleIterator.AtHistogram called before first .Next or after .Next returned false.")
	}
	return c.curr.AtHistogram()
}

func (c *chainSampleIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
	if c.curr == nil {
		panic("chainSampleIterator.AtFloatHistogram called before first .Next or after .Next returned false.")
	}
	return c.curr.AtFloatHistogram()
}

func (c *chainSampleIterator) AtT() int64 {
	if c.curr == nil {
		panic("chainSampleIterator.AtT called before first .Next or after .Next returned false.")
	}
	return c.curr.AtT()
}

func (c *chainSampleIterator) Next() chunkenc.ValueType {
	if c.h == nil {
		c.h = samplesIteratorHeap{}
		// We call c.curr.Next() as the first thing below.
		// So, we don't call Next() on it here.
		c.curr = c.iterators[0]
		for _, iter := range c.iterators[1:] {
			if iter.Next() {
			if iter.Next() != chunkenc.ValNone {
				heap.Push(&c.h, iter)
			}
		}
	}

	if c.curr == nil {
		return false
		return chunkenc.ValNone
	}

	var currt int64
	var currT int64
	var currValueType chunkenc.ValueType
	for {
		if c.curr.Next() {
			currt, _ = c.curr.At()
			if currt == c.lastt {
		currValueType = c.curr.Next()
		if currValueType != chunkenc.ValNone {
			currT = c.curr.AtT()
			if currT == c.lastT {
				// Ignoring sample for the same timestamp.
				continue
			}

@ -515,7 +539,8 @@ func (c *chainSampleIterator) Next() bool {
			}

			// Check current iterator with the top of the heap.
			if nextt, _ := c.h[0].At(); currt < nextt {
			nextT := c.h[0].AtT()
			if currT < nextT {
				// Current iterator has smaller timestamp than the heap.
				break
			}

@ -524,18 +549,19 @@ func (c *chainSampleIterator) Next() bool {
		} else if len(c.h) == 0 {
			// No iterator left to iterate.
			c.curr = nil
			return false
			return chunkenc.ValNone
		}

		c.curr = heap.Pop(&c.h).(chunkenc.Iterator)
		currt, _ = c.curr.At()
		if currt != c.lastt {
		currT = c.curr.AtT()
		currValueType = c.curr.Seek(currT)
		if currT != c.lastT {
			break
		}
	}

	c.lastt = currt
	return true
	c.lastT = currT
	return currValueType
}

func (c *chainSampleIterator) Err() error {

@ -552,9 +578,7 @@ func (h samplesIteratorHeap) Len() int { return len(h) }
func (h samplesIteratorHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }

func (h samplesIteratorHeap) Less(i, j int) bool {
	at, _ := h[i].At()
	bt, _ := h[j].At()
	return at < bt
	return h[i].AtT() < h[j].AtT()
}

func (h *samplesIteratorHeap) Push(x interface{}) {

102 vendor/github.com/prometheus/prometheus/storage/remote/codec.go generated vendored
@ -26,6 +26,7 @@ import (
	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/textparse"
	"github.com/prometheus/prometheus/prompb"

@ -118,7 +119,8 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult,
		iter := series.Iterator()
		samples := []prompb.Sample{}

		for iter.Next() {
		for iter.Next() == chunkenc.ValFloat {
			// TODO(beorn7): Add Histogram support.
			numSamples++
			if sampleLimit > 0 && numSamples > sampleLimit {
				return nil, ss.Warnings(), HTTPError{

@ -355,37 +357,65 @@ func newConcreteSeriersIterator(series *concreteSeries) chunkenc.Iterator {
}

// Seek implements storage.SeriesIterator.
func (c *concreteSeriesIterator) Seek(t int64) bool {
func (c *concreteSeriesIterator) Seek(t int64) chunkenc.ValueType {
	if c.cur == -1 {
		c.cur = 0
	}
	if c.cur >= len(c.series.samples) {
		return false
		return chunkenc.ValNone
	}
	// No-op check.
	if s := c.series.samples[c.cur]; s.Timestamp >= t {
		return true
		return chunkenc.ValFloat
	}
	// Do binary search between current position and end.
	c.cur += sort.Search(len(c.series.samples)-c.cur, func(n int) bool {
		return c.series.samples[n+c.cur].Timestamp >= t
	})
	return c.cur < len(c.series.samples)
	if c.cur < len(c.series.samples) {
		return chunkenc.ValFloat
	}
	return chunkenc.ValNone
	// TODO(beorn7): Add histogram support.
}

// At implements storage.SeriesIterator.
// At implements chunkenc.Iterator.
func (c *concreteSeriesIterator) At() (t int64, v float64) {
	s := c.series.samples[c.cur]
	return s.Timestamp, s.Value
}

// Next implements storage.SeriesIterator.
func (c *concreteSeriesIterator) Next() bool {
	c.cur++
	return c.cur < len(c.series.samples)
// AtHistogram always returns (0, nil) because there is no support for histogram
// values yet.
// TODO(beorn7): Fix that for histogram support in remote storage.
func (c *concreteSeriesIterator) AtHistogram() (int64, *histogram.Histogram) {
	return 0, nil
}

// Err implements storage.SeriesIterator.
// AtFloatHistogram always returns (0, nil) because there is no support for histogram
// values yet.
// TODO(beorn7): Fix that for histogram support in remote storage.
func (c *concreteSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
	return 0, nil
}

// AtT implements chunkenc.Iterator.
func (c *concreteSeriesIterator) AtT() int64 {
	s := c.series.samples[c.cur]
	return s.Timestamp
}

// Next implements chunkenc.Iterator.
func (c *concreteSeriesIterator) Next() chunkenc.ValueType {
	c.cur++
	if c.cur < len(c.series.samples) {
		return chunkenc.ValFloat
	}
	return chunkenc.ValNone
	// TODO(beorn7): Add histogram support.
}

// Err implements chunkenc.Iterator.
func (c *concreteSeriesIterator) Err() error {
	return nil
}

@ -472,6 +502,56 @@ func exemplarProtoToExemplar(ep prompb.Exemplar) exemplar.Exemplar {
	}
}

// HistogramProtoToHistogram extracts a (normal integer) Histogram from the
// provided proto message. The caller has to make sure that the proto message
// represents an integer histogram and not a float histogram.
func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram {
	return &histogram.Histogram{
		Schema:          hp.Schema,
		ZeroThreshold:   hp.ZeroThreshold,
		ZeroCount:       hp.GetZeroCountInt(),
		Count:           hp.GetCountInt(),
		Sum:             hp.Sum,
		PositiveSpans:   spansProtoToSpans(hp.GetPositiveSpans()),
		PositiveBuckets: hp.GetPositiveDeltas(),
		NegativeSpans:   spansProtoToSpans(hp.GetNegativeSpans()),
		NegativeBuckets: hp.GetNegativeDeltas(),
	}
}

func spansProtoToSpans(s []*prompb.BucketSpan) []histogram.Span {
	spans := make([]histogram.Span, len(s))
	for i := 0; i < len(s); i++ {
		spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
	}

	return spans
}

func HistogramToHistogramProto(timestamp int64, h *histogram.Histogram) prompb.Histogram {
	return prompb.Histogram{
		Count:          &prompb.Histogram_CountInt{CountInt: h.Count},
		Sum:            h.Sum,
		Schema:         h.Schema,
		ZeroThreshold:  h.ZeroThreshold,
		ZeroCount:      &prompb.Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount},
		NegativeSpans:  spansToSpansProto(h.NegativeSpans),
		NegativeDeltas: h.NegativeBuckets,
		PositiveSpans:  spansToSpansProto(h.PositiveSpans),
		PositiveDeltas: h.PositiveBuckets,
		Timestamp:      timestamp,
	}
}

func spansToSpansProto(s []histogram.Span) []*prompb.BucketSpan {
	spans := make([]*prompb.BucketSpan, len(s))
	for i := 0; i < len(s); i++ {
		spans[i] = &prompb.BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
	}

	return spans
}
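As a sanity check on the two converters above, a round-trip sketch (illustrative only, assumed to live in the same remote package as the functions it calls):

package remote

import "github.com/prometheus/prometheus/model/histogram"

// histogramRoundTrip is not part of the diff: the two converters above
// are inverses for the fields they copy, so a histogram should survive
// proto encoding unchanged.
func histogramRoundTrip(ts int64, h *histogram.Histogram) *histogram.Histogram {
	return HistogramProtoToHistogram(HistogramToHistogramProto(ts, h))
}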

// LabelProtosToMetric unpacks a []*prompb.Label to a model.Metric
func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric {
	metric := make(model.Metric, len(labelPairs))

318 vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go generated vendored
@ -32,13 +32,14 @@ import (
	"go.uber.org/atomic"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/relabel"
	"github.com/prometheus/prometheus/prompb"
	"github.com/prometheus/prometheus/scrape"
	"github.com/prometheus/prometheus/tsdb/chunks"
	"github.com/prometheus/prometheus/tsdb/record"
	"github.com/prometheus/prometheus/tsdb/wal"
	"github.com/prometheus/prometheus/tsdb/wlog"
)

const (

@ -54,30 +55,35 @@ const (
type queueManagerMetrics struct {
	reg prometheus.Registerer

	samplesTotal           prometheus.Counter
	exemplarsTotal         prometheus.Counter
	histogramsTotal        prometheus.Counter
	metadataTotal          prometheus.Counter
	failedSamplesTotal     prometheus.Counter
	failedExemplarsTotal   prometheus.Counter
	failedHistogramsTotal  prometheus.Counter
	failedMetadataTotal    prometheus.Counter
	retriedSamplesTotal    prometheus.Counter
	retriedExemplarsTotal  prometheus.Counter
	retriedHistogramsTotal prometheus.Counter
	retriedMetadataTotal   prometheus.Counter
	droppedSamplesTotal    prometheus.Counter
	droppedExemplarsTotal  prometheus.Counter
	droppedHistogramsTotal prometheus.Counter
	enqueueRetriesTotal    prometheus.Counter
	sentBatchDuration      prometheus.Histogram
	highestSentTimestamp   *maxTimestamp
	pendingSamples         prometheus.Gauge
	pendingExemplars       prometheus.Gauge
	pendingHistograms      prometheus.Gauge
	shardCapacity          prometheus.Gauge
	numShards              prometheus.Gauge
	maxNumShards           prometheus.Gauge
	minNumShards           prometheus.Gauge
	desiredNumShards       prometheus.Gauge
	sentBytesTotal         prometheus.Counter
	metadataBytesTotal     prometheus.Counter
	maxSamplesPerSend      prometheus.Gauge
}

func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManagerMetrics {

@ -103,6 +109,13 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager
		Help:        "Total number of exemplars sent to remote storage.",
		ConstLabels: constLabels,
	})
	m.histogramsTotal = prometheus.NewCounter(prometheus.CounterOpts{
		Namespace:   namespace,
		Subsystem:   subsystem,
		Name:        "histograms_total",
		Help:        "Total number of histograms sent to remote storage.",
		ConstLabels: constLabels,
	})
	m.metadataTotal = prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: namespace,
		Subsystem: subsystem,

@ -124,6 +137,13 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager
		Help:        "Total number of exemplars which failed on send to remote storage, non-recoverable errors.",
		ConstLabels: constLabels,
	})
	m.failedHistogramsTotal = prometheus.NewCounter(prometheus.CounterOpts{
		Namespace:   namespace,
		Subsystem:   subsystem,
		Name:        "histograms_failed_total",
		Help:        "Total number of histograms which failed on send to remote storage, non-recoverable errors.",
		ConstLabels: constLabels,
	})
	m.failedMetadataTotal = prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: namespace,
		Subsystem: subsystem,

@ -145,6 +165,13 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager
		Help:        "Total number of exemplars which failed on send to remote storage but were retried because the send error was recoverable.",
		ConstLabels: constLabels,
	})
	m.retriedHistogramsTotal = prometheus.NewCounter(prometheus.CounterOpts{
		Namespace:   namespace,
		Subsystem:   subsystem,
		Name:        "histograms_retried_total",
		Help:        "Total number of histograms which failed on send to remote storage but were retried because the send error was recoverable.",
		ConstLabels: constLabels,
	})
	m.retriedMetadataTotal = prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: namespace,
		Subsystem: subsystem,

@ -166,6 +193,13 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager
		Help:        "Total number of exemplars which were dropped after being read from the WAL before being sent via remote write, either via relabelling or unintentionally because of an unknown reference ID.",
		ConstLabels: constLabels,
	})
	m.droppedHistogramsTotal = prometheus.NewCounter(prometheus.CounterOpts{
		Namespace:   namespace,
		Subsystem:   subsystem,
		Name:        "histograms_dropped_total",
		Help:        "Total number of histograms which were dropped after being read from the WAL before being sent via remote write, either via relabelling or unintentionally because of an unknown reference ID.",
		ConstLabels: constLabels,
	})
	m.enqueueRetriesTotal = prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: namespace,
		Subsystem: subsystem,

@ -204,6 +238,13 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager
		Help:        "The number of exemplars pending in the queues shards to be sent to the remote storage.",
		ConstLabels: constLabels,
	})
	m.pendingHistograms = prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace:   namespace,
		Subsystem:   subsystem,
		Name:        "histograms_pending",
		Help:        "The number of histograms pending in the queues shards to be sent to the remote storage.",
		ConstLabels: constLabels,
	})
	m.shardCapacity = prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace: namespace,
		Subsystem: subsystem,

@ -269,20 +310,25 @@ func (m *queueManagerMetrics) register() {
	m.reg.MustRegister(
		m.samplesTotal,
		m.exemplarsTotal,
		m.histogramsTotal,
		m.metadataTotal,
		m.failedSamplesTotal,
		m.failedExemplarsTotal,
		m.failedHistogramsTotal,
		m.failedMetadataTotal,
		m.retriedSamplesTotal,
		m.retriedExemplarsTotal,
		m.retriedHistogramsTotal,
		m.retriedMetadataTotal,
		m.droppedSamplesTotal,
		m.droppedExemplarsTotal,
		m.droppedHistogramsTotal,
		m.enqueueRetriesTotal,
		m.sentBatchDuration,
		m.highestSentTimestamp,
		m.pendingSamples,
		m.pendingExemplars,
		m.pendingHistograms,
		m.shardCapacity,
		m.numShards,
		m.maxNumShards,

@ -299,20 +345,25 @@ func (m *queueManagerMetrics) unregister() {
	if m.reg != nil {
		m.reg.Unregister(m.samplesTotal)
		m.reg.Unregister(m.exemplarsTotal)
		m.reg.Unregister(m.histogramsTotal)
		m.reg.Unregister(m.metadataTotal)
		m.reg.Unregister(m.failedSamplesTotal)
		m.reg.Unregister(m.failedExemplarsTotal)
		m.reg.Unregister(m.failedHistogramsTotal)
		m.reg.Unregister(m.failedMetadataTotal)
		m.reg.Unregister(m.retriedSamplesTotal)
		m.reg.Unregister(m.retriedExemplarsTotal)
		m.reg.Unregister(m.retriedHistogramsTotal)
		m.reg.Unregister(m.retriedMetadataTotal)
		m.reg.Unregister(m.droppedSamplesTotal)
		m.reg.Unregister(m.droppedExemplarsTotal)
		m.reg.Unregister(m.droppedHistogramsTotal)
		m.reg.Unregister(m.enqueueRetriesTotal)
		m.reg.Unregister(m.sentBatchDuration)
		m.reg.Unregister(m.highestSentTimestamp)
		m.reg.Unregister(m.pendingSamples)
		m.reg.Unregister(m.pendingExemplars)
		m.reg.Unregister(m.pendingHistograms)
		m.reg.Unregister(m.shardCapacity)
		m.reg.Unregister(m.numShards)
		m.reg.Unregister(m.maxNumShards)
@ -341,15 +392,16 @@ type QueueManager struct {
type QueueManager struct {
	lastSendTimestamp atomic.Int64

	logger               log.Logger
	flushDeadline        time.Duration
	cfg                  config.QueueConfig
	mcfg                 config.MetadataConfig
	externalLabels       labels.Labels
	relabelConfigs       []*relabel.Config
	sendExemplars        bool
	sendNativeHistograms bool
	watcher              *wal.Watcher
	watcher              *wlog.Watcher
	metadataWatcher      *MetadataWatcher

	clientMtx   sync.RWMutex
	storeClient WriteClient

@ -381,8 +433,8 @@ type QueueManager struct {
// the WAL directory will be constructed as <dir>/wal.
func NewQueueManager(
	metrics *queueManagerMetrics,
	watcherMetrics *wal.WatcherMetrics,
	readerMetrics *wal.LiveReaderMetrics,
	watcherMetrics *wlog.WatcherMetrics,
	readerMetrics *wlog.LiveReaderMetrics,
	logger log.Logger,
	dir string,
	samplesIn *ewmaRate,

@ -396,6 +448,7 @@ func NewQueueManager(
	highestRecvTimestamp *maxTimestamp,
	sm ReadyScrapeManager,
	enableExemplarRemoteWrite bool,
	enableNativeHistogramRemoteWrite bool,
) *QueueManager {
	if logger == nil {
		logger = log.NewNopLogger()

@ -403,14 +456,15 @@ func NewQueueManager(

	logger = log.With(logger, remoteName, client.Name(), endpoint, client.Endpoint())
	t := &QueueManager{
		logger:               logger,
		flushDeadline:        flushDeadline,
		cfg:                  cfg,
		mcfg:                 mCfg,
		externalLabels:       externalLabels,
		relabelConfigs:       relabelConfigs,
		storeClient:          client,
		sendExemplars:        enableExemplarRemoteWrite,
		sendNativeHistograms: enableNativeHistogramRemoteWrite,

		seriesLabels:         make(map[chunks.HeadSeriesRef]labels.Labels),
		seriesSegmentIndexes: make(map[chunks.HeadSeriesRef]int),

@ -430,7 +484,7 @@ func NewQueueManager(
		highestRecvTimestamp: highestRecvTimestamp,
	}

	t.watcher = wal.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, dir, enableExemplarRemoteWrite)
	t.watcher = wlog.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, dir, enableExemplarRemoteWrite, enableNativeHistogramRemoteWrite)
	if t.mcfg.Send {
		t.metadataWatcher = NewMetadataWatcher(logger, sm, client.Name(), t, t.mcfg.SendInterval, flushDeadline)
	}

@ -538,11 +592,11 @@ outer:
				return false
			default:
			}
			if t.shards.enqueue(s.Ref, sampleOrExemplar{
			if t.shards.enqueue(s.Ref, timeSeries{
				seriesLabels: lbls,
				timestamp:    s.T,
				value:        s.V,
				isSample:     true,
				sType:        tSample,
			}) {
				continue outer
			}

@ -588,11 +642,59 @@ outer:
				return false
			default:
			}
			if t.shards.enqueue(e.Ref, sampleOrExemplar{
			if t.shards.enqueue(e.Ref, timeSeries{
				seriesLabels:   lbls,
				timestamp:      e.T,
				value:          e.V,
				exemplarLabels: e.Labels,
				sType:          tExemplar,
			}) {
				continue outer
			}

			t.metrics.enqueueRetriesTotal.Inc()
			time.Sleep(time.Duration(backoff))
			backoff = backoff * 2
			if backoff > t.cfg.MaxBackoff {
				backoff = t.cfg.MaxBackoff
			}
		}
	}
	return true
}

func (t *QueueManager) AppendHistograms(histograms []record.RefHistogramSample) bool {
	if !t.sendNativeHistograms {
		return true
	}

outer:
	for _, h := range histograms {
		t.seriesMtx.Lock()
		lbls, ok := t.seriesLabels[h.Ref]
		if !ok {
			t.metrics.droppedHistogramsTotal.Inc()
			t.dataDropped.incr(1)
			if _, ok := t.droppedSeries[h.Ref]; !ok {
				level.Info(t.logger).Log("msg", "Dropped histogram for series that was not explicitly dropped via relabelling", "ref", h.Ref)
			}
			t.seriesMtx.Unlock()
			continue
		}
		t.seriesMtx.Unlock()

		backoff := model.Duration(5 * time.Millisecond)
		for {
			select {
			case <-t.quit:
				return false
			default:
			}
			if t.shards.enqueue(h.Ref, timeSeries{
				seriesLabels: lbls,
				timestamp:    h.T,
				histogram:    h.H,
				sType:        tHistogram,
			}) {
				continue outer
			}
@ -921,8 +1023,9 @@ type shards struct {
	qm     *QueueManager
	queues []*queue
	// So we can accurately track how many of each are lost during shard shutdowns.
	enqueuedSamples    atomic.Int64
	enqueuedExemplars  atomic.Int64
	enqueuedHistograms atomic.Int64

	// Emulate a wait group with a channel and an atomic int, as you
	// cannot select on a wait group.

@ -934,9 +1037,10 @@ type shards struct {

	// Hard shutdown context is used to terminate outgoing HTTP connections
	// after giving them a chance to terminate.
	hardShutdown                    context.CancelFunc
	samplesDroppedOnHardShutdown    atomic.Uint32
	exemplarsDroppedOnHardShutdown  atomic.Uint32
	histogramsDroppedOnHardShutdown atomic.Uint32
}

// start the shards; must be called before any call to enqueue.

@ -961,8 +1065,10 @@ func (s *shards) start(n int) {
	s.done = make(chan struct{})
	s.enqueuedSamples.Store(0)
	s.enqueuedExemplars.Store(0)
	s.enqueuedHistograms.Store(0)
	s.samplesDroppedOnHardShutdown.Store(0)
	s.exemplarsDroppedOnHardShutdown.Store(0)
	s.histogramsDroppedOnHardShutdown.Store(0)
	for i := 0; i < n; i++ {
		go s.runShard(hardShutdownCtx, i, newQueues[i])
	}

@ -1008,7 +1114,7 @@ func (s *shards) stop() {
// retry. A shard is full when its configured capacity has been reached,
// specifically, when s.queues[shard] has filled its batchQueue channel and the
// partial batch has also been filled.
func (s *shards) enqueue(ref chunks.HeadSeriesRef, data sampleOrExemplar) bool {
func (s *shards) enqueue(ref chunks.HeadSeriesRef, data timeSeries) bool {
	s.mtx.RLock()
	defer s.mtx.RUnlock()

@ -1021,12 +1127,16 @@ func (s *shards) enqueue(ref chunks.HeadSeriesRef, data sampleOrExemplar) bool {
	if !appended {
		return false
	}
	if data.isSample {
	switch data.sType {
	case tSample:
		s.qm.metrics.pendingSamples.Inc()
		s.enqueuedSamples.Inc()
	} else {
	case tExemplar:
		s.qm.metrics.pendingExemplars.Inc()
		s.enqueuedExemplars.Inc()
	case tHistogram:
		s.qm.metrics.pendingHistograms.Inc()
		s.enqueuedHistograms.Inc()
	}
	return true
}

@ -1035,24 +1145,34 @@ func (s *shards) enqueue(ref chunks.HeadSeriesRef, data sampleOrExemplar) bool {
type queue struct {
	// batchMtx covers operations appending to or publishing the partial batch.
	batchMtx   sync.Mutex
	batch      []sampleOrExemplar
	batchQueue chan []sampleOrExemplar
	batch      []timeSeries
	batchQueue chan []timeSeries

	// Since we know there are a limited number of batches out, using a stack
	// is easy and safe so a sync.Pool is not necessary.
	// poolMtx covers adding and removing batches from the batchPool.
	poolMtx   sync.Mutex
	batchPool [][]sampleOrExemplar
	batchPool [][]timeSeries
}

type sampleOrExemplar struct {
type timeSeries struct {
	seriesLabels   labels.Labels
	value          float64
	histogram      *histogram.Histogram
	timestamp      int64
	exemplarLabels labels.Labels
	isSample       bool
	// The type of series: sample, exemplar, or histogram.
	sType seriesType
}

type seriesType int

const (
	tSample seriesType = iota
	tExemplar
	tHistogram
)
|
||||
|
||||
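Editor's note: the hunks above replace the two-way sampleOrExemplar struct (selected via the isSample flag) with a three-way tagged union. A minimal sketch, not part of the commit, of how each variant is populated; the names and values are illustrative only:

    // Hypothetical lbls/exLbls/h/t/v values, for illustration only.
    sampleEntry := timeSeries{seriesLabels: lbls, timestamp: t, value: v, sType: tSample}
    exemplarEntry := timeSeries{seriesLabels: lbls, timestamp: t, value: v, exemplarLabels: exLbls, sType: tExemplar}
    histogramEntry := timeSeries{seriesLabels: lbls, timestamp: t, histogram: h, sType: tHistogram}

Only the fields relevant to each sType are set; enqueue and populateTimeSeries switch on sType to route each entry.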
func newQueue(batchSize, capacity int) *queue {
    batches := capacity / batchSize
    // Always create an unbuffered channel even if capacity is configured to be

@ -1061,17 +1181,17 @@ func newQueue(batchSize, capacity int) *queue {
        batches = 1
    }
    return &queue{
        batch:      make([]sampleOrExemplar, 0, batchSize),
        batchQueue: make(chan []sampleOrExemplar, batches),
        batch:      make([]timeSeries, 0, batchSize),
        batchQueue: make(chan []timeSeries, batches),
        // batchPool should have capacity for everything in the channel + 1 for
        // the batch being processed.
        batchPool: make([][]sampleOrExemplar, 0, batches+1),
        batchPool: make([][]timeSeries, 0, batches+1),
    }
}

// Append the sampleOrExemplar to the buffered batch. Returns false if it
// Append the timeSeries to the buffered batch. Returns false if it
// cannot be added and must be retried.
func (q *queue) Append(datum sampleOrExemplar) bool {
func (q *queue) Append(datum timeSeries) bool {
    q.batchMtx.Lock()
    defer q.batchMtx.Unlock()
    q.batch = append(q.batch, datum)

@ -1089,12 +1209,12 @@ func (q *queue) Append(datum sampleOrExemplar) bool {
    return true
}

func (q *queue) Chan() <-chan []sampleOrExemplar {
func (q *queue) Chan() <-chan []timeSeries {
    return q.batchQueue
}

// Batch returns the current batch and allocates a new batch.
func (q *queue) Batch() []sampleOrExemplar {
func (q *queue) Batch() []timeSeries {
    q.batchMtx.Lock()
    defer q.batchMtx.Unlock()

@ -1109,7 +1229,7 @@ func (q *queue) Batch() []sampleOrExemplar {
}

// ReturnForReuse adds the batch buffer back to the internal pool.
func (q *queue) ReturnForReuse(batch []sampleOrExemplar) {
func (q *queue) ReturnForReuse(batch []timeSeries) {
    q.poolMtx.Lock()
    defer q.poolMtx.Unlock()
    if len(q.batchPool) < cap(q.batchPool) {

@ -1149,7 +1269,7 @@ func (q *queue) tryEnqueueingBatch(done <-chan struct{}) bool {
    }
}

func (q *queue) newBatch(capacity int) []sampleOrExemplar {
func (q *queue) newBatch(capacity int) []timeSeries {
    q.poolMtx.Lock()
    defer q.poolMtx.Unlock()
    batches := len(q.batchPool)

@ -1158,7 +1278,7 @@ func (q *queue) newBatch(capacity int) []sampleOrExemplar {
        q.batchPool = q.batchPool[:batches-1]
        return batch
    }
    return make([]sampleOrExemplar, 0, capacity)
    return make([]timeSeries, 0, capacity)
}

func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
@ -1209,22 +1329,26 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
            // Remove them from pending and mark them as failed.
            droppedSamples := int(s.enqueuedSamples.Load())
            droppedExemplars := int(s.enqueuedExemplars.Load())
            droppedHistograms := int(s.enqueuedHistograms.Load())
            s.qm.metrics.pendingSamples.Sub(float64(droppedSamples))
            s.qm.metrics.pendingExemplars.Sub(float64(droppedExemplars))
            s.qm.metrics.pendingHistograms.Sub(float64(droppedHistograms))
            s.qm.metrics.failedSamplesTotal.Add(float64(droppedSamples))
            s.qm.metrics.failedExemplarsTotal.Add(float64(droppedExemplars))
            s.qm.metrics.failedHistogramsTotal.Add(float64(droppedHistograms))
            s.samplesDroppedOnHardShutdown.Add(uint32(droppedSamples))
            s.exemplarsDroppedOnHardShutdown.Add(uint32(droppedExemplars))
            s.histogramsDroppedOnHardShutdown.Add(uint32(droppedHistograms))
            return

        case batch, ok := <-batchQueue:
            if !ok {
                return
            }
            nPendingSamples, nPendingExemplars := s.populateTimeSeries(batch, pendingData)
            nPendingSamples, nPendingExemplars, nPendingHistograms := s.populateTimeSeries(batch, pendingData)
            queue.ReturnForReuse(batch)
            n := nPendingSamples + nPendingExemplars
            s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, pBuf, &buf)
            n := nPendingSamples + nPendingExemplars + nPendingHistograms
            s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)

            stop()
            timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline))

@ -1232,10 +1356,10 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
        case <-timer.C:
            batch := queue.Batch()
            if len(batch) > 0 {
                nPendingSamples, nPendingExemplars := s.populateTimeSeries(batch, pendingData)
                n := nPendingSamples + nPendingExemplars
                nPendingSamples, nPendingExemplars, nPendingHistograms := s.populateTimeSeries(batch, pendingData)
                n := nPendingSamples + nPendingExemplars + nPendingHistograms
                level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples, "exemplars", nPendingExemplars, "shard", shardNum)
                s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, pBuf, &buf)
                s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
            }
            queue.ReturnForReuse(batch)
            timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline))

@ -1243,43 +1367,51 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
    }
}

func (s *shards) populateTimeSeries(batch []sampleOrExemplar, pendingData []prompb.TimeSeries) (int, int) {
    var nPendingSamples, nPendingExemplars int
func (s *shards) populateTimeSeries(batch []timeSeries, pendingData []prompb.TimeSeries) (int, int, int) {
    var nPendingSamples, nPendingExemplars, nPendingHistograms int
    for nPending, d := range batch {
        pendingData[nPending].Samples = pendingData[nPending].Samples[:0]
        if s.qm.sendExemplars {
            pendingData[nPending].Exemplars = pendingData[nPending].Exemplars[:0]
        }
        if s.qm.sendNativeHistograms {
            pendingData[nPending].Histograms = pendingData[nPending].Histograms[:0]
        }

        // Number of pending samples is limited by the fact that sendSamples (via sendSamplesWithBackoff)
        // retries endlessly, so once we reach max samples, if we can never send to the endpoint we'll
        // stop reading from the queue. This makes it safe to reference pendingSamples by index.
        if d.isSample {
            pendingData[nPending].Labels = labelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels)
        pendingData[nPending].Labels = labelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels)
        switch d.sType {
        case tSample:
            pendingData[nPending].Samples = append(pendingData[nPending].Samples, prompb.Sample{
                Value:     d.value,
                Timestamp: d.timestamp,
            })
            nPendingSamples++
        } else {
            pendingData[nPending].Labels = labelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels)
        case tExemplar:
            pendingData[nPending].Exemplars = append(pendingData[nPending].Exemplars, prompb.Exemplar{
                Labels:    labelsToLabelsProto(d.exemplarLabels, nil),
                Value:     d.value,
                Timestamp: d.timestamp,
            })
            nPendingExemplars++
        case tHistogram:
            pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, HistogramToHistogramProto(d.timestamp, d.histogram))
            nPendingHistograms++
        }
    }
    return nPendingSamples, nPendingExemplars
    return nPendingSamples, nPendingExemplars, nPendingHistograms
}

func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) {
func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte) {
    begin := time.Now()
    err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, pBuf, buf)
    err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, pBuf, buf)
    if err != nil {
        level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "err", err)
        s.qm.metrics.failedSamplesTotal.Add(float64(sampleCount))
        s.qm.metrics.failedExemplarsTotal.Add(float64(exemplarCount))
        s.qm.metrics.failedHistogramsTotal.Add(float64(histogramCount))
    }

    // These counters are used to calculate the dynamic sharding, and as such

@ -1287,16 +1419,18 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s
    s.qm.dataOut.incr(int64(len(samples)))
    s.qm.dataOutDuration.incr(int64(time.Since(begin)))
    s.qm.lastSendTimestamp.Store(time.Now().Unix())
    // Pending samples/exemplars also should be subtracted as an error means
    // Pending samples/exemplars/histograms also should be subtracted as an error means
    // they will not be retried.
    s.qm.metrics.pendingSamples.Sub(float64(sampleCount))
    s.qm.metrics.pendingExemplars.Sub(float64(exemplarCount))
    s.qm.metrics.pendingHistograms.Sub(float64(histogramCount))
    s.enqueuedSamples.Sub(int64(sampleCount))
    s.enqueuedExemplars.Sub(int64(exemplarCount))
    s.enqueuedHistograms.Sub(int64(histogramCount))
}

// sendSamples to the remote storage with backoff for recoverable errors.
func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) error {
func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte) error {
    // Build the WriteRequest with no metadata.
    req, highest, err := buildWriteRequest(samples, nil, pBuf, *buf)
    if err != nil {

@ -1326,10 +1460,14 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
        if exemplarCount > 0 {
            span.SetAttributes(attribute.Int("exemplars", exemplarCount))
        }
        if histogramCount > 0 {
            span.SetAttributes(attribute.Int("histograms", histogramCount))
        }

        begin := time.Now()
        s.qm.metrics.samplesTotal.Add(float64(sampleCount))
        s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount))
        s.qm.metrics.histogramsTotal.Add(float64(histogramCount))
        err := s.qm.client().Store(ctx, *buf)
        s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds())

@ -1344,6 +1482,7 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
    onRetry := func() {
        s.qm.metrics.retriedSamplesTotal.Add(float64(sampleCount))
        s.qm.metrics.retriedExemplarsTotal.Add(float64(exemplarCount))
        s.qm.metrics.retriedHistogramsTotal.Add(float64(histogramCount))
    }

    err = sendWriteRequestWithBackoff(ctx, s.qm.cfg, s.qm.logger, attemptStore, onRetry)

@ -1420,6 +1559,9 @@ func buildWriteRequest(samples []prompb.TimeSeries, metadata []prompb.MetricMeta
        if len(ts.Exemplars) > 0 && ts.Exemplars[0].Timestamp > highest {
            highest = ts.Exemplars[0].Timestamp
        }
        if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp > highest {
            highest = ts.Histograms[0].Timestamp
        }
    }

    req := &prompb.WriteRequest{

vendor/github.com/prometheus/prometheus/storage/remote/write.go (30 changes, generated, vendored)

@ -26,10 +26,11 @@ import (

    "github.com/prometheus/prometheus/config"
    "github.com/prometheus/prometheus/model/exemplar"
    "github.com/prometheus/prometheus/model/histogram"
    "github.com/prometheus/prometheus/model/labels"
    "github.com/prometheus/prometheus/model/metadata"
    "github.com/prometheus/prometheus/storage"
    "github.com/prometheus/prometheus/tsdb/wal"
    "github.com/prometheus/prometheus/tsdb/wlog"
)

var (

@ -45,6 +46,12 @@ var (
        Name:      "exemplars_in_total",
        Help:      "Exemplars in to remote storage, compare to exemplars out for queue managers.",
    })
    histogramsIn = promauto.NewCounter(prometheus.CounterOpts{
        Namespace: namespace,
        Subsystem: subsystem,
        Name:      "histograms_in_total",
        Help:      "HistogramSamples in to remote storage, compare to histograms out for queue managers.",
    })
)

// WriteStorage represents all the remote write storage.

@ -53,8 +60,8 @@ type WriteStorage struct {
    reg prometheus.Registerer
    mtx sync.Mutex

    watcherMetrics    *wal.WatcherMetrics
    liveReaderMetrics *wal.LiveReaderMetrics
    watcherMetrics    *wlog.WatcherMetrics
    liveReaderMetrics *wlog.LiveReaderMetrics
    externalLabels    labels.Labels
    dir               string
    queues            map[string]*QueueManager

@ -75,8 +82,8 @@ func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, f
    }
    rws := &WriteStorage{
        queues:            make(map[string]*QueueManager),
        watcherMetrics:    wal.NewWatcherMetrics(reg),
        liveReaderMetrics: wal.NewLiveReaderMetrics(reg),
        watcherMetrics:    wlog.NewWatcherMetrics(reg),
        liveReaderMetrics: wlog.NewLiveReaderMetrics(reg),
        logger:            logger,
        reg:               reg,
        flushDeadline:     flushDeadline,

@ -188,6 +195,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
            rws.highestTimestamp,
            rws.scraper,
            rwConf.SendExemplars,
            rwConf.SendNativeHistograms,
        )
        // Keep track of which queues are new so we know which to start.
        newHashes = append(newHashes, hash)

@ -251,6 +259,7 @@ type timestampTracker struct {
    writeStorage         *WriteStorage
    samples              int64
    exemplars            int64
    histograms           int64
    highestTimestamp     int64
    highestRecvTimestamp *maxTimestamp
}

@ -269,6 +278,14 @@ func (t *timestampTracker) AppendExemplar(_ storage.SeriesRef, _ labels.Labels,
    return 0, nil
}

func (t *timestampTracker) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, ts int64, h *histogram.Histogram) (storage.SeriesRef, error) {
    t.histograms++
    if ts > t.highestTimestamp {
        t.highestTimestamp = ts
    }
    return 0, nil
}

func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) {
    // TODO: Add and increment a `metadata` field when we get around to wiring metadata in remote_write.
    // UpdateMetadata is a no-op for remote write (where timestampTracker is being used) for now.

@ -277,10 +294,11 @@ func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels,

// Commit implements storage.Appender.
func (t *timestampTracker) Commit() error {
    t.writeStorage.samplesIn.incr(t.samples + t.exemplars)
    t.writeStorage.samplesIn.incr(t.samples + t.exemplars + t.histograms)

    samplesIn.Add(float64(t.samples))
    exemplarsIn.Add(float64(t.exemplars))
    histogramsIn.Add(float64(t.histograms))
    t.highestRecvTimestamp.Set(float64(t.highestTimestamp / 1000))
    return nil
}

vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go (14 changes, generated, vendored)

@ -117,6 +117,20 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
                level.Debug(h.logger).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr)
            }
        }

        for _, hp := range ts.Histograms {
            hs := HistogramProtoToHistogram(hp)
            _, err = app.AppendHistogram(0, labels, hp.Timestamp, hs)
            if err != nil {
                unwrappedErr := errors.Unwrap(err)
                // Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp, there is
                // a note indicating its inclusion in the future.
                if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
                    level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
                }
                return err
            }
        }
    }

    if outOfOrderExemplarErrs > 0 {
|
133
vendor/github.com/prometheus/prometheus/storage/series.go
generated
vendored
133
vendor/github.com/prometheus/prometheus/storage/series.go
generated
vendored
|
@ -14,9 +14,11 @@
package storage

import (
    "fmt"
    "math"
    "sort"

    "github.com/prometheus/prometheus/model/histogram"
    "github.com/prometheus/prometheus/model/labels"
    "github.com/prometheus/prometheus/tsdb/chunkenc"
    "github.com/prometheus/prometheus/tsdb/chunks"

@ -90,21 +92,39 @@ func (it *listSeriesIterator) At() (int64, float64) {
    return s.T(), s.V()
}

func (it *listSeriesIterator) Next() bool {
    it.idx++
    return it.idx < it.samples.Len()
func (it *listSeriesIterator) AtHistogram() (int64, *histogram.Histogram) {
    s := it.samples.Get(it.idx)
    return s.T(), s.H()
}

func (it *listSeriesIterator) Seek(t int64) bool {
func (it *listSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
    s := it.samples.Get(it.idx)
    return s.T(), s.FH()
}

func (it *listSeriesIterator) AtT() int64 {
    s := it.samples.Get(it.idx)
    return s.T()
}

func (it *listSeriesIterator) Next() chunkenc.ValueType {
    it.idx++
    if it.idx >= it.samples.Len() {
        return chunkenc.ValNone
    }
    return it.samples.Get(it.idx).Type()
}

func (it *listSeriesIterator) Seek(t int64) chunkenc.ValueType {
    if it.idx == -1 {
        it.idx = 0
    }
    if it.idx >= it.samples.Len() {
        return false
        return chunkenc.ValNone
    }
    // No-op check.
    if s := it.samples.Get(it.idx); s.T() >= t {
        return true
        return s.Type()
    }
    // Do binary search between current position and end.
    it.idx += sort.Search(it.samples.Len()-it.idx, func(i int) bool {

@ -112,7 +132,10 @@ func (it *listSeriesIterator) Seek(t int64) bool {
        return s.T() >= t
    })

    return it.idx < it.samples.Len()
    if it.idx >= it.samples.Len() {
        return chunkenc.ValNone
    }
    return it.samples.Get(it.idx).Type()
}

func (it *listSeriesIterator) Err() error { return nil }

@ -230,27 +253,32 @@ func NewSeriesToChunkEncoder(series Series) ChunkSeries {
}

func (s *seriesToChunkEncoder) Iterator() chunks.Iterator {
    chk := chunkenc.NewXORChunk()
    app, err := chk.Appender()
    if err != nil {
        return errChunksIterator{err: err}
    }
    var (
        chk chunkenc.Chunk
        app chunkenc.Appender
        err error
    )
    mint := int64(math.MaxInt64)
    maxt := int64(math.MinInt64)

    chks := []chunks.Meta{}

    i := 0
    seriesIter := s.Series.Iterator()
    for seriesIter.Next() {
        // Create a new chunk if too many samples in the current one.
        if i >= seriesToChunkEncoderSplit {
            chks = append(chks, chunks.Meta{
                MinTime: mint,
                MaxTime: maxt,
                Chunk:   chk,
            })
            chk = chunkenc.NewXORChunk()
    lastType := chunkenc.ValNone
    for typ := seriesIter.Next(); typ != chunkenc.ValNone; typ = seriesIter.Next() {
        if typ != lastType || i >= seriesToChunkEncoderSplit {
            // Create a new chunk if the sample type changed or too many samples in the current one.
            if chk != nil {
                chks = append(chks, chunks.Meta{
                    MinTime: mint,
                    MaxTime: maxt,
                    Chunk:   chk,
                })
            }
            chk, err = chunkenc.NewEmptyChunk(typ.ChunkEncoding())
            if err != nil {
                return errChunksIterator{err: err}
            }
            app, err = chk.Appender()
            if err != nil {
                return errChunksIterator{err: err}

@ -259,9 +287,23 @@ func (s *seriesToChunkEncoder) Iterator() chunks.Iterator {
            // maxt is immediately overwritten below which is why setting it here won't make a difference.
            i = 0
        }
        lastType = typ

        t, v := seriesIter.At()
        app.Append(t, v)
        var (
            t int64
            v float64
            h *histogram.Histogram
        )
        switch typ {
        case chunkenc.ValFloat:
            t, v = seriesIter.At()
            app.Append(t, v)
        case chunkenc.ValHistogram:
            t, h = seriesIter.AtHistogram()
            app.AppendHistogram(t, h)
        default:
            return errChunksIterator{err: fmt.Errorf("unknown sample type %s", typ.String())}
        }

        maxt = t
        if mint == math.MaxInt64 {

@ -273,11 +315,13 @@ func (s *seriesToChunkEncoder) Iterator() chunks.Iterator {
        return errChunksIterator{err: err}
    }

    chks = append(chks, chunks.Meta{
        MinTime: mint,
        MaxTime: maxt,
        Chunk:   chk,
    })
    if chk != nil {
        chks = append(chks, chunks.Meta{
            MinTime: mint,
            MaxTime: maxt,
            Chunk:   chk,
        })
    }

    return NewListChunkSeriesIterator(chks...)
}

@ -293,21 +337,34 @@ func (e errChunksIterator) Err() error { return e.err }
// ExpandSamples iterates over all samples in the iterator, buffering all in slice.
// Optionally it takes a samples constructor, useful when you want to compare sample slices with different
// sample implementations. If nil, the sample type from this package will be used.
func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, v float64) tsdbutil.Sample) ([]tsdbutil.Sample, error) {
func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample) ([]tsdbutil.Sample, error) {
    if newSampleFn == nil {
        newSampleFn = func(t int64, v float64) tsdbutil.Sample { return sample{t, v} }
        newSampleFn = func(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample {
            return sample{t, v, h, fh}
        }
    }

    var result []tsdbutil.Sample
    for iter.Next() {
        t, v := iter.At()
        // NaNs can't be compared normally, so substitute for another value.
        if math.IsNaN(v) {
            v = -42
    for {
        switch iter.Next() {
        case chunkenc.ValNone:
            return result, iter.Err()
        case chunkenc.ValFloat:
            t, v := iter.At()
            // NaNs can't be compared normally, so substitute for another value.
            if math.IsNaN(v) {
                v = -42
            }
            result = append(result, newSampleFn(t, v, nil, nil))
        case chunkenc.ValHistogram:
            t, h := iter.AtHistogram()
            result = append(result, newSampleFn(t, 0, h, nil))
        case chunkenc.ValFloatHistogram:
            t, fh := iter.AtFloatHistogram()
            result = append(result, newSampleFn(t, 0, nil, fh))

        }
        result = append(result, newSampleFn(t, v))
    }
    return result, iter.Err()
}
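Editor's note: a brief usage sketch of the widened ExpandSamples signature above. Passing nil for the constructor uses the package's own sample type, which now also carries the histogram pointers; the iterator name is an assumption:

    samples, err := ExpandSamples(it, nil) // it is an assumed chunkenc.Iterator already in scope
    if err != nil {
        // Iteration failed; samples holds whatever was buffered before the error.
    }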

// ExpandChunks iterates over all chunks in the iterator, buffering all in slice.

vendor/github.com/prometheus/prometheus/tsdb/README.md (2 changes, generated, vendored)

@ -13,7 +13,7 @@ which handles storage and querying of all Prometheus v2 data.

## External resources

* A writeup of the original design can be found [here](https://fabxc.org/blog/2017-04-10-writing-a-tsdb/).
* A writeup of the original design can be found [here](https://web.archive.org/web/20210803115658/https://fabxc.org/tsdb/).
* Video: [Storing 16 Bytes at Scale](https://youtu.be/b_pEevMAC3I) from [PromCon 2017](https://promcon.io/2017-munich/).
* Compression is based on the Gorilla TSDB [white paper](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf).


vendor/github.com/prometheus/prometheus/tsdb/block.go (6 changes, generated, vendored)

@ -19,13 +19,13 @@ import (
    "io"
    "os"
    "path/filepath"
    "sort"
    "sync"

    "github.com/go-kit/log"
    "github.com/go-kit/log/level"
    "github.com/oklog/ulid"
    "github.com/pkg/errors"
    "golang.org/x/exp/slices"

    "github.com/prometheus/prometheus/model/labels"
    "github.com/prometheus/prometheus/storage"

@ -198,7 +198,7 @@ func (bm *BlockMetaCompaction) SetOutOfOrder() {
        return
    }
    bm.Hints = append(bm.Hints, CompactionHintFromOutOfOrder)
    sort.Strings(bm.Hints)
    slices.Sort(bm.Hints)
}

func (bm *BlockMetaCompaction) FromOutOfOrder() bool {

@ -463,7 +463,7 @@ func (r blockIndexReader) SortedLabelValues(name string, matchers ...*labels.Mat
    } else {
        st, err = r.LabelValues(name, matchers...)
        if err == nil {
            sort.Strings(st)
            slices.Sort(st)
        }
    }


vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go (1 change, generated, vendored)

@ -71,6 +71,7 @@ func (w *BlockWriter) initHead() error {
    opts := DefaultHeadOptions()
    opts.ChunkRange = w.blockSize
    opts.ChunkDirRoot = w.chunkDir
    opts.EnableNativeHistograms.Store(true)
    h, err := NewHead(nil, w.logger, nil, nil, opts, NewHeadStats())
    if err != nil {
        return errors.Wrap(err, "tsdb.NewHead")

vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go (163 changes, generated, vendored)

@ -18,27 +18,32 @@ import (
    "sync"

    "github.com/pkg/errors"

    "github.com/prometheus/prometheus/model/histogram"
)

// Encoding is the identifier for a chunk encoding.
type Encoding uint8

// The different available chunk encodings.
const (
    EncNone Encoding = iota
    EncXOR
    EncHistogram
)

func (e Encoding) String() string {
    switch e {
    case EncNone:
        return "none"
    case EncXOR:
        return "XOR"
    case EncHistogram:
        return "histogram"
    }
    return "<unknown>"
}

// The different available chunk encodings.
const (
    EncNone Encoding = iota
    EncXOR
)

// Chunk encodings for out-of-order chunks.
// These encodings must be only used by the Head block for its internal bookkeeping.
const (

@ -50,8 +55,9 @@ func IsOutOfOrderChunk(e Encoding) bool {
    return (e & OutOfOrderMask) != 0
}

// IsValidEncoding returns true for supported encodings.
func IsValidEncoding(e Encoding) bool {
    return e == EncXOR || e == EncOOOXOR
    return e == EncXOR || e == EncOOOXOR || e == EncHistogram
}

// Chunk holds a sequence of sample pairs that can be iterated over and appended to.

@ -84,26 +90,80 @@ type Chunk interface {
// Appender adds sample pairs to a chunk.
type Appender interface {
    Append(int64, float64)
    AppendHistogram(t int64, h *histogram.Histogram)
}

// Iterator is a simple iterator that can only get the next value.
// Iterator iterates over the samples of a time series, in timestamp-increasing order.
type Iterator interface {
    // Next advances the iterator by one.
    Next() bool
    // Seek advances the iterator forward to the first sample with the timestamp equal or greater than t.
    // If current sample found by previous `Next` or `Seek` operation already has this property, Seek has no effect.
    // Seek returns true, if such sample exists, false otherwise.
    // Iterator is exhausted when the Seek returns false.
    Seek(t int64) bool
    // At returns the current timestamp/value pair.
    // Before the iterator has advanced At behaviour is unspecified.
    // Next advances the iterator by one and returns the type of the value
    // at the new position (or ValNone if the iterator is exhausted).
    Next() ValueType
    // Seek advances the iterator forward to the first sample with a
    // timestamp equal or greater than t. If the current sample found by a
    // previous `Next` or `Seek` operation already has this property, Seek
    // has no effect. If a sample has been found, Seek returns the type of
    // its value. Otherwise, it returns ValNone, after which the iterator is
    // exhausted.
    Seek(t int64) ValueType
    // At returns the current timestamp/value pair if the value is a float.
    // Before the iterator has advanced, the behaviour is unspecified.
    At() (int64, float64)
    // Err returns the current error. It should be used only after iterator is
    // exhausted, that is `Next` or `Seek` returns false.
    // AtHistogram returns the current timestamp/value pair if the value is
    // a histogram with integer counts. Before the iterator has advanced,
    // the behaviour is unspecified.
    AtHistogram() (int64, *histogram.Histogram)
    // AtFloatHistogram returns the current timestamp/value pair if the
    // value is a histogram with floating-point counts. It also works if the
    // value is a histogram with integer counts, in which case a
    // FloatHistogram copy of the histogram is returned. Before the iterator
    // has advanced, the behaviour is unspecified.
    AtFloatHistogram() (int64, *histogram.FloatHistogram)
    // AtT returns the current timestamp.
    // Before the iterator has advanced, the behaviour is unspecified.
    AtT() int64
    // Err returns the current error. It should be used only after the
    // iterator is exhausted, i.e. `Next` or `Seek` have returned ValNone.
    Err() error
}

// ValueType defines the type of a value an Iterator points to.
type ValueType uint8

// Possible values for ValueType.
const (
    ValNone           ValueType = iota // No value at the current position.
    ValFloat                           // A simple float, retrieved with At.
    ValHistogram                       // A histogram, retrieved with AtHistogram, but AtFloatHistogram works, too.
    ValFloatHistogram                  // A floating-point histogram, retrieved with AtFloatHistogram.
)

func (v ValueType) String() string {
    switch v {
    case ValNone:
        return "none"
    case ValFloat:
        return "float"
    case ValHistogram:
        return "histogram"
    case ValFloatHistogram:
        return "floathistogram"
    default:
        return "unknown"
    }
}

func (v ValueType) ChunkEncoding() Encoding {
    switch v {
    case ValFloat:
        return EncXOR
    case ValHistogram:
        return EncHistogram
    default:
        return EncNone
    }
}
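Editor's note: with Next and Seek now returning a ValueType instead of a bool, consumers switch on the returned type before calling the matching accessor. A minimal sketch of draining an iterator under the new contract, mirroring the loops introduced elsewhere in this commit:

    for typ := it.Next(); typ != ValNone; typ = it.Next() {
        switch typ {
        case ValFloat:
            t, v := it.At()
            _, _ = t, v
        case ValHistogram:
            t, h := it.AtHistogram()
            _, _ = t, h
        case ValFloatHistogram:
            t, fh := it.AtFloatHistogram()
            _, _ = t, fh
        }
    }
    if err := it.Err(); err != nil {
        // Handle the error only after the iterator is exhausted.
    }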

// MockSeriesIterator returns an iterator for a mock series with custom timeStamps and values.
func MockSeriesIterator(timestamps []int64, values []float64) Iterator {
    return &mockSeriesIterator{

@ -119,18 +179,29 @@ type mockSeriesIterator struct {
    currIndex int
}

func (it *mockSeriesIterator) Seek(int64) bool { return false }
func (it *mockSeriesIterator) Seek(int64) ValueType { return ValNone }

func (it *mockSeriesIterator) At() (int64, float64) {
    return it.timeStamps[it.currIndex], it.values[it.currIndex]
}

func (it *mockSeriesIterator) Next() bool {
func (it *mockSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { return math.MinInt64, nil }

func (it *mockSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
    return math.MinInt64, nil
}

func (it *mockSeriesIterator) AtT() int64 {
    return it.timeStamps[it.currIndex]
}

func (it *mockSeriesIterator) Next() ValueType {
    if it.currIndex < len(it.timeStamps)-1 {
        it.currIndex++
        return true
        return ValFloat
    }

    return false
    return ValNone
}
func (it *mockSeriesIterator) Err() error { return nil }


@ -141,10 +212,13 @@ func NewNopIterator() Iterator {

type nopIterator struct{}

func (nopIterator) Seek(int64) bool { return false }
func (nopIterator) At() (int64, float64) { return math.MinInt64, 0 }
func (nopIterator) Next() bool { return false }
func (nopIterator) Err() error { return nil }
func (nopIterator) Next() ValueType { return ValNone }
func (nopIterator) Seek(int64) ValueType { return ValNone }
func (nopIterator) At() (int64, float64) { return math.MinInt64, 0 }
func (nopIterator) AtHistogram() (int64, *histogram.Histogram) { return math.MinInt64, nil }
func (nopIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { return math.MinInt64, nil }
func (nopIterator) AtT() int64 { return math.MinInt64 }
func (nopIterator) Err() error { return nil }

// Pool is used to create and reuse chunk references to avoid allocations.
type Pool interface {

@ -154,7 +228,8 @@ type Pool interface {

// pool is a memory pool of chunk objects.
type pool struct {
    xor sync.Pool
    xor       sync.Pool
    histogram sync.Pool
}

// NewPool returns a new pool.

@ -165,6 +240,11 @@ func NewPool() Pool {
                return &XORChunk{b: bstream{}}
            },
        },
        histogram: sync.Pool{
            New: func() interface{} {
                return &HistogramChunk{b: bstream{}}
            },
        },
    }
}


@ -175,6 +255,11 @@ func (p *pool) Get(e Encoding, b []byte) (Chunk, error) {
        c.b.stream = b
        c.b.count = 0
        return c, nil
    case EncHistogram:
        c := p.histogram.Get().(*HistogramChunk)
        c.b.stream = b
        c.b.count = 0
        return c, nil
    }
    return nil, errors.Errorf("invalid chunk encoding %q", e)
}

@ -192,6 +277,17 @@ func (p *pool) Put(c Chunk) error {
        xc.b.stream = nil
        xc.b.count = 0
        p.xor.Put(c)
    case EncHistogram:
        sh, ok := c.(*HistogramChunk)
        // This may happen often with wrapped chunks. Nothing we can really do about
        // it but returning an error would cause a lot of allocations again. Thus,
        // we just skip it.
        if !ok {
            return nil
        }
        sh.b.stream = nil
        sh.b.count = 0
        p.histogram.Put(c)
    default:
        return errors.Errorf("invalid chunk encoding %q", c.Encoding())
    }

@ -205,6 +301,19 @@ func FromData(e Encoding, d []byte) (Chunk, error) {
    switch e {
    case EncXOR, EncOOOXOR:
        return &XORChunk{b: bstream{count: 0, stream: d}}, nil
    case EncHistogram:
        return &HistogramChunk{b: bstream{count: 0, stream: d}}, nil
    }
    return nil, errors.Errorf("invalid chunk encoding %q", e)
}

// NewEmptyChunk returns an empty chunk for the given encoding.
func NewEmptyChunk(e Encoding) (Chunk, error) {
    switch e {
    case EncXOR:
        return NewXORChunk(), nil
    case EncHistogram:
        return NewHistogramChunk(), nil
    }
    return nil, errors.Errorf("invalid chunk encoding %q", e)
}
|
876
vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go
generated
vendored
Normal file
876
vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go
generated
vendored
Normal file
|
@ -0,0 +1,876 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package chunkenc

import (
    "encoding/binary"
    "math"

    "github.com/prometheus/prometheus/model/histogram"
    "github.com/prometheus/prometheus/model/value"
)

// HistogramChunk holds encoded sample data for a sparse, high-resolution
// histogram.
//
// Each sample has multiple "fields", stored in the following way (raw = store
// number directly, delta = store delta to the previous number, dod = store
// delta of the delta to the previous number, xor = what we do for regular
// sample values):
//
//   field →     ts     count   zeroCount   sum   []posbuckets   []negbuckets
//   sample 1    raw    raw     raw         raw   []raw          []raw
//   sample 2    delta  delta   delta       xor   []delta        []delta
//   sample >2   dod    dod     dod         xor   []dod          []dod
type HistogramChunk struct {
    b bstream
}

// NewHistogramChunk returns a new chunk with histogram encoding.
func NewHistogramChunk() *HistogramChunk {
    b := make([]byte, 3, 128)
    return &HistogramChunk{b: bstream{stream: b, count: 0}}
}

// Encoding returns the encoding type.
func (c *HistogramChunk) Encoding() Encoding {
    return EncHistogram
}

// Bytes returns the underlying byte slice of the chunk.
func (c *HistogramChunk) Bytes() []byte {
    return c.b.bytes()
}

// NumSamples returns the number of samples in the chunk.
func (c *HistogramChunk) NumSamples() int {
    return int(binary.BigEndian.Uint16(c.Bytes()))
}

// Layout returns the histogram layout. Only call this on chunks that have at
// least one sample.
func (c *HistogramChunk) Layout() (
    schema int32, zeroThreshold float64,
    negativeSpans, positiveSpans []histogram.Span,
    err error,
) {
    if c.NumSamples() == 0 {
        panic("HistogramChunk.Layout() called on an empty chunk")
    }
    b := newBReader(c.Bytes()[2:])
    return readHistogramChunkLayout(&b)
}

// CounterResetHeader defines the first 2 bits of the chunk header.
type CounterResetHeader byte

const (
    // CounterReset means there was definitely a counter reset that resulted in this chunk.
    CounterReset CounterResetHeader = 0b10000000
    // NotCounterReset means there was definitely no counter reset when cutting this chunk.
    NotCounterReset CounterResetHeader = 0b01000000
    // GaugeType means this chunk contains a gauge histogram, where counter resets do not happen.
    GaugeType CounterResetHeader = 0b11000000
    // UnknownCounterReset means we cannot say if this chunk was created due to a counter reset or not.
    // An explicit counter reset detection needs to happen during query time.
    UnknownCounterReset CounterResetHeader = 0b00000000
)

// SetCounterResetHeader sets the counter reset header.
func (c *HistogramChunk) SetCounterResetHeader(h CounterResetHeader) {
    switch h {
    case CounterReset, NotCounterReset, GaugeType, UnknownCounterReset:
        bytes := c.Bytes()
        bytes[2] = (bytes[2] & 0b00111111) | byte(h)
    default:
        panic("invalid CounterResetHeader type")
    }
}

// GetCounterResetHeader returns the info about the first 2 bits of the chunk
// header.
func (c *HistogramChunk) GetCounterResetHeader() CounterResetHeader {
    return CounterResetHeader(c.Bytes()[2] & 0b11000000)
}

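Editor's note: a minimal sketch, not part of the commit, of stamping and reading back a chunk's counter-reset header with the methods above:

    hc := NewHistogramChunk()
    hc.SetCounterResetHeader(GaugeType) // writes the top two bits of header byte 2
    if hc.GetCounterResetHeader() != GaugeType {
        panic("unexpected counter reset header")
    }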
// Compact implements the Chunk interface.
func (c *HistogramChunk) Compact() {
    if l := len(c.b.stream); cap(c.b.stream) > l+chunkCompactCapacityThreshold {
        buf := make([]byte, l)
        copy(buf, c.b.stream)
        c.b.stream = buf
    }
}

// Appender implements the Chunk interface.
func (c *HistogramChunk) Appender() (Appender, error) {
    it := c.iterator(nil)

    // To get an appender, we must know the state it would have if we had
    // appended all existing data from scratch. We iterate through the end
    // and populate via the iterator's state.
    for it.Next() == ValHistogram {
    }
    if err := it.Err(); err != nil {
        return nil, err
    }

    a := &HistogramAppender{
        b: &c.b,

        schema:        it.schema,
        zThreshold:    it.zThreshold,
        pSpans:        it.pSpans,
        nSpans:        it.nSpans,
        t:             it.t,
        cnt:           it.cnt,
        zCnt:          it.zCnt,
        tDelta:        it.tDelta,
        cntDelta:      it.cntDelta,
        zCntDelta:     it.zCntDelta,
        pBuckets:      it.pBuckets,
        nBuckets:      it.nBuckets,
        pBucketsDelta: it.pBucketsDelta,
        nBucketsDelta: it.nBucketsDelta,

        sum:      it.sum,
        leading:  it.leading,
        trailing: it.trailing,
    }
    if it.numTotal == 0 {
        a.leading = 0xff
    }
    return a, nil
}

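Editor's note: a minimal sketch of appending the first histogram sample to a fresh chunk via the Appender above; the histogram literal is illustrative only:

    chk := NewHistogramChunk()
    app, err := chk.Appender()
    if err != nil {
        panic(err)
    }
    app.AppendHistogram(1669900000000, &histogram.Histogram{
        Schema:          0,
        Count:           3,
        Sum:             2.7,
        PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
        PositiveBuckets: []int64{3}, // bucket counts are delta-encoded
    })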
func countSpans(spans []histogram.Span) int {
    var cnt int
    for _, s := range spans {
        cnt += int(s.Length)
    }
    return cnt
}

func newHistogramIterator(b []byte) *histogramIterator {
    it := &histogramIterator{
        br:       newBReader(b),
        numTotal: binary.BigEndian.Uint16(b),
        t:        math.MinInt64,
    }
    // The first 3 bytes contain chunk headers.
    // We skip that for actual samples.
    _, _ = it.br.readBits(24)
    return it
}

func (c *HistogramChunk) iterator(it Iterator) *histogramIterator {
    // This comment is copied from XORChunk.iterator:
    // Should iterators guarantee to act on a copy of the data so it doesn't lock append?
    // When using striped locks to guard access to chunks, probably yes.
    // Could only copy data if the chunk is not completed yet.
    if histogramIter, ok := it.(*histogramIterator); ok {
        histogramIter.Reset(c.b.bytes())
        return histogramIter
    }
    return newHistogramIterator(c.b.bytes())
}

// Iterator implements the Chunk interface.
func (c *HistogramChunk) Iterator(it Iterator) Iterator {
    return c.iterator(it)
}

// HistogramAppender is an Appender implementation for sparse histograms.
type HistogramAppender struct {
    b *bstream

    // Layout:
    schema         int32
    zThreshold     float64
    pSpans, nSpans []histogram.Span

    // Although we intend to start new chunks on counter resets, we still
    // have to handle negative deltas for gauge histograms. Therefore, even
    // deltas are signed types here (even for tDelta to not treat that one
    // specially).
    t                            int64
    cnt, zCnt                    uint64
    tDelta, cntDelta, zCntDelta  int64
    pBuckets, nBuckets           []int64
    pBucketsDelta, nBucketsDelta []int64

    // The sum is Gorilla xor encoded.
    sum      float64
    leading  uint8
    trailing uint8
}

// Append implements Appender. This implementation panics because normal float
// samples must never be appended to a histogram chunk.
func (a *HistogramAppender) Append(int64, float64) {
    panic("appended a float sample to a histogram chunk")
}

// Appendable returns whether the chunk can be appended to, and if so
// whether any recoding needs to happen using the provided interjections
// (in case of any new buckets, positive or negative range, respectively).
//
// The chunk is not appendable in the following cases:
//
// • The schema has changed.
//
// • The threshold for the zero bucket has changed.
//
// • Any buckets have disappeared.
//
// • There was a counter reset in the count of observations or in any bucket,
// including the zero bucket.
//
// • The last sample in the chunk was stale while the current sample is not stale.
//
// The method returns an additional boolean set to true if it is not appendable
// because of a counter reset. If the given sample is stale, it is always ok to
// append. If counterReset is true, okToAppend is always false.
func (a *HistogramAppender) Appendable(h *histogram.Histogram) (
    positiveInterjections, negativeInterjections []Interjection,
    okToAppend, counterReset bool,
) {
    if value.IsStaleNaN(h.Sum) {
        // This is a stale sample whose buckets and spans don't matter.
        okToAppend = true
        return
    }
    if value.IsStaleNaN(a.sum) {
        // If the last sample was stale, then we can only accept stale
        // samples in this chunk.
        return
    }

    if h.Count < a.cnt {
        // There has been a counter reset.
        counterReset = true
        return
    }

    if h.Schema != a.schema || h.ZeroThreshold != a.zThreshold {
        return
    }

    if h.ZeroCount < a.zCnt {
        // There has been a counter reset since ZeroThreshold didn't change.
        counterReset = true
        return
    }

    var ok bool
    positiveInterjections, ok = compareSpans(a.pSpans, h.PositiveSpans)
    if !ok {
        counterReset = true
        return
    }
    negativeInterjections, ok = compareSpans(a.nSpans, h.NegativeSpans)
    if !ok {
        counterReset = true
        return
    }

    if counterResetInAnyBucket(a.pBuckets, h.PositiveBuckets, a.pSpans, h.PositiveSpans) ||
        counterResetInAnyBucket(a.nBuckets, h.NegativeBuckets, a.nSpans, h.NegativeSpans) {
        counterReset, positiveInterjections, negativeInterjections = true, nil, nil
        return
    }

    okToAppend = true
    return
}

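Editor's note: the Appendable contract above implies a check-then-act pattern at call sites. A hedged sketch; cutting a new chunk is left abstract, since that logic lives in the calling appender rather than in this file:

    pi, ni, okToAppend, counterReset := app.Appendable(h)
    switch {
    case okToAppend && len(pi) == 0 && len(ni) == 0:
        app.AppendHistogram(t, h) // layout unchanged, append directly
    case okToAppend:
        // New buckets appeared: recode the existing chunk, then append
        // through the appender returned by Recode.
        newChunk, newApp := app.Recode(pi, ni, h.PositiveSpans, h.NegativeSpans)
        newApp.AppendHistogram(t, h)
        _ = newChunk
    default:
        _ = counterReset // schema change, vanished buckets, or counter reset: cut a new chunk instead
    }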
// counterResetInAnyBucket returns true if there was a counter reset for any
|
||||
// bucket. This should be called only when the bucket layout is the same or new
|
||||
// buckets were added. It does not handle the case of buckets missing.
|
||||
func counterResetInAnyBucket(oldBuckets, newBuckets []int64, oldSpans, newSpans []histogram.Span) bool {
|
||||
if len(oldSpans) == 0 || len(oldBuckets) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
oldSpanSliceIdx, newSpanSliceIdx := 0, 0 // Index for the span slices.
|
||||
oldInsideSpanIdx, newInsideSpanIdx := uint32(0), uint32(0) // Index inside a span.
|
||||
oldIdx, newIdx := oldSpans[0].Offset, newSpans[0].Offset
|
||||
|
||||
oldBucketSliceIdx, newBucketSliceIdx := 0, 0 // Index inside bucket slice.
|
||||
oldVal, newVal := oldBuckets[0], newBuckets[0]
|
||||
|
||||
// Since we assume that new spans won't have missing buckets, there will never be a case
|
||||
// where the old index will not find a matching new index.
|
||||
for {
|
||||
if oldIdx == newIdx {
|
||||
if newVal < oldVal {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
if oldIdx <= newIdx {
|
||||
// Moving ahead old bucket and span by 1 index.
|
||||
if oldInsideSpanIdx == oldSpans[oldSpanSliceIdx].Length-1 {
|
||||
// Current span is over.
|
||||
oldSpanSliceIdx++
|
||||
oldInsideSpanIdx = 0
|
||||
if oldSpanSliceIdx >= len(oldSpans) {
|
||||
// All old spans are over.
|
||||
break
|
||||
}
|
||||
oldIdx += 1 + oldSpans[oldSpanSliceIdx].Offset
|
||||
} else {
|
||||
oldInsideSpanIdx++
|
||||
oldIdx++
|
||||
}
|
||||
oldBucketSliceIdx++
|
||||
oldVal += oldBuckets[oldBucketSliceIdx]
|
||||
}
|
||||
|
||||
if oldIdx > newIdx {
|
||||
// Moving ahead new bucket and span by 1 index.
|
||||
if newInsideSpanIdx == newSpans[newSpanSliceIdx].Length-1 {
|
||||
// Current span is over.
|
||||
newSpanSliceIdx++
|
||||
newInsideSpanIdx = 0
|
||||
if newSpanSliceIdx >= len(newSpans) {
|
||||
// All new spans are over.
|
||||
// This should not happen, old spans above should catch this first.
|
||||
panic("new spans over before old spans in counterReset")
|
||||
}
|
||||
newIdx += 1 + newSpans[newSpanSliceIdx].Offset
|
||||
} else {
|
||||
newInsideSpanIdx++
|
||||
newIdx++
|
||||
}
|
||||
newBucketSliceIdx++
|
||||
newVal += newBuckets[newBucketSliceIdx]
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// AppendHistogram appends a histogram to the chunk. The caller must ensure that
|
||||
// the histogram is properly structured, e.g. the number of buckets used
|
||||
// corresponds to the number conveyed by the span structures. First call
|
||||
// Appendable() and act accordingly!
|
||||
func (a *HistogramAppender) AppendHistogram(t int64, h *histogram.Histogram) {
|
||||
var tDelta, cntDelta, zCntDelta int64
|
||||
num := binary.BigEndian.Uint16(a.b.bytes())
|
||||
|
||||
if value.IsStaleNaN(h.Sum) {
|
||||
// Emptying out other fields to write no buckets, and an empty
|
||||
// layout in case of first histogram in the chunk.
|
||||
h = &histogram.Histogram{Sum: h.Sum}
|
||||
}
|
||||
|
||||
if num == 0 {
|
||||
// The first append gets the privilege to dictate the layout
|
||||
// but it's also responsible for encoding it into the chunk!
|
||||
writeHistogramChunkLayout(a.b, h.Schema, h.ZeroThreshold, h.PositiveSpans, h.NegativeSpans)
|
||||
a.schema = h.Schema
|
||||
a.zThreshold = h.ZeroThreshold
|
||||
|
||||
if len(h.PositiveSpans) > 0 {
|
||||
a.pSpans = make([]histogram.Span, len(h.PositiveSpans))
|
||||
copy(a.pSpans, h.PositiveSpans)
|
||||
} else {
|
||||
a.pSpans = nil
|
||||
}
|
||||
if len(h.NegativeSpans) > 0 {
|
||||
a.nSpans = make([]histogram.Span, len(h.NegativeSpans))
|
||||
copy(a.nSpans, h.NegativeSpans)
|
||||
} else {
|
||||
a.nSpans = nil
|
||||
}
|
||||
|
||||
numPBuckets, numNBuckets := countSpans(h.PositiveSpans), countSpans(h.NegativeSpans)
|
||||
if numPBuckets > 0 {
|
||||
a.pBuckets = make([]int64, numPBuckets)
|
||||
a.pBucketsDelta = make([]int64, numPBuckets)
|
||||
} else {
|
||||
a.pBuckets = nil
|
||||
a.pBucketsDelta = nil
|
||||
}
|
||||
if numNBuckets > 0 {
|
||||
a.nBuckets = make([]int64, numNBuckets)
|
||||
a.nBucketsDelta = make([]int64, numNBuckets)
|
||||
} else {
|
||||
a.nBuckets = nil
|
||||
a.nBucketsDelta = nil
|
||||
}
|
||||
|
||||
// Now store the actual data.
|
||||
putVarbitInt(a.b, t)
|
||||
putVarbitUint(a.b, h.Count)
|
||||
putVarbitUint(a.b, h.ZeroCount)
|
||||
a.b.writeBits(math.Float64bits(h.Sum), 64)
|
||||
for _, b := range h.PositiveBuckets {
|
||||
putVarbitInt(a.b, b)
|
||||
}
|
||||
for _, b := range h.NegativeBuckets {
|
||||
putVarbitInt(a.b, b)
|
||||
}
|
||||
} else {
|
||||
// The case for the 2nd sample with single deltas is implicitly handled correctly with the double delta code,
|
||||
// so we don't need a separate single delta logic for the 2nd sample.
|
||||
|
||||
tDelta = t - a.t
|
||||
cntDelta = int64(h.Count) - int64(a.cnt)
|
||||
zCntDelta = int64(h.ZeroCount) - int64(a.zCnt)
|
||||
|
||||
tDod := tDelta - a.tDelta
|
||||
cntDod := cntDelta - a.cntDelta
|
||||
zCntDod := zCntDelta - a.zCntDelta
|
||||
|
||||
if value.IsStaleNaN(h.Sum) {
|
||||
cntDod, zCntDod = 0, 0
|
||||
}
|
||||
|
||||
putVarbitInt(a.b, tDod)
|
||||
putVarbitInt(a.b, cntDod)
|
||||
putVarbitInt(a.b, zCntDod)
|
||||
|
||||
a.writeSumDelta(h.Sum)
|
||||
|
||||
for i, b := range h.PositiveBuckets {
|
||||
delta := b - a.pBuckets[i]
|
||||
dod := delta - a.pBucketsDelta[i]
|
||||
putVarbitInt(a.b, dod)
|
||||
a.pBucketsDelta[i] = delta
|
||||
}
|
||||
for i, b := range h.NegativeBuckets {
|
||||
delta := b - a.nBuckets[i]
|
||||
dod := delta - a.nBucketsDelta[i]
|
||||
putVarbitInt(a.b, dod)
|
||||
a.nBucketsDelta[i] = delta
|
||||
}
|
||||
}
|
||||
|
||||
binary.BigEndian.PutUint16(a.b.bytes(), num+1)
|
||||
|
||||
a.t = t
|
||||
a.cnt = h.Count
|
||||
a.zCnt = h.ZeroCount
|
||||
a.tDelta = tDelta
|
||||
a.cntDelta = cntDelta
|
||||
a.zCntDelta = zCntDelta
|
||||
|
||||
copy(a.pBuckets, h.PositiveBuckets)
|
||||
copy(a.nBuckets, h.NegativeBuckets)
|
||||
// Note that the bucket deltas were already updated above.
|
||||
a.sum = h.Sum
|
||||
}

// Recode converts the current chunk to accommodate an expansion of the set of
// (positive and/or negative) buckets used, according to the provided
// interjections, resulting in the honoring of the provided new positive and
// negative spans. To continue appending, use the returned Appender rather than
// the receiver of this method.
func (a *HistogramAppender) Recode(
	positiveInterjections, negativeInterjections []Interjection,
	positiveSpans, negativeSpans []histogram.Span,
) (Chunk, Appender) {
	// TODO(beorn7): This currently just decodes everything and then encodes
	// it again with the new span layout. This can probably be done in-place
	// by editing the chunk. But let's first see how expensive it is in the
	// big picture. Also, in-place editing might create concurrency issues.
	byts := a.b.bytes()
	it := newHistogramIterator(byts)
	hc := NewHistogramChunk()
	app, err := hc.Appender()
	if err != nil {
		panic(err)
	}
	numPositiveBuckets, numNegativeBuckets := countSpans(positiveSpans), countSpans(negativeSpans)

	for it.Next() == ValHistogram {
		tOld, hOld := it.AtHistogram()

		// We have to newly allocate slices for the modified buckets
		// here because they are kept by the appender until the next
		// append.
		// TODO(beorn7): We might be able to optimize this.
		var positiveBuckets, negativeBuckets []int64
		if numPositiveBuckets > 0 {
			positiveBuckets = make([]int64, numPositiveBuckets)
		}
		if numNegativeBuckets > 0 {
			negativeBuckets = make([]int64, numNegativeBuckets)
		}

		// Save the modified histogram to the new chunk.
		hOld.PositiveSpans, hOld.NegativeSpans = positiveSpans, negativeSpans
		if len(positiveInterjections) > 0 {
			hOld.PositiveBuckets = interject(hOld.PositiveBuckets, positiveBuckets, positiveInterjections)
		}
		if len(negativeInterjections) > 0 {
			hOld.NegativeBuckets = interject(hOld.NegativeBuckets, negativeBuckets, negativeInterjections)
		}
		app.AppendHistogram(tOld, hOld)
	}

	hc.SetCounterResetHeader(CounterResetHeader(byts[2] & 0b11000000))
	return hc, app
}
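
// Editor's sketch (an assumed usage pattern, not upstream code): a caller that
// wants to append a histogram whose bucket layout grew would derive the
// interjections via compareSpans and recode before appending. Error handling
// for incompatible layouts (the bool returns) is omitted for brevity.
func appendWithRecode(a *HistogramAppender, t int64, h *histogram.Histogram, oldPosSpans, oldNegSpans []histogram.Span) (Chunk, Appender) {
	posInterjections, _ := compareSpans(oldPosSpans, h.PositiveSpans)
	negInterjections, _ := compareSpans(oldNegSpans, h.NegativeSpans)
	chk, app := a.Recode(posInterjections, negInterjections, h.PositiveSpans, h.NegativeSpans)
	app.AppendHistogram(t, h) // Continue with the returned Appender, not the receiver.
	return chk, app
}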

func (a *HistogramAppender) writeSumDelta(v float64) {
	xorWrite(a.b, v, a.sum, &a.leading, &a.trailing)
}

type histogramIterator struct {
	br       bstreamReader
	numTotal uint16
	numRead  uint16

	// Layout:
	schema         int32
	zThreshold     float64
	pSpans, nSpans []histogram.Span

	// For the fields that are tracked as deltas and ultimately dod's.
	t                            int64
	cnt, zCnt                    uint64
	tDelta, cntDelta, zCntDelta  int64
	pBuckets, nBuckets           []int64   // Delta between buckets.
	pFloatBuckets, nFloatBuckets []float64 // Absolute counts.
	pBucketsDelta, nBucketsDelta []int64

	// The sum is Gorilla xor encoded.
	sum      float64
	leading  uint8
	trailing uint8

	// Track calls to retrieve methods. Once they have been called, we
	// cannot recycle the bucket slices anymore because we have returned
	// them in the histogram.
	atHistogramCalled, atFloatHistogramCalled bool

	err error
}

func (it *histogramIterator) Seek(t int64) ValueType {
	if it.err != nil {
		return ValNone
	}

	for t > it.t || it.numRead == 0 {
		if it.Next() == ValNone {
			return ValNone
		}
	}
	return ValHistogram
}

func (it *histogramIterator) At() (int64, float64) {
	panic("cannot call histogramIterator.At")
}

func (it *histogramIterator) AtHistogram() (int64, *histogram.Histogram) {
	if value.IsStaleNaN(it.sum) {
		return it.t, &histogram.Histogram{Sum: it.sum}
	}
	it.atHistogramCalled = true
	return it.t, &histogram.Histogram{
		Count:           it.cnt,
		ZeroCount:       it.zCnt,
		Sum:             it.sum,
		ZeroThreshold:   it.zThreshold,
		Schema:          it.schema,
		PositiveSpans:   it.pSpans,
		NegativeSpans:   it.nSpans,
		PositiveBuckets: it.pBuckets,
		NegativeBuckets: it.nBuckets,
	}
}

func (it *histogramIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
	if value.IsStaleNaN(it.sum) {
		return it.t, &histogram.FloatHistogram{Sum: it.sum}
	}
	it.atFloatHistogramCalled = true
	return it.t, &histogram.FloatHistogram{
		Count:           float64(it.cnt),
		ZeroCount:       float64(it.zCnt),
		Sum:             it.sum,
		ZeroThreshold:   it.zThreshold,
		Schema:          it.schema,
		PositiveSpans:   it.pSpans,
		NegativeSpans:   it.nSpans,
		PositiveBuckets: it.pFloatBuckets,
		NegativeBuckets: it.nFloatBuckets,
	}
}
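
// Editor's sketch (assumption, not upstream code): draining a histogram chunk.
// Next() advances one sample at a time; AtHistogram returns the integer form
// built from the delta slices, AtFloatHistogram the float form built from the
// absolute counts.
func readAllHistograms(c Chunk) ([]*histogram.Histogram, error) {
	var hs []*histogram.Histogram
	it := c.Iterator(nil)
	for it.Next() == ValHistogram {
		_, h := it.AtHistogram()
		hs = append(hs, h)
	}
	return hs, it.Err()
}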

func (it *histogramIterator) AtT() int64 {
	return it.t
}

func (it *histogramIterator) Err() error {
	return it.err
}

func (it *histogramIterator) Reset(b []byte) {
	// The first 2 bytes contain chunk headers.
	// We skip that for actual samples.
	it.br = newBReader(b[2:])
	it.numTotal = binary.BigEndian.Uint16(b)
	it.numRead = 0

	it.t, it.cnt, it.zCnt = 0, 0, 0
	it.tDelta, it.cntDelta, it.zCntDelta = 0, 0, 0

	// Recycle slices that have not been returned yet. Otherwise, start from
	// scratch.
	if it.atHistogramCalled {
		it.atHistogramCalled = false
		it.pBuckets, it.nBuckets = nil, nil
	} else {
		it.pBuckets = it.pBuckets[:0]
		it.nBuckets = it.nBuckets[:0]
	}
	if it.atFloatHistogramCalled {
		it.atFloatHistogramCalled = false
		it.pFloatBuckets, it.nFloatBuckets = nil, nil
	} else {
		it.pFloatBuckets = it.pFloatBuckets[:0]
		it.nFloatBuckets = it.nFloatBuckets[:0]
	}

	it.pBucketsDelta = it.pBucketsDelta[:0]
	it.nBucketsDelta = it.nBucketsDelta[:0]

	it.sum = 0
	it.leading = 0
	it.trailing = 0
	it.err = nil
}

func (it *histogramIterator) Next() ValueType {
	if it.err != nil || it.numRead == it.numTotal {
		return ValNone
	}

	if it.numRead == 0 {
		// The first read is responsible for reading the chunk layout
		// and for initializing fields that depend on it. We give
		// counter reset info at chunk level, hence we discard it here.
		schema, zeroThreshold, posSpans, negSpans, err := readHistogramChunkLayout(&it.br)
		if err != nil {
			it.err = err
			return ValNone
		}
		it.schema = schema
		it.zThreshold = zeroThreshold
		it.pSpans, it.nSpans = posSpans, negSpans
		numPBuckets, numNBuckets := countSpans(posSpans), countSpans(negSpans)
		// Allocate bucket slices as needed, recycling existing slices
		// in case this iterator was reset and already has slices of a
		// sufficient capacity.
		if numPBuckets > 0 {
			if cap(it.pBuckets) < numPBuckets {
				it.pBuckets = make([]int64, numPBuckets)
				// If cap(it.pBuckets) isn't sufficient, neither is the cap of the others.
				it.pBucketsDelta = make([]int64, numPBuckets)
				it.pFloatBuckets = make([]float64, numPBuckets)
			} else {
				for i := 0; i < numPBuckets; i++ {
					it.pBuckets = append(it.pBuckets, 0)
					it.pBucketsDelta = append(it.pBucketsDelta, 0)
					it.pFloatBuckets = append(it.pFloatBuckets, 0)
				}
			}
		}
		if numNBuckets > 0 {
			if cap(it.nBuckets) < numNBuckets {
				it.nBuckets = make([]int64, numNBuckets)
				// If cap(it.nBuckets) isn't sufficient, neither is the cap of the others.
				it.nBucketsDelta = make([]int64, numNBuckets)
				it.nFloatBuckets = make([]float64, numNBuckets)
			} else {
				for i := 0; i < numNBuckets; i++ {
					it.nBuckets = append(it.nBuckets, 0)
					it.nBucketsDelta = append(it.nBucketsDelta, 0)
					it.nFloatBuckets = append(it.nFloatBuckets, 0)
				}
			}
		}

		// Now read the actual data.
		t, err := readVarbitInt(&it.br)
		if err != nil {
			it.err = err
			return ValNone
		}
		it.t = t

		cnt, err := readVarbitUint(&it.br)
		if err != nil {
			it.err = err
			return ValNone
		}
		it.cnt = cnt

		zcnt, err := readVarbitUint(&it.br)
		if err != nil {
			it.err = err
			return ValNone
		}
		it.zCnt = zcnt

		sum, err := it.br.readBits(64)
		if err != nil {
			it.err = err
			return ValNone
		}
		it.sum = math.Float64frombits(sum)

		var current int64
		for i := range it.pBuckets {
			v, err := readVarbitInt(&it.br)
			if err != nil {
				it.err = err
				return ValNone
			}
			it.pBuckets[i] = v
			current += it.pBuckets[i]
			it.pFloatBuckets[i] = float64(current)
		}
		current = 0
		for i := range it.nBuckets {
			v, err := readVarbitInt(&it.br)
			if err != nil {
				it.err = err
				return ValNone
			}
			it.nBuckets[i] = v
			current += it.nBuckets[i]
			it.nFloatBuckets[i] = float64(current)
		}

		it.numRead++
		return ValHistogram
	}

	// The case for the 2nd sample with single deltas is implicitly handled correctly with the double delta code,
	// so we don't need a separate single delta logic for the 2nd sample.

	// Recycle bucket slices that have not been returned yet. Otherwise,
	// copy them.
	if it.atHistogramCalled {
		it.atHistogramCalled = false
		if len(it.pBuckets) > 0 {
			newBuckets := make([]int64, len(it.pBuckets))
			copy(newBuckets, it.pBuckets)
			it.pBuckets = newBuckets
		} else {
			it.pBuckets = nil
		}
		if len(it.nBuckets) > 0 {
			newBuckets := make([]int64, len(it.nBuckets))
			copy(newBuckets, it.nBuckets)
			it.nBuckets = newBuckets
		} else {
			it.nBuckets = nil
		}
	}
	// FloatBuckets are set from scratch, so simply create empty ones.
	if it.atFloatHistogramCalled {
		it.atFloatHistogramCalled = false
		if len(it.pFloatBuckets) > 0 {
			it.pFloatBuckets = make([]float64, len(it.pFloatBuckets))
		} else {
			it.pFloatBuckets = nil
		}
		if len(it.nFloatBuckets) > 0 {
			it.nFloatBuckets = make([]float64, len(it.nFloatBuckets))
		} else {
			it.nFloatBuckets = nil
		}
	}

	tDod, err := readVarbitInt(&it.br)
	if err != nil {
		it.err = err
		return ValNone
	}
	it.tDelta = it.tDelta + tDod
	it.t += it.tDelta

	cntDod, err := readVarbitInt(&it.br)
	if err != nil {
		it.err = err
		return ValNone
	}
	it.cntDelta = it.cntDelta + cntDod
	it.cnt = uint64(int64(it.cnt) + it.cntDelta)

	zcntDod, err := readVarbitInt(&it.br)
	if err != nil {
		it.err = err
		return ValNone
	}
	it.zCntDelta = it.zCntDelta + zcntDod
	it.zCnt = uint64(int64(it.zCnt) + it.zCntDelta)

	ok := it.readSum()
	if !ok {
		return ValNone
	}

	if value.IsStaleNaN(it.sum) {
		it.numRead++
		return ValHistogram
	}

	var current int64
	for i := range it.pBuckets {
		dod, err := readVarbitInt(&it.br)
		if err != nil {
			it.err = err
			return ValNone
		}
		it.pBucketsDelta[i] += dod
		it.pBuckets[i] += it.pBucketsDelta[i]
		current += it.pBuckets[i]
		it.pFloatBuckets[i] = float64(current)
	}

	current = 0
	for i := range it.nBuckets {
		dod, err := readVarbitInt(&it.br)
		if err != nil {
			it.err = err
			return ValNone
		}
		it.nBucketsDelta[i] += dod
		it.nBuckets[i] += it.nBucketsDelta[i]
		current += it.nBuckets[i]
		it.nFloatBuckets[i] = float64(current)
	}

	it.numRead++
	return ValHistogram
}

func (it *histogramIterator) readSum() bool {
	err := xorRead(&it.br, &it.sum, &it.leading, &it.trailing)
	if err != nil {
		it.err = err
		return false
	}
	return true
}

334 vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go generated vendored Normal file
@@ -0,0 +1,334 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package chunkenc

import (
	"math"

	"github.com/prometheus/prometheus/model/histogram"
)

func writeHistogramChunkLayout(b *bstream, schema int32, zeroThreshold float64, positiveSpans, negativeSpans []histogram.Span) {
	putZeroThreshold(b, zeroThreshold)
	putVarbitInt(b, int64(schema))
	putHistogramChunkLayoutSpans(b, positiveSpans)
	putHistogramChunkLayoutSpans(b, negativeSpans)
}

func readHistogramChunkLayout(b *bstreamReader) (
	schema int32, zeroThreshold float64,
	positiveSpans, negativeSpans []histogram.Span,
	err error,
) {
	zeroThreshold, err = readZeroThreshold(b)
	if err != nil {
		return
	}

	v, err := readVarbitInt(b)
	if err != nil {
		return
	}
	schema = int32(v)

	positiveSpans, err = readHistogramChunkLayoutSpans(b)
	if err != nil {
		return
	}

	negativeSpans, err = readHistogramChunkLayoutSpans(b)
	if err != nil {
		return
	}

	return
}

func putHistogramChunkLayoutSpans(b *bstream, spans []histogram.Span) {
	putVarbitUint(b, uint64(len(spans)))
	for _, s := range spans {
		putVarbitUint(b, uint64(s.Length))
		putVarbitInt(b, int64(s.Offset))
	}
}

func readHistogramChunkLayoutSpans(b *bstreamReader) ([]histogram.Span, error) {
	var spans []histogram.Span
	num, err := readVarbitUint(b)
	if err != nil {
		return nil, err
	}
	for i := 0; i < int(num); i++ {
		length, err := readVarbitUint(b)
		if err != nil {
			return nil, err
		}

		offset, err := readVarbitInt(b)
		if err != nil {
			return nil, err
		}

		spans = append(spans, histogram.Span{
			Length: uint32(length),
			Offset: int32(offset),
		})
	}
	return spans, nil
}

// putZeroThreshold writes the zero threshold to the bstream. It stores typical
// values in just one byte, but needs 9 bytes for other values. In detail:
//
//   - If the threshold is 0, store a single zero byte.
//
//   - If the threshold is a power of 2 between (and including) 2^-243 and 2^10,
//     take the exponent from the IEEE 754 representation of the threshold, which
//     covers a range between (and including) -242 and 11. (2^-243 is 0.5*2^-242
//     in IEEE 754 representation, and 2^10 is 0.5*2^11.) Add 243 to the exponent
//     and store the result (which will be between 1 and 254) as a single
//     byte. Note that small powers of two are preferred values for the zero
//     threshold. The default value for the zero threshold is 2^-128 (or
//     0.5*2^-127 in IEEE 754 representation) and will therefore be encoded as a
//     single byte (with value 116).
//
//   - In all other cases, store 255 as a single byte, followed by the 8 bytes of
//     the threshold as a float64, i.e. taking 9 bytes in total.
func putZeroThreshold(b *bstream, threshold float64) {
	if threshold == 0 {
		b.writeByte(0)
		return
	}
	frac, exp := math.Frexp(threshold)
	if frac != 0.5 || exp < -242 || exp > 11 {
		b.writeByte(255)
		b.writeBits(math.Float64bits(threshold), 64)
		return
	}
	b.writeByte(byte(exp + 243))
}

// readZeroThreshold reads the zero threshold written with putZeroThreshold.
func readZeroThreshold(br *bstreamReader) (float64, error) {
	b, err := br.ReadByte()
	if err != nil {
		return 0, err
	}
	switch b {
	case 0:
		return 0, nil
	case 255:
		v, err := br.readBits(64)
		if err != nil {
			return 0, err
		}
		return math.Float64frombits(v), nil
	default:
		return math.Ldexp(0.5, int(b)-243), nil
	}
}
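
// Editor's sketch (assumption, not upstream code): the default zero threshold
// 2^-128 is 0.5*2^-127 in Frexp terms, so exp == -127 and the encoded byte is
// -127+243 == 116, matching the doc comment on putZeroThreshold.
func exampleZeroThresholdByte() byte {
	_, exp := math.Frexp(math.Ldexp(1, -128)) // frac == 0.5, exp == -127
	return byte(exp + 243)                    // == 116
}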

type bucketIterator struct {
	spans  []histogram.Span
	span   int // Span position of last yielded bucket.
	bucket int // Bucket position within span of last yielded bucket.
	idx    int // Bucket index (globally across all spans) of last yielded bucket.
}

func newBucketIterator(spans []histogram.Span) *bucketIterator {
	b := bucketIterator{
		spans:  spans,
		span:   0,
		bucket: -1,
		idx:    -1,
	}
	if len(spans) > 0 {
		b.idx += int(spans[0].Offset)
	}
	return &b
}

func (b *bucketIterator) Next() (int, bool) {
	// We're already out of bounds.
	if b.span >= len(b.spans) {
		return 0, false
	}
try:
	if b.bucket < int(b.spans[b.span].Length-1) { // Try to move within same span.
		b.bucket++
		b.idx++
		return b.idx, true
	} else if b.span < len(b.spans)-1 { // Try to move from one span to the next.
		b.span++
		b.idx += int(b.spans[b.span].Offset + 1)
		b.bucket = 0
		if b.spans[b.span].Length == 0 {
			// Pathological case that should never happen. We can't use this span, let's try again.
			goto try
		}
		return b.idx, true
	}
	// We're out of options.
	return 0, false
}
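
// Editor's sketch (assumption, not upstream code): collecting the absolute
// bucket indices described by a span layout. For the spans [{0,2},{2,1}]
// (offset, length pairs) this yields [0 1 4].
func exampleBucketIndices(spans []histogram.Span) []int {
	var idxs []int
	it := newBucketIterator(spans)
	for idx, ok := it.Next(); ok; idx, ok = it.Next() {
		idxs = append(idxs, idx)
	}
	return idxs
}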

// An Interjection describes how many new buckets have to be introduced before
// processing the pos'th delta from the original slice.
type Interjection struct {
	pos int
	num int
}

// compareSpans returns the interjections to convert a slice of deltas to a new
// slice representing an expanded set of buckets, or false if incompatible
// (e.g. if buckets were removed).
//
// Example:
//
// Let's say the old buckets look like this:
//
//	span syntax: [offset, length]
//	spans      : [ 0 , 2 ]         [2,1]                   [ 3 , 2 ]               [3,1]       [1,1]
//	bucket idx : [0]   [1]   2     3     [4]   5     6     7     [8]   [9]   10    11    12    [13]  14    [15]
//	raw values    6     3                 3                       2     4                       5           1
//	deltas        6    -3                 0                      -1     2                       1          -4
//
// But now we introduce a new bucket layout. (Carefully chosen example where we
// have a span appended, one unchanged[*], one prepended, and two merged - in
// that order.)
//
// [*] Unchanged in terms of which bucket indices they represent. But to achieve
// that, their offset needs to change if "disrupted" by spans changing ahead of
// them.
//
//	                                      \/ this one is "unchanged"
//	spans      : [ 0 , 3 ]               [1,1]       [ 1 , 4 ]                     [ 3 , 3 ]
//	bucket idx : [0]   [1]   [2]   3     [4]   5     [6]   [7]   [8]   [9]   10    11    12    [13]  [14]  [15]
//	raw values    6     3     0           3           0     0     2     4                       5     0     1
//	deltas        6    -3    -3           3          -3     0     2     2                       1    -5     1
//	delta mods:             / \                     / \                                        / \
//
// Note that whenever any new buckets are introduced, the subsequent "old"
// bucket needs to readjust its delta to the new base of 0. Thus, for the caller
// who wants to transform the set of original deltas to a new set of deltas to
// match a new span layout that adds buckets, we simply need to generate a list
// of interjections.
//
// Note: Within compareSpans we don't have to worry about the changes to the
// spans themselves, thanks to the iterators we get to work with the more useful
// bucket indices (which of course directly correspond to the buckets we have to
// adjust).
func compareSpans(a, b []histogram.Span) ([]Interjection, bool) {
	ai := newBucketIterator(a)
	bi := newBucketIterator(b)

	var interjections []Interjection

	// When inter.num becomes > 0, this becomes a valid interjection that
	// should be yielded when we finish a streak of new buckets.
	var inter Interjection

	av, aOK := ai.Next()
	bv, bOK := bi.Next()
loop:
	for {
		switch {
		case aOK && bOK:
			switch {
			case av == bv: // Both have an identical value. Move on!
				// Finish WIP interjection and reset.
				if inter.num > 0 {
					interjections = append(interjections, inter)
				}
				inter.num = 0
				av, aOK = ai.Next()
				bv, bOK = bi.Next()
				inter.pos++
			case av < bv: // b misses a value that is in a.
				return interjections, false
			case av > bv: // a misses a value that is in b. Forward b and recompare.
				inter.num++
				bv, bOK = bi.Next()
			}
		case aOK && !bOK: // b misses a value that is in a.
			return interjections, false
		case !aOK && bOK: // a misses a value that is in b. Forward b and recompare.
			inter.num++
			bv, bOK = bi.Next()
		default: // Both iterators ran out. We're done.
			if inter.num > 0 {
				interjections = append(interjections, inter)
			}
			break loop
		}
	}

	return interjections, true
}
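
// Editor's note (worked through by hand, an editorial addition): for the
// layouts in the doc comment above, compareSpans returns
//
//	[]Interjection{{pos: 2, num: 1}, {pos: 3, num: 2}, {pos: 6, num: 1}}, true
//
// i.e. insert 1 new bucket before the old delta at position 2, 2 new buckets
// before position 3, and 1 before position 6, one interjection per "/ \" pair
// in the "delta mods" row.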

// interject merges 'in' with the provided interjections and writes them into
// 'out', which must already have the appropriate length.
func interject(in, out []int64, interjections []Interjection) []int64 {
	var (
		j      int   // Position in out.
		v      int64 // The last value seen.
		interj int   // The next interjection to process.
	)
	for i, d := range in {
		if interj < len(interjections) && i == interjections[interj].pos {
			// We have an interjection!
			// Add interjection.num new delta values such that their
			// bucket values equate 0.
			out[j] = int64(-v)
			j++
			for x := 1; x < interjections[interj].num; x++ {
				out[j] = 0
				j++
			}
			interj++

			// Now save the value from the input. The delta value we
			// should save is the original delta value + the last
			// value of the point before the interjection (to undo
			// the delta that was introduced by the interjection).
			out[j] = d + v
			j++
			v = d + v
			continue
		}

		// If there was no interjection, the original delta is still
		// valid.
		out[j] = d
		j++
		v += d
	}
	switch interj {
	case len(interjections):
		// All interjections processed. Nothing more to do.
	case len(interjections) - 1:
		// One more interjection to process at the end.
		out[j] = int64(-v)
		j++
		for x := 1; x < interjections[interj].num; x++ {
			out[j] = 0
			j++
		}
	default:
		panic("unprocessed interjections left")
	}
	return out
}
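
// Editor's note (worked through by hand, an editorial addition): applying the
// interjections above to the old deltas from the compareSpans doc comment
// reproduces the new deltas shown there:
//
//	in  := []int64{6, -3, 0, -1, 2, 1, -4}
//	out := make([]int64, 11)
//	interject(in, out, []Interjection{{pos: 2, num: 1}, {pos: 3, num: 2}, {pos: 6, num: 1}})
//	// out == [6 -3 -3 3 -3 0 2 2 1 -5 1]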

232 vendor/github.com/prometheus/prometheus/tsdb/chunkenc/varbit.go generated vendored Normal file
@@ -0,0 +1,232 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package chunkenc

import (
	"math/bits"

	"github.com/pkg/errors"
)

// putVarbitInt writes an int64 using varbit encoding with a bit bucketing
// optimized for the dod's observed in histogram buckets, plus a few additional
// buckets for large numbers.
//
// For optimal space utilization, each branch wouldn't need to support any
// values of any of the prior branches, so we could expand the range of each
// branch and do more with fewer bits. It would come at the price of more
// expensive encoding and decoding (cutting out and later adding back that
// center-piece we skip). With the distributions of values we see in practice,
// we would reduce the size by around 1%. A more detailed study would be needed
// for precise values, but it appears quite certain that we would end up far
// below 10%, which would maybe convince us to invest the increased
// coding/decoding cost.
func putVarbitInt(b *bstream, val int64) {
	switch {
	case val == 0: // Precisely 0, needs 1 bit.
		b.writeBit(zero)
	case bitRange(val, 3): // -3 <= val <= 4, needs 5 bits.
		b.writeBits(0b10, 2)
		b.writeBits(uint64(val), 3)
	case bitRange(val, 6): // -31 <= val <= 32, 9 bits.
		b.writeBits(0b110, 3)
		b.writeBits(uint64(val), 6)
	case bitRange(val, 9): // -255 <= val <= 256, 13 bits.
		b.writeBits(0b1110, 4)
		b.writeBits(uint64(val), 9)
	case bitRange(val, 12): // -2047 <= val <= 2048, 17 bits.
		b.writeBits(0b11110, 5)
		b.writeBits(uint64(val), 12)
	case bitRange(val, 18): // -131071 <= val <= 131072, 3 bytes.
		b.writeBits(0b111110, 6)
		b.writeBits(uint64(val), 18)
	case bitRange(val, 25): // -16777215 <= val <= 16777216, 4 bytes.
		b.writeBits(0b1111110, 7)
		b.writeBits(uint64(val), 25)
	case bitRange(val, 56): // -36028797018963967 <= val <= 36028797018963968, 8 bytes.
		b.writeBits(0b11111110, 8)
		b.writeBits(uint64(val), 56)
	default:
		b.writeBits(0b11111111, 8) // Worst case, needs 9 bytes.
		b.writeBits(uint64(val), 64)
	}
}

// readVarbitInt reads an int64 encoded with putVarbitInt.
func readVarbitInt(b *bstreamReader) (int64, error) {
	var d byte
	for i := 0; i < 8; i++ {
		d <<= 1
		bit, err := b.readBitFast()
		if err != nil {
			bit, err = b.readBit()
		}
		if err != nil {
			return 0, err
		}
		if bit == zero {
			break
		}
		d |= 1
	}

	var val int64
	var sz uint8

	switch d {
	case 0b0:
		// val == 0
	case 0b10:
		sz = 3
	case 0b110:
		sz = 6
	case 0b1110:
		sz = 9
	case 0b11110:
		sz = 12
	case 0b111110:
		sz = 18
	case 0b1111110:
		sz = 25
	case 0b11111110:
		sz = 56
	case 0b11111111:
		// Do not use fast because it's very unlikely it will succeed.
		bits, err := b.readBits(64)
		if err != nil {
			return 0, err
		}

		val = int64(bits)
	default:
		return 0, errors.Errorf("invalid bit pattern %b", d)
	}

	if sz != 0 {
		bits, err := b.readBitsFast(sz)
		if err != nil {
			bits, err = b.readBits(sz)
		}
		if err != nil {
			return 0, err
		}
		if bits > (1 << (sz - 1)) {
			// The sign bit is set, so the value is negative: undo the
			// two's-complement encoding by subtracting 1<<sz (relying
			// on uint64 wrap-around).
			bits = bits - (1 << sz)
		}
		val = int64(bits)
	}

	return val, nil
}
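
// Editor's sketch (assumption, not upstream code): putVarbitInt and
// readVarbitInt are a matched pair, so a value round-trips through a fresh
// bit stream. This relies on bstream's zero value being writable, as the
// encoder functions above assume.
func exampleVarbitIntRoundTrip(v int64) (int64, error) {
	var b bstream
	putVarbitInt(&b, v) // encode into the bit stream
	r := newBReader(b.bytes())
	return readVarbitInt(&r) // decodes back to v
}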

func bitRangeUint(x uint64, nbits int) bool {
	return bits.LeadingZeros64(x) >= 64-nbits
}

// putVarbitUint writes a uint64 using varbit encoding. It uses the same bit
// buckets as putVarbitInt.
func putVarbitUint(b *bstream, val uint64) {
	switch {
	case val == 0: // Precisely 0, needs 1 bit.
		b.writeBit(zero)
	case bitRangeUint(val, 3): // val <= 7, needs 5 bits.
		b.writeBits(0b10, 2)
		b.writeBits(val, 3)
	case bitRangeUint(val, 6): // val <= 63, 9 bits.
		b.writeBits(0b110, 3)
		b.writeBits(val, 6)
	case bitRangeUint(val, 9): // val <= 511, 13 bits.
		b.writeBits(0b1110, 4)
		b.writeBits(val, 9)
	case bitRangeUint(val, 12): // val <= 4095, 17 bits.
		b.writeBits(0b11110, 5)
		b.writeBits(val, 12)
	case bitRangeUint(val, 18): // val <= 262143, 3 bytes.
		b.writeBits(0b111110, 6)
		b.writeBits(val, 18)
	case bitRangeUint(val, 25): // val <= 33554431, 4 bytes.
		b.writeBits(0b1111110, 7)
		b.writeBits(val, 25)
	case bitRangeUint(val, 56): // val <= 72057594037927935, 8 bytes.
		b.writeBits(0b11111110, 8)
		b.writeBits(val, 56)
	default:
		b.writeBits(0b11111111, 8) // Worst case, needs 9 bytes.
		b.writeBits(val, 64)
	}
}

// readVarbitUint reads a uint64 encoded with putVarbitUint.
func readVarbitUint(b *bstreamReader) (uint64, error) {
	var d byte
	for i := 0; i < 8; i++ {
		d <<= 1
		bit, err := b.readBitFast()
		if err != nil {
			bit, err = b.readBit()
		}
		if err != nil {
			return 0, err
		}
		if bit == zero {
			break
		}
		d |= 1
	}

	var (
		bits uint64
		sz   uint8
		err  error
	)

	switch d {
	case 0b0:
		// val == 0
	case 0b10:
		sz = 3
	case 0b110:
		sz = 6
	case 0b1110:
		sz = 9
	case 0b11110:
		sz = 12
	case 0b111110:
		sz = 18
	case 0b1111110:
		sz = 25
	case 0b11111110:
		sz = 56
	case 0b11111111:
		// Do not use fast because it's very unlikely it will succeed.
		bits, err = b.readBits(64)
		if err != nil {
			return 0, err
		}
	default:
		return 0, errors.Errorf("invalid bit pattern %b", d)
	}

	if sz != 0 {
		bits, err = b.readBitsFast(sz)
		if err != nil {
			bits, err = b.readBits(sz)
		}
		if err != nil {
			return 0, err
		}
	}

	return bits, nil
}
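
// Editor's note (an editorial addition): the bit bucket a value lands in fixes
// its encoded size. For example, 0 costs 1 bit; 7 fits in 3 significant bits
// and costs 2+3 = 5 bits; 512 needs 10 significant bits, falls through to the
// 12-bit bucket, and costs 5+12 = 17 bits.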

241 vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go generated vendored
@@ -47,6 +47,8 @@ import (
"encoding/binary"
|
||||
"math"
|
||||
"math/bits"
|
||||
|
||||
"github.com/prometheus/prometheus/model/histogram"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -79,6 +81,7 @@ func (c *XORChunk) NumSamples() int {
|
|||
return int(binary.BigEndian.Uint16(c.Bytes()))
|
||||
}
|
||||
|
||||
// Compact implements the Chunk interface.
|
||||
func (c *XORChunk) Compact() {
|
||||
if l := len(c.b.stream); cap(c.b.stream) > l+chunkCompactCapacityThreshold {
|
||||
buf := make([]byte, l)
|
||||
|
@ -96,7 +99,7 @@ func (c *XORChunk) Appender() (Appender, error) {
|
|||
// To get an appender we must know the state it would have if we had
|
||||
// appended all existing data from scratch.
|
||||
// We iterate through the end and populate via the iterator's state.
|
||||
for it.Next() {
|
||||
for it.Next() != ValNone {
|
||||
}
|
||||
if err := it.Err(); err != nil {
|
||||
return nil, err
|
||||
|
@ -110,7 +113,7 @@ func (c *XORChunk) Appender() (Appender, error) {
|
|||
leading: it.leading,
|
||||
trailing: it.trailing,
|
||||
}
|
||||
if binary.BigEndian.Uint16(a.b.bytes()) == 0 {
|
||||
if it.numTotal == 0 {
|
||||
a.leading = 0xff
|
||||
}
|
||||
return a, nil
|
||||
|
@ -149,6 +152,10 @@ type xorAppender struct {
|
|||
trailing uint8
|
||||
}
|
||||
|
||||
func (a *xorAppender) AppendHistogram(t int64, h *histogram.Histogram) {
|
||||
panic("appended a histogram to an xor chunk")
|
||||
}
|
||||
|
||||
func (a *xorAppender) Append(t int64, v float64) {
|
||||
var tDelta uint64
|
||||
num := binary.BigEndian.Uint16(a.b.bytes())
|
||||
|
@ -176,6 +183,12 @@ func (a *xorAppender) Append(t int64, v float64) {
|
|||
|
||||
// Gorilla has a max resolution of seconds, Prometheus milliseconds.
|
||||
// Thus we use higher value range steps with larger bit size.
|
||||
//
|
||||
// TODO(beorn7): This seems to needlessly jump to large bit
|
||||
// sizes even for very small deviations from zero. Timestamp
|
||||
// compression can probably benefit from some smaller bit
|
||||
// buckets. See also what was done for histogram encoding in
|
||||
// varbit.go.
|
||||
switch {
|
||||
case dod == 0:
|
||||
a.b.writeBit(zero)
|
||||
|
@ -209,38 +222,7 @@ func bitRange(x int64, nbits uint8) bool {
|
|||
}
|
||||
|
||||
func (a *xorAppender) writeVDelta(v float64) {
|
||||
vDelta := math.Float64bits(v) ^ math.Float64bits(a.v)
|
||||
|
||||
if vDelta == 0 {
|
||||
a.b.writeBit(zero)
|
||||
return
|
||||
}
|
||||
a.b.writeBit(one)
|
||||
|
||||
leading := uint8(bits.LeadingZeros64(vDelta))
|
||||
trailing := uint8(bits.TrailingZeros64(vDelta))
|
||||
|
||||
// Clamp number of leading zeros to avoid overflow when encoding.
|
||||
if leading >= 32 {
|
||||
leading = 31
|
||||
}
|
||||
|
||||
if a.leading != 0xff && leading >= a.leading && trailing >= a.trailing {
|
||||
a.b.writeBit(zero)
|
||||
a.b.writeBits(vDelta>>a.trailing, 64-int(a.leading)-int(a.trailing))
|
||||
} else {
|
||||
a.leading, a.trailing = leading, trailing
|
||||
|
||||
a.b.writeBit(one)
|
||||
a.b.writeBits(uint64(leading), 5)
|
||||
|
||||
// Note that if leading == trailing == 0, then sigbits == 64. But that value doesn't actually fit into the 6 bits we have.
|
||||
// Luckily, we never need to encode 0 significant bits, since that would put us in the other case (vdelta == 0).
|
||||
// So instead we write out a 0 and adjust it back to 64 on unpacking.
|
||||
sigbits := 64 - leading - trailing
|
||||
a.b.writeBits(uint64(sigbits), 6)
|
||||
a.b.writeBits(vDelta>>trailing, int(sigbits))
|
||||
}
|
||||
xorWrite(a.b, v, a.v, &a.leading, &a.trailing)
|
||||
}
|
||||
|
||||

type xorIterator struct {
@@ -258,23 +240,35 @@ type xorIterator struct {
	err error
}

func (it *xorIterator) Seek(t int64) bool {
func (it *xorIterator) Seek(t int64) ValueType {
	if it.err != nil {
		return false
		return ValNone
	}

	for t > it.t || it.numRead == 0 {
		if !it.Next() {
			return false
		if it.Next() == ValNone {
			return ValNone
		}
	}
	return true
	return ValFloat
}

func (it *xorIterator) At() (int64, float64) {
	return it.t, it.val
}

func (it *xorIterator) AtHistogram() (int64, *histogram.Histogram) {
	panic("cannot call xorIterator.AtHistogram")
}

func (it *xorIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
	panic("cannot call xorIterator.AtFloatHistogram")
}

func (it *xorIterator) AtT() int64 {
	return it.t
}

func (it *xorIterator) Err() error {
	return it.err
}

@@ -294,33 +288,33 @@ func (it *xorIterator) Reset(b []byte) {
	it.err = nil
}

func (it *xorIterator) Next() bool {
func (it *xorIterator) Next() ValueType {
	if it.err != nil || it.numRead == it.numTotal {
		return false
		return ValNone
	}

	if it.numRead == 0 {
		t, err := binary.ReadVarint(&it.br)
		if err != nil {
			it.err = err
			return false
			return ValNone
		}
		v, err := it.br.readBits(64)
		if err != nil {
			it.err = err
			return false
			return ValNone
		}
		it.t = t
		it.val = math.Float64frombits(v)

		it.numRead++
		return true
		return ValFloat
	}
	if it.numRead == 1 {
		tDelta, err := binary.ReadUvarint(&it.br)
		if err != nil {
			it.err = err
			return false
			return ValNone
		}
		it.tDelta = tDelta
		it.t = it.t + int64(it.tDelta)
@@ -338,7 +332,7 @@ func (it *xorIterator) Next() bool {
		}
		if err != nil {
			it.err = err
			return false
			return ValNone
		}
		if bit == zero {
			break
@@ -361,7 +355,7 @@ func (it *xorIterator) Next() bool {
			bits, err := it.br.readBits(64)
			if err != nil {
				it.err = err
				return false
				return ValNone
			}

			dod = int64(bits)
@@ -374,7 +368,7 @@ func (it *xorIterator) Next() bool {
		}
		if err != nil {
			it.err = err
			return false
			return ValNone
		}

		// Account for negative numbers, which come back as high unsigned numbers.
@@ -391,73 +385,122 @@ func (it *xorIterator) Next() bool {
	return it.readValue()
}

func (it *xorIterator) readValue() bool {
	bit, err := it.br.readBitFast()
	if err != nil {
		bit, err = it.br.readBit()
	}
func (it *xorIterator) readValue() ValueType {
	err := xorRead(&it.br, &it.val, &it.leading, &it.trailing)
	if err != nil {
		it.err = err
		return false
		return ValNone
	}
	it.numRead++
	return ValFloat
}

func xorWrite(b *bstream, newValue, currentValue float64, leading, trailing *uint8) {
	delta := math.Float64bits(newValue) ^ math.Float64bits(currentValue)

	if delta == 0 {
		b.writeBit(zero)
		return
	}
	b.writeBit(one)

	newLeading := uint8(bits.LeadingZeros64(delta))
	newTrailing := uint8(bits.TrailingZeros64(delta))

	// Clamp number of leading zeros to avoid overflow when encoding.
	if newLeading >= 32 {
		newLeading = 31
	}

	if *leading != 0xff && newLeading >= *leading && newTrailing >= *trailing {
		// In this case, we stick with the current leading/trailing.
		b.writeBit(zero)
		b.writeBits(delta>>*trailing, 64-int(*leading)-int(*trailing))
		return
	}

	// Update leading/trailing for the caller.
	*leading, *trailing = newLeading, newTrailing

	b.writeBit(one)
	b.writeBits(uint64(newLeading), 5)

	// Note that if newLeading == newTrailing == 0, then sigbits == 64. But
	// that value doesn't actually fit into the 6 bits we have. Luckily, we
	// never need to encode 0 significant bits, since that would put us in
	// the other case (vdelta == 0). So instead we write out a 0 and adjust
	// it back to 64 on unpacking.
	sigbits := 64 - newLeading - newTrailing
	b.writeBits(uint64(sigbits), 6)
	b.writeBits(delta>>newTrailing, int(sigbits))
}

func xorRead(br *bstreamReader, value *float64, leading, trailing *uint8) error {
	bit, err := br.readBitFast()
	if err != nil {
		bit, err = br.readBit()
	}
	if err != nil {
		return err
	}
	if bit == zero {
		return nil
	}
	bit, err = br.readBitFast()
	if err != nil {
		bit, err = br.readBit()
	}
	if err != nil {
		return err
	}

	var (
		bits                           uint64
		newLeading, newTrailing, mbits uint8
	)

	if bit == zero {
		// it.val = it.val
		// Reuse leading/trailing zero bits.
		newLeading, newTrailing = *leading, *trailing
		mbits = 64 - newLeading - newTrailing
	} else {
		bit, err := it.br.readBitFast()
		bits, err = br.readBitsFast(5)
		if err != nil {
			bit, err = it.br.readBit()
			bits, err = br.readBits(5)
		}
		if err != nil {
			it.err = err
			return false
			return err
		}
		if bit == zero {
			// reuse leading/trailing zero bits
			// it.leading, it.trailing = it.leading, it.trailing
		} else {
			bits, err := it.br.readBitsFast(5)
			if err != nil {
				bits, err = it.br.readBits(5)
			}
			if err != nil {
				it.err = err
				return false
			}
			it.leading = uint8(bits)
		newLeading = uint8(bits)

			bits, err = it.br.readBitsFast(6)
			if err != nil {
				bits, err = it.br.readBits(6)
			}
			if err != nil {
				it.err = err
				return false
			}
			mbits := uint8(bits)
			// 0 significant bits here means we overflowed and we actually need 64; see comment in encoder
			if mbits == 0 {
				mbits = 64
			}
			it.trailing = 64 - it.leading - mbits
		}

		mbits := 64 - it.leading - it.trailing
		bits, err := it.br.readBitsFast(mbits)
		bits, err = br.readBitsFast(6)
		if err != nil {
			bits, err = it.br.readBits(mbits)
			bits, err = br.readBits(6)
		}
		if err != nil {
			it.err = err
			return false
			return err
		}
		vbits := math.Float64bits(it.val)
		vbits ^= bits << it.trailing
		it.val = math.Float64frombits(vbits)
		mbits = uint8(bits)
		// 0 significant bits here means we overflowed and we actually
		// need 64; see comment in xorWrite.
		if mbits == 0 {
			mbits = 64
		}
		newTrailing = 64 - newLeading - mbits
		// Update leading/trailing zero bits for the caller.
		*leading, *trailing = newLeading, newTrailing
	}

	it.numRead++
	return true
	bits, err = br.readBitsFast(mbits)
	if err != nil {
		bits, err = br.readBits(mbits)
	}
	if err != nil {
		return err
	}
	vbits := math.Float64bits(*value)
	vbits ^= bits << newTrailing
	*value = math.Float64frombits(vbits)
	return nil
}
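
// Editor's sketch (assumption, not upstream code): xorWrite and xorRead are a
// matched pair. Writing a value against a known previous value and reading it
// back restores it, provided the leading/trailing state starts from the same
// point on both sides (0xff marks "no state yet" on the write side).
func exampleXORRoundTrip(prev, next float64) (float64, error) {
	var (
		b        bstream
		leading  uint8 = 0xff // sentinel: no leading/trailing recorded yet
		trailing uint8
	)
	xorWrite(&b, next, prev, &leading, &trailing)

	r := newBReader(b.bytes())
	val := prev // the reader XORs against the previous value
	var rLeading, rTrailing uint8
	if err := xorRead(&r, &val, &rLeading, &rTrailing); err != nil {
		return 0, err
	}
	return val, nil // val == next
}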

// OOOXORChunk holds a XORChunk and overrides the Encoding() method.

10 vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go generated vendored
@@ -21,7 +21,6 @@ import (
	"io"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"sync"

@@ -29,6 +28,7 @@ import (
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"go.uber.org/atomic"
	"golang.org/x/exp/slices"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
@@ -308,7 +308,7 @@ func (cdm *ChunkDiskMapper) openMMapFiles() (returnErr error) {
	}

	// Check for gaps in the files.
	sort.Ints(chkFileIndices)
	slices.Sort(chkFileIndices)
	if len(chkFileIndices) == 0 {
		return nil
	}
@@ -777,7 +777,7 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chu
	for seg := range cdm.mmappedChunkFiles {
		segIDs = append(segIDs, seg)
	}
	sort.Ints(segIDs)
	slices.Sort(segIDs)
	for _, segID := range segIDs {
		mmapFile := cdm.mmappedChunkFiles[segID]
		fileEnd := mmapFile.byteSlice.Len()
@@ -894,7 +894,7 @@ func (cdm *ChunkDiskMapper) Truncate(fileNo uint32) error {
	for seq := range cdm.mmappedChunkFiles {
		chkFileIndices = append(chkFileIndices, seq)
	}
	sort.Ints(chkFileIndices)
	slices.Sort(chkFileIndices)

	var removedFiles []int
	for _, seq := range chkFileIndices {
@@ -934,7 +934,7 @@ func (cdm *ChunkDiskMapper) Truncate(fileNo uint32) error {
// deleteFiles deletes the given file sequences in order of the sequence.
// In case of an error, it returns the sorted file sequences that were not deleted from the _disk_.
func (cdm *ChunkDiskMapper) deleteFiles(removedFiles []int) ([]int, error) {
	sort.Ints(removedFiles) // To delete them in order.
	slices.Sort(removedFiles) // To delete them in order.
	cdm.readPathMtx.Lock()
	for _, seq := range removedFiles {
		if err := cdm.closers[seq].Close(); err != nil {

3 vendor/github.com/prometheus/prometheus/tsdb/compact.go generated vendored
@@ -768,7 +768,8 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta,
		chksIter := s.Iterator()
		chks = chks[:0]
		for chksIter.Next() {
			// We are not iterating in streaming way over chunk as it's more efficient to do bulk write for index and
			// We are not iterating in streaming way over chunk as
			// it's more efficient to do bulk write for index and
			// chunk file purposes.
			chks = append(chks, chksIter.At())
		}

63 vendor/github.com/prometheus/prometheus/tsdb/db.go generated vendored
@@ -45,7 +45,7 @@ import (
	"github.com/prometheus/prometheus/tsdb/fileutil"
	_ "github.com/prometheus/prometheus/tsdb/goversion" // Load the package into main to make sure minimum Go version is met.
	"github.com/prometheus/prometheus/tsdb/tsdbutil"
	"github.com/prometheus/prometheus/tsdb/wal"
	"github.com/prometheus/prometheus/tsdb/wlog"
)

const (
@@ -70,7 +70,7 @@ var ErrNotReady = errors.New("TSDB not ready")
// millisecond precision timestamps.
func DefaultOptions() *Options {
	return &Options{
		WALSegmentSize:            wal.DefaultSegmentSize,
		WALSegmentSize:            wlog.DefaultSegmentSize,
		MaxBlockChunkSegmentSize:  chunks.DefaultChunkSegmentSize,
		RetentionDuration:         int64(15 * 24 * time.Hour / time.Millisecond),
		MinBlockDuration:          DefaultBlockDuration,
@@ -81,6 +81,7 @@ func DefaultOptions() *Options {
		StripeSize:                DefaultStripeSize,
		HeadChunksWriteBufferSize: chunks.DefaultWriteBufferSize,
		IsolationDisabled:         defaultIsolationDisabled,
		HeadChunksWriteQueueSize:  chunks.DefaultWriteQueueSize,
		OutOfOrderCapMax:          DefaultOutOfOrderCapMax,
	}
}
@@ -166,6 +167,9 @@ type Options struct {
	// Disables isolation between reads and in-flight appends.
	IsolationDisabled bool

	// EnableNativeHistograms enables the ingestion of native histograms.
	EnableNativeHistograms bool

	// OutOfOrderTimeWindow specifies how much out of order is allowed, if any.
	// This can change during run-time, so this value from here should only be used
	// while initialising.
@@ -389,14 +393,14 @@ func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) {
	if len(blockReaders) > 0 {
		maxBlockTime = blockReaders[len(blockReaders)-1].Meta().MaxTime
	}
	w, err := wal.Open(db.logger, filepath.Join(db.dir, "wal"))
	w, err := wlog.Open(db.logger, filepath.Join(db.dir, "wal"))
	if err != nil {
		return err
	}
	var wbl *wal.WAL
	wblDir := filepath.Join(db.dir, wal.WblDirName)
	var wbl *wlog.WL
	wblDir := filepath.Join(db.dir, wlog.WblDirName)
	if _, err := os.Stat(wblDir); !os.IsNotExist(err) {
		wbl, err = wal.Open(db.logger, wblDir)
		wbl, err = wlog.Open(db.logger, wblDir)
		if err != nil {
			return err
		}
@@ -473,14 +477,14 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue
	if err := head.Close(); err != nil {
		return nil, err
	}
	w, err := wal.Open(db.logger, filepath.Join(db.dir, "wal"))
	w, err := wlog.Open(db.logger, filepath.Join(db.dir, "wal"))
	if err != nil {
		return nil, err
	}
	var wbl *wal.WAL
	wblDir := filepath.Join(db.dir, wal.WblDirName)
	var wbl *wlog.WL
	wblDir := filepath.Join(db.dir, wlog.WblDirName)
	if _, err := os.Stat(wblDir); !os.IsNotExist(err) {
		wbl, err = wal.Open(db.logger, wblDir)
		wbl, err = wlog.Open(db.logger, wblDir)
		if err != nil {
			return nil, err
		}
@@ -677,7 +681,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
	}

	walDir := filepath.Join(dir, "wal")
	wblDir := filepath.Join(dir, wal.WblDirName)
	wblDir := filepath.Join(dir, wlog.WblDirName)

	// Migrate old WAL if one exists.
	if err := MigrateWAL(l, walDir); err != nil {
@@ -739,15 +743,15 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
	}
	db.compactCancel = cancel

	var wlog, wblog *wal.WAL
	segmentSize := wal.DefaultSegmentSize
	var wal, wbl *wlog.WL
	segmentSize := wlog.DefaultSegmentSize
	// Wal is enabled.
	if opts.WALSegmentSize >= 0 {
		// Wal is set to a custom size.
		if opts.WALSegmentSize > 0 {
			segmentSize = opts.WALSegmentSize
		}
		wlog, err = wal.NewSize(l, r, walDir, segmentSize, opts.WALCompression)
		wal, err = wlog.NewSize(l, r, walDir, segmentSize, opts.WALCompression)
		if err != nil {
			return nil, err
		}
@@ -757,7 +761,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
		return nil, err
	}
	if opts.OutOfOrderTimeWindow > 0 || wblSize > 0 {
		wblog, err = wal.NewSize(l, r, wblDir, segmentSize, opts.WALCompression)
		wbl, err = wlog.NewSize(l, r, wblDir, segmentSize, opts.WALCompression)
		if err != nil {
			return nil, err
		}
@@ -775,13 +779,14 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
	headOpts.EnableExemplarStorage = opts.EnableExemplarStorage
	headOpts.MaxExemplars.Store(opts.MaxExemplars)
	headOpts.EnableMemorySnapshotOnShutdown = opts.EnableMemorySnapshotOnShutdown
	headOpts.EnableNativeHistograms.Store(opts.EnableNativeHistograms)
	headOpts.OutOfOrderTimeWindow.Store(opts.OutOfOrderTimeWindow)
	headOpts.OutOfOrderCapMax.Store(opts.OutOfOrderCapMax)
	if opts.IsolationDisabled {
		// We only override this flag if isolation is disabled at DB level. We use the default otherwise.
		headOpts.IsolationDisabled = opts.IsolationDisabled
	}
	db.head, err = NewHead(r, l, wlog, wblog, headOpts, stats.Head)
	db.head, err = NewHead(r, l, wal, wbl, headOpts, stats.Head)
	if err != nil {
		return nil, err
	}
@@ -813,12 +818,12 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
	isOOOErr := isErrLoadOOOWal(initErr)
	if isOOOErr {
		level.Warn(db.logger).Log("msg", "Encountered OOO WAL read error, attempting repair", "err", initErr)
		if err := wblog.Repair(initErr); err != nil {
		if err := wbl.Repair(initErr); err != nil {
			return nil, errors.Wrap(err, "repair corrupted OOO WAL")
		}
	} else {
		level.Warn(db.logger).Log("msg", "Encountered WAL read error, attempting repair", "err", initErr)
		if err := wlog.Repair(initErr); err != nil {
		if err := wal.Repair(initErr); err != nil {
			return nil, errors.Wrap(err, "repair corrupted WAL")
		}
	}
@@ -947,19 +952,19 @@ func (db *DB) ApplyConfig(conf *config.Config) error {
	}

	// Create WBL if it was not present and if OOO is enabled with WAL enabled.
	var wblog *wal.WAL
	var wblog *wlog.WL
	var err error
	if db.head.wbl != nil {
		// The existing WBL from the disk might have been replayed while OOO was disabled.
		wblog = db.head.wbl
	} else if !db.oooWasEnabled.Load() && oooTimeWindow > 0 && db.opts.WALSegmentSize >= 0 {
		segmentSize := wal.DefaultSegmentSize
		segmentSize := wlog.DefaultSegmentSize
		// Wal is set to a custom size.
		if db.opts.WALSegmentSize > 0 {
			segmentSize = db.opts.WALSegmentSize
		}
		oooWalDir := filepath.Join(db.dir, wal.WblDirName)
		wblog, err = wal.NewSize(db.logger, db.registerer, oooWalDir, segmentSize, db.opts.WALCompression)
		oooWalDir := filepath.Join(db.dir, wlog.WblDirName)
		wblog, err = wlog.NewSize(db.logger, db.registerer, oooWalDir, segmentSize, db.opts.WALCompression)
		if err != nil {
			return err
		}
@@ -974,6 +979,16 @@ func (db *DB) ApplyConfig(conf *config.Config) error {
	return nil
}

// EnableNativeHistograms enables the native histogram feature.
func (db *DB) EnableNativeHistograms() {
	db.head.EnableNativeHistograms()
}

// DisableNativeHistograms disables the native histogram feature.
func (db *DB) DisableNativeHistograms() {
	db.head.DisableNativeHistograms()
}

// dbAppender wraps the DB's head appender and triggers compactions on commit
// if necessary.
type dbAppender struct {
@@ -1939,7 +1954,9 @@ func (db *DB) CleanTombstones() (err error) {
	defer db.cmtx.Unlock()

	start := time.Now()
	defer db.metrics.tombCleanTimer.Observe(time.Since(start).Seconds())
	defer func() {
		db.metrics.tombCleanTimer.Observe(time.Since(start).Seconds())
	}()

	cleanUpCompleted := false
	// Repeat cleanup until there are no tombstones left.

7 vendor/github.com/prometheus/prometheus/tsdb/encoding/encoding.go generated vendored
@@ -178,9 +178,10 @@ func NewDecbufRaw(bs ByteSlice, length int) Decbuf {
	return Decbuf{B: bs.Range(0, length)}
}

func (d *Decbuf) Uvarint() int     { return int(d.Uvarint64()) }
func (d *Decbuf) Be32int() int     { return int(d.Be32()) }
func (d *Decbuf) Be64int64() int64 { return int64(d.Be64()) }
func (d *Decbuf) Uvarint() int      { return int(d.Uvarint64()) }
func (d *Decbuf) Uvarint32() uint32 { return uint32(d.Uvarint64()) }
func (d *Decbuf) Be32int() int      { return int(d.Be32()) }
func (d *Decbuf) Be64int64() int64  { return int64(d.Be64()) }

// Crc32 returns a CRC32 checksum over the remaining bytes.
func (d *Decbuf) Crc32(castagnoliTable *crc32.Table) uint32 {

149 vendor/github.com/prometheus/prometheus/tsdb/head.go generated vendored
@@ -31,6 +31,7 @@ import (
|
|||
|
||||
"github.com/prometheus/prometheus/config"
|
||||
"github.com/prometheus/prometheus/model/exemplar"
|
||||
"github.com/prometheus/prometheus/model/histogram"
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/model/metadata"
|
||||
"github.com/prometheus/prometheus/storage"
|
||||
|
@ -41,7 +42,7 @@ import (
|
|||
"github.com/prometheus/prometheus/tsdb/record"
|
||||
"github.com/prometheus/prometheus/tsdb/tombstones"
|
||||
"github.com/prometheus/prometheus/tsdb/tsdbutil"
|
||||
"github.com/prometheus/prometheus/tsdb/wal"
|
||||
"github.com/prometheus/prometheus/tsdb/wlog"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -75,12 +76,13 @@ type Head struct {
|
|||
|
||||
metrics *headMetrics
|
||||
opts *HeadOptions
|
||||
wal, wbl *wal.WAL
|
||||
wal, wbl *wlog.WL
|
||||
exemplarMetrics *ExemplarMetrics
|
||||
exemplars ExemplarStorage
|
||||
logger log.Logger
|
||||
appendPool sync.Pool
|
||||
exemplarsPool sync.Pool
|
||||
histogramsPool sync.Pool
|
||||
metadataPool sync.Pool
|
||||
seriesPool sync.Pool
|
||||
bytesPool sync.Pool
|
||||
|
@ -130,14 +132,18 @@ type HeadOptions struct {
|
|||
// https://pkg.go.dev/sync/atomic#pkg-note-BUG
|
||||
MaxExemplars atomic.Int64
|
||||
|
||||
OutOfOrderTimeWindow atomic.Int64
|
||||
OutOfOrderCapMax atomic.Int64
|
||||
|
||||
// EnableNativeHistograms enables the ingestion of native histograms.
|
||||
EnableNativeHistograms atomic.Bool
|
||||
|
||||
ChunkRange int64
|
||||
// ChunkDirRoot is the parent directory of the chunks directory.
|
||||
ChunkDirRoot string
|
||||
ChunkPool chunkenc.Pool
|
||||
ChunkWriteBufferSize int
|
||||
ChunkWriteQueueSize int
|
||||
OutOfOrderTimeWindow atomic.Int64
|
||||
OutOfOrderCapMax atomic.Int64
|
||||
|
||||
// StripeSize sets the number of entries in the hash map, it must be a power of 2.
|
||||
// A larger StripeSize will allocate more memory up-front, but will increase performance when handling a large number of series.
|
||||
|
@ -186,7 +192,7 @@ type SeriesLifecycleCallback interface {
|
|||
}
|
||||
|
||||
// NewHead opens the head block in dir.
|
||||
func NewHead(r prometheus.Registerer, l log.Logger, wal, wbl *wal.WAL, opts *HeadOptions, stats *HeadStats) (*Head, error) {
|
||||
func NewHead(r prometheus.Registerer, l log.Logger, wal, wbl *wlog.WL, opts *HeadOptions, stats *HeadStats) (*Head, error) {
|
||||
var err error
|
||||
if l == nil {
|
||||
l = log.NewNopLogger()
|
||||
|
@ -299,11 +305,11 @@ type headMetrics struct {
|
|||
chunksCreated prometheus.Counter
|
||||
chunksRemoved prometheus.Counter
|
||||
gcDuration prometheus.Summary
|
||||
samplesAppended prometheus.Counter
|
||||
samplesAppended *prometheus.CounterVec
|
||||
outOfOrderSamplesAppended prometheus.Counter
|
||||
outOfBoundSamples prometheus.Counter
|
||||
outOfOrderSamples prometheus.Counter
|
||||
tooOldSamples prometheus.Counter
|
||||
outOfBoundSamples *prometheus.CounterVec
|
||||
outOfOrderSamples *prometheus.CounterVec
|
||||
tooOldSamples *prometheus.CounterVec
|
||||
walTruncateDuration prometheus.Summary
|
||||
walCorruptionsTotal prometheus.Counter
|
||||
dataTotalReplayDuration prometheus.Gauge
|
||||
|
@ -318,6 +324,11 @@ type headMetrics struct {
|
|||
oooHistogram prometheus.Histogram
|
||||
}
|
||||
|
||||
const (
|
||||
sampleMetricTypeFloat = "float"
|
||||
sampleMetricTypeHistogram = "histogram"
|
||||
)
|
||||
|
||||
func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics {
|
||||
m := &headMetrics{
|
||||
activeAppenders: prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
|
@ -370,26 +381,26 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics {
|
|||
Name: "prometheus_tsdb_data_replay_duration_seconds",
|
||||
Help: "Time taken to replay the data on disk.",
|
||||
}),
|
||||
samplesAppended: prometheus.NewCounter(prometheus.CounterOpts{
|
||||
samplesAppended: prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "prometheus_tsdb_head_samples_appended_total",
|
||||
Help: "Total number of appended samples.",
|
||||
}),
|
||||
}, []string{"type"}),
|
||||
outOfOrderSamplesAppended: prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "prometheus_tsdb_head_out_of_order_samples_appended_total",
|
||||
Help: "Total number of appended out of order samples.",
|
||||
}),
|
||||
outOfBoundSamples: prometheus.NewCounter(prometheus.CounterOpts{
|
||||
outOfBoundSamples: prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "prometheus_tsdb_out_of_bound_samples_total",
|
||||
Help: "Total number of out of bound samples ingestion failed attempts with out of order support disabled.",
|
||||
}),
|
||||
outOfOrderSamples: prometheus.NewCounter(prometheus.CounterOpts{
|
||||
}, []string{"type"}),
|
||||
outOfOrderSamples: prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "prometheus_tsdb_out_of_order_samples_total",
|
||||
Help: "Total number of out of order samples ingestion failed attempts due to out of order being disabled.",
|
||||
}),
|
||||
tooOldSamples: prometheus.NewCounter(prometheus.CounterOpts{
|
||||
}, []string{"type"}),
|
||||
tooOldSamples: prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "prometheus_tsdb_too_old_samples_total",
|
||||
Help: "Total number of out of order samples ingestion failed attempts with out of support enabled, but sample outside of time window.",
|
||||
}),
|
||||
}, []string{"type"}),
|
||||
headTruncateFail: prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "prometheus_tsdb_head_truncations_failed_total",
|
||||
Help: "Total number of head truncations that failed.",
|
||||
|
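The head metrics above move from plain Counters to CounterVecs keyed by a "type" label, so float and histogram samples are counted separately under one metric name. A minimal client_golang sketch of the same pattern (metric name shortened for the example):

    package main

    import (
    	"fmt"

    	"github.com/prometheus/client_golang/prometheus"
    )

    func main() {
    	// A counter partitioned by sample type, as in the headMetrics change above.
    	samplesAppended := prometheus.NewCounterVec(prometheus.CounterOpts{
    		Name: "tsdb_head_samples_appended_total",
    		Help: "Total number of appended samples.",
    	}, []string{"type"})

    	reg := prometheus.NewRegistry()
    	reg.MustRegister(samplesAppended)

    	// Each sample type increments its own child series.
    	samplesAppended.WithLabelValues("float").Inc()
    	samplesAppended.WithLabelValues("histogram").Add(2)

    	mfs, _ := reg.Gather()
    	for _, mf := range mfs {
    		fmt.Println(mf.GetName(), len(mf.GetMetric())) // one family, two children
    	}
    }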
@@ -602,13 +613,13 @@ func (h *Head) Init(minValidTime int64) error {
 
 	checkpointReplayStart := time.Now()
 	// Backfill the checkpoint first if it exists.
-	dir, startFrom, err := wal.LastCheckpoint(h.wal.Dir())
+	dir, startFrom, err := wlog.LastCheckpoint(h.wal.Dir())
 	if err != nil && err != record.ErrNotFound {
 		return errors.Wrap(err, "find last checkpoint")
 	}
 
 	// Find the last segment.
-	_, endAt, e := wal.Segments(h.wal.Dir())
+	_, endAt, e := wlog.Segments(h.wal.Dir())
 	if e != nil {
 		return errors.Wrap(e, "finding WAL segments")
 	}

@@ -617,7 +628,7 @@ func (h *Head) Init(minValidTime int64) error {
 
 	multiRef := map[chunks.HeadSeriesRef]chunks.HeadSeriesRef{}
 	if err == nil && startFrom >= snapIdx {
-		sr, err := wal.NewSegmentsReader(dir)
+		sr, err := wlog.NewSegmentsReader(dir)
 		if err != nil {
 			return errors.Wrap(err, "open checkpoint")
 		}

@@ -629,7 +640,7 @@ func (h *Head) Init(minValidTime int64) error {
 
 		// A corrupted checkpoint is a hard error for now and requires user
 		// intervention. There's likely little data that can be recovered anyway.
-		if err := h.loadWAL(wal.NewReader(sr), multiRef, mmappedChunks, oooMmappedChunks); err != nil {
+		if err := h.loadWAL(wlog.NewReader(sr), multiRef, mmappedChunks, oooMmappedChunks); err != nil {
 			return errors.Wrap(err, "backfill checkpoint")
 		}
 		h.updateWALReplayStatusRead(startFrom)

@@ -645,7 +656,7 @@ func (h *Head) Init(minValidTime int64) error {
 	}
 	// Backfill segments from the most recent checkpoint onwards.
 	for i := startFrom; i <= endAt; i++ {
-		s, err := wal.OpenReadSegment(wal.SegmentName(h.wal.Dir(), i))
+		s, err := wlog.OpenReadSegment(wlog.SegmentName(h.wal.Dir(), i))
 		if err != nil {
 			return errors.Wrap(err, fmt.Sprintf("open WAL segment: %d", i))
 		}

@@ -654,7 +665,7 @@ func (h *Head) Init(minValidTime int64) error {
 		if i == snapIdx {
 			offset = snapOffset
 		}
-		sr, err := wal.NewSegmentBufReaderWithOffset(offset, s)
+		sr, err := wlog.NewSegmentBufReaderWithOffset(offset, s)
 		if errors.Cause(err) == io.EOF {
 			// File does not exist.
 			continue

@@ -662,7 +673,7 @@ func (h *Head) Init(minValidTime int64) error {
 		if err != nil {
 			return errors.Wrapf(err, "segment reader (offset=%d)", offset)
 		}
-		err = h.loadWAL(wal.NewReader(sr), multiRef, mmappedChunks, oooMmappedChunks)
+		err = h.loadWAL(wlog.NewReader(sr), multiRef, mmappedChunks, oooMmappedChunks)
 		if err := sr.Close(); err != nil {
 			level.Warn(h.logger).Log("msg", "Error while closing the wal segments reader", "err", err)
 		}

@@ -677,20 +688,20 @@ func (h *Head) Init(minValidTime int64) error {
 	wblReplayStart := time.Now()
 	if h.wbl != nil {
 		// Replay OOO WAL.
-		startFrom, endAt, e = wal.Segments(h.wbl.Dir())
+		startFrom, endAt, e = wlog.Segments(h.wbl.Dir())
 		if e != nil {
 			return errors.Wrap(e, "finding OOO WAL segments")
 		}
 		h.startWALReplayStatus(startFrom, endAt)
 
 		for i := startFrom; i <= endAt; i++ {
-			s, err := wal.OpenReadSegment(wal.SegmentName(h.wbl.Dir(), i))
+			s, err := wlog.OpenReadSegment(wlog.SegmentName(h.wbl.Dir(), i))
 			if err != nil {
 				return errors.Wrap(err, fmt.Sprintf("open WBL segment: %d", i))
 			}
 
-			sr := wal.NewSegmentBufReader(s)
-			err = h.loadWBL(wal.NewReader(sr), multiRef, lastMmapRef)
+			sr := wlog.NewSegmentBufReader(s)
+			err = h.loadWBL(wlog.NewReader(sr), multiRef, lastMmapRef)
 			if err := sr.Close(); err != nil {
 				level.Warn(h.logger).Log("msg", "Error while closing the wbl segments reader", "err", err)
 			}

@@ -840,7 +851,7 @@ func (h *Head) removeCorruptedMmappedChunks(err error) (map[chunks.HeadSeriesRef
 	return mmappedChunks, oooMmappedChunks, lastRef, nil
 }
 
-func (h *Head) ApplyConfig(cfg *config.Config, wbl *wal.WAL) {
+func (h *Head) ApplyConfig(cfg *config.Config, wbl *wlog.WL) {
 	oooTimeWindow := int64(0)
 	if cfg.StorageConfig.TSDBConfig != nil {
 		oooTimeWindow = cfg.StorageConfig.TSDBConfig.OutOfOrderTimeWindow

@@ -872,7 +883,7 @@ func (h *Head) ApplyConfig(cfg *config.Config, wbl *wal.WAL) {
 
 // SetOutOfOrderTimeWindow updates the out of order related parameters.
 // If the Head already has a WBL set, then the wbl will be ignored.
-func (h *Head) SetOutOfOrderTimeWindow(oooTimeWindow int64, wbl *wal.WAL) {
+func (h *Head) SetOutOfOrderTimeWindow(oooTimeWindow int64, wbl *wlog.WL) {
 	if oooTimeWindow > 0 && h.wbl == nil {
 		h.wbl = wbl
 	}

@@ -880,6 +891,16 @@ func (h *Head) SetOutOfOrderTimeWindow(oooTimeWindow int64, wbl *wal.WAL) {
 	h.opts.OutOfOrderTimeWindow.Store(oooTimeWindow)
 }
 
+// EnableNativeHistograms enables the native histogram feature.
+func (h *Head) EnableNativeHistograms() {
+	h.opts.EnableNativeHistograms.Store(true)
+}
+
+// DisableNativeHistograms disables the native histogram feature.
+func (h *Head) DisableNativeHistograms() {
+	h.opts.EnableNativeHistograms.Store(false)
+}
+
 // PostingsCardinalityStats returns top 10 highest cardinality stats By label and value names.
 func (h *Head) PostingsCardinalityStats(statsByLabelName string) *index.PostingsStats {
 	h.cardinalityMutex.Lock()

@@ -1095,7 +1116,7 @@ func (h *Head) truncateWAL(mint int64) error {
 	start := time.Now()
 	h.lastWALTruncationTime.Store(mint)
 
-	first, last, err := wal.Segments(h.wal.Dir())
+	first, last, err := wlog.Segments(h.wal.Dir())
 	if err != nil {
 		return errors.Wrap(err, "get segment range")
 	}

@@ -1127,9 +1148,9 @@ func (h *Head) truncateWAL(mint int64) error {
 		return ok
 	}
 	h.metrics.checkpointCreationTotal.Inc()
-	if _, err = wal.Checkpoint(h.logger, h.wal, first, last, keep, mint); err != nil {
+	if _, err = wlog.Checkpoint(h.logger, h.wal, first, last, keep, mint); err != nil {
 		h.metrics.checkpointCreationFail.Inc()
-		if _, ok := errors.Cause(err).(*wal.CorruptionErr); ok {
+		if _, ok := errors.Cause(err).(*wlog.CorruptionErr); ok {
 			h.metrics.walCorruptionsTotal.Inc()
 		}
 		return errors.Wrap(err, "create checkpoint")

@@ -1152,7 +1173,7 @@ func (h *Head) truncateWAL(mint int64) error {
 	h.deletedMtx.Unlock()
 
 	h.metrics.checkpointDeleteTotal.Inc()
-	if err := wal.DeleteCheckpoints(h.wal.Dir(), last); err != nil {
+	if err := wlog.DeleteCheckpoints(h.wal.Dir(), last); err != nil {
 		// Leftover old checkpoints do not cause problems down the line beyond
 		// occupying disk space.
 		// They will just be ignored since a higher checkpoint exists.

@@ -1395,7 +1416,7 @@ func (h *Head) gc() (actualInOrderMint, minOOOTime int64, minMmapFile int) {
 	h.tombstones.TruncateBefore(mint)
 
 	if h.wal != nil {
-		_, last, _ := wal.Segments(h.wal.Dir())
+		_, last, _ := wlog.Segments(h.wal.Dir())
 		h.deletedMtx.Lock()
 		// Keep series records until we're past segment 'last'
 		// because the WAL will still have samples records with

@@ -1472,7 +1493,11 @@ func (h *Head) Close() error {
 	h.closedMtx.Lock()
 	defer h.closedMtx.Unlock()
 	h.closed = true
+
 	errs := tsdb_errors.NewMulti(h.chunkDiskMapper.Close())
+	if errs.Err() == nil && h.opts.EnableMemorySnapshotOnShutdown {
+		errs.Add(h.performChunkSnapshot())
+	}
 	if h.wal != nil {
 		errs.Add(h.wal.Close())
 	}

@@ -1765,13 +1790,31 @@ func (s *stripeSeries) getOrSet(hash uint64, lset labels.Labels, createSeries fu
 }
 
 type sample struct {
-	t int64
-	v float64
+	t  int64
+	v  float64
+	h  *histogram.Histogram
+	fh *histogram.FloatHistogram
 }
 
-func newSample(t int64, v float64) tsdbutil.Sample { return sample{t, v} }
-func (s sample) T() int64                          { return s.t }
-func (s sample) V() float64                        { return s.v }
+func newSample(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) tsdbutil.Sample {
+	return sample{t, v, h, fh}
+}
+
+func (s sample) T() int64                      { return s.t }
+func (s sample) V() float64                    { return s.v }
+func (s sample) H() *histogram.Histogram       { return s.h }
+func (s sample) FH() *histogram.FloatHistogram { return s.fh }
+
+func (s sample) Type() chunkenc.ValueType {
+	switch {
+	case s.h != nil:
+		return chunkenc.ValHistogram
+	case s.fh != nil:
+		return chunkenc.ValFloatHistogram
+	default:
+		return chunkenc.ValFloat
+	}
+}
 
 // memSeries is the in-memory representation of a series. None of its methods
 // are goroutine safe and it is the caller's responsibility to lock it.

@@ -1806,6 +1849,9 @@ type memSeries struct {
 	// We keep the last value here (in addition to appending it to the chunk) so we can check for duplicates.
 	lastValue float64
 
+	// We keep the last histogram value here (in addition to appending it to the chunk) so we can check for duplicates.
+	lastHistogramValue *histogram.Histogram
+
 	// Current appender for the head chunk. Set when a new head chunk is cut.
 	// It is nil only if headChunk is nil. E.g. if there was an appender that created a new series, but rolled back the commit
 	// (the first sample would create a headChunk, hence appender, but rollback skipped it while the Append() call would create a series).

@@ -1814,6 +1860,10 @@ type memSeries struct {
 	// txs is nil if isolation is disabled.
 	txs *txRing
 
+	// TODO(beorn7): The only reason we track this is to create a staleness
+	// marker as either histogram or float sample. Perhaps there is a better way.
+	isHistogramSeries bool
+
 	pendingCommit bool // Whether there are samples waiting to be committed to this series.
 }

@@ -1974,3 +2024,22 @@ func (h *Head) updateWALReplayStatusRead(current int) {
 
 	h.stats.WALReplayStatus.Current = current
 }
+
+func GenerateTestHistograms(n int) (r []*histogram.Histogram) {
+	for i := 0; i < n; i++ {
+		r = append(r, &histogram.Histogram{
+			Count:         5 + uint64(i*4),
+			ZeroCount:     2 + uint64(i),
+			ZeroThreshold: 0.001,
+			Sum:           18.4 * float64(i+1),
+			Schema:        1,
+			PositiveSpans: []histogram.Span{
+				{Offset: 0, Length: 2},
+				{Offset: 1, Length: 2},
+			},
+			PositiveBuckets: []int64{int64(i + 1), 1, -1, 0},
+		})
+	}
+
+	return r
+}
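GenerateTestHistograms stores PositiveBuckets as deltas between adjacent bucket counts, the same convention checkHistogramBuckets in head_append.go walks when validating a histogram. A small sketch expanding the i == 0 deltas into absolute counts:

    package main

    import "fmt"

    func main() {
    	// Delta-encoded bucket counts from GenerateTestHistograms for i == 0.
    	deltas := []int64{1, 1, -1, 0}

    	// Expand to absolute counts: each delta is added to the previous count.
    	var last, total int64
    	counts := make([]int64, 0, len(deltas))
    	for _, d := range deltas {
    		last += d
    		counts = append(counts, last)
    		total += last
    	}
    	fmt.Println(counts, total) // [1 2 1 1] 5
    }

The total of 5 observations matches the Count field (5) for i == 0; ValidateHistogram in head_append.go rejects a histogram whose bucket sums exceed Count.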
vendor/github.com/prometheus/prometheus/tsdb/head_append.go (401 changes, generated, vendored)

@@ -22,8 +22,10 @@ import (
 	"github.com/pkg/errors"
 
 	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/metadata"
 	"github.com/prometheus/prometheus/model/value"
 	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/tsdb/chunks"

@@ -66,6 +68,16 @@ func (a *initAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e
 	return a.app.AppendExemplar(ref, l, e)
 }
 
+func (a *initAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, error) {
+	if a.app != nil {
+		return a.app.AppendHistogram(ref, l, t, h)
+	}
+	a.head.initTime(t)
+	a.app = a.head.appender()
+
+	return a.app.AppendHistogram(ref, l, t, h)
+}
+
 func (a *initAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
 	if a.app != nil {
 		return a.app.UpdateMetadata(ref, l, m)

@@ -143,6 +155,7 @@ func (h *Head) appender() *headAppender {
 		samples:               h.getAppendBuffer(),
 		sampleSeries:          h.getSeriesBuffer(),
 		exemplars:             exemplarsBuf,
+		histograms:            h.getHistogramBuffer(),
 		metadata:              h.getMetadataBuffer(),
 		appendID:              appendID,
 		cleanupAppendIDsBelow: cleanupAppendIDsBelow,

@@ -210,6 +223,19 @@ func (h *Head) putExemplarBuffer(b []exemplarWithSeriesRef) {
 	h.exemplarsPool.Put(b[:0])
 }
 
+func (h *Head) getHistogramBuffer() []record.RefHistogramSample {
+	b := h.histogramsPool.Get()
+	if b == nil {
+		return make([]record.RefHistogramSample, 0, 512)
+	}
+	return b.([]record.RefHistogramSample)
+}
+
+func (h *Head) putHistogramBuffer(b []record.RefHistogramSample) {
+	//nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
+	h.histogramsPool.Put(b[:0])
+}
+
 func (h *Head) getMetadataBuffer() []record.RefMetadata {
 	b := h.metadataPool.Get()
 	if b == nil {

@@ -261,12 +287,14 @@ type headAppender struct {
 	headMaxt      int64 // We track it here to not take the lock for every sample appended.
 	oooTimeWindow int64 // Use the same for the entire append, and don't load the atomic for each sample.
 
-	series         []record.RefSeries      // New series held by this appender.
-	metadata       []record.RefMetadata    // New metadata held by this appender.
-	samples        []record.RefSample      // New samples held by this appender.
-	exemplars      []exemplarWithSeriesRef // New exemplars held by this appender.
-	sampleSeries   []*memSeries            // Series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
-	metadataSeries []*memSeries            // Series corresponding to the metadata held by this appender.
+	series          []record.RefSeries          // New series held by this appender.
+	samples         []record.RefSample          // New float samples held by this appender.
+	exemplars       []exemplarWithSeriesRef     // New exemplars held by this appender.
+	sampleSeries    []*memSeries                // Float series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
+	histograms      []record.RefHistogramSample // New histogram samples held by this appender.
+	histogramSeries []*memSeries                // HistogramSamples series corresponding to the samples held by this appender (using corresponding slice indices - same series may appear more than once).
+	metadata        []record.RefMetadata        // New metadata held by this appender.
+	metadataSeries  []*memSeries                // Series corresponding to the metadata held by this appender.
 
 	appendID, cleanupAppendIDsBelow uint64
 	closed                          bool

@@ -276,7 +304,7 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
 	// For OOO inserts, this restriction is irrelevant and will be checked later once we confirm the sample is an in-order append.
 	// If OOO inserts are disabled, we may as well as check this as early as we can and avoid more work.
 	if a.oooTimeWindow == 0 && t < a.minValidTime {
-		a.head.metrics.outOfBoundSamples.Inc()
+		a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Inc()
 		return 0, storage.ErrOutOfBounds
 	}
 

@@ -306,6 +334,10 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
 		}
 	}
 
+	if value.IsStaleNaN(v) && s.isHistogramSeries {
+		return a.AppendHistogram(ref, lset, t, &histogram.Histogram{Sum: v})
+	}
+
 	s.Lock()
 	// TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise
 	// to skip that sample from the WAL and write only in the WBL.

@@ -320,9 +352,9 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
 	if err != nil {
 		switch err {
 		case storage.ErrOutOfOrderSample:
-			a.head.metrics.outOfOrderSamples.Inc()
+			a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Inc()
 		case storage.ErrTooOldSample:
-			a.head.metrics.tooOldSamples.Inc()
+			a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Inc()
 		}
 		return 0, err
 	}

@@ -385,6 +417,28 @@ func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooTi
 	return false, headMaxt - t, storage.ErrOutOfOrderSample
 }
 
+// appendableHistogram checks whether the given sample is valid for appending to the series.
+func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram) error {
+	c := s.head()
+	if c == nil {
+		return nil
+	}
+
+	if t > c.maxTime {
+		return nil
+	}
+	if t < c.maxTime {
+		return storage.ErrOutOfOrderSample
+	}
+
+	// We are allowing exact duplicates as we can encounter them in valid cases
+	// like federation and erroring out at that time would be extremely noisy.
+	if !h.Equals(s.lastHistogramValue) {
+		return storage.ErrDuplicateSampleForTimestamp
+	}
+	return nil
+}
+
 // AppendExemplar for headAppender assumes the series ref already exists, and so it doesn't
 // use getOrCreate or make any of the lset sanity checks that Append does.
 func (a *headAppender) AppendExemplar(ref storage.SeriesRef, lset labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {

@@ -422,6 +476,74 @@ func (a *headAppender) AppendExemplar(ref storage.SeriesRef, lset labels.Labels,
 	return storage.SeriesRef(s.ref), nil
 }
 
+func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram) (storage.SeriesRef, error) {
+	if !a.head.opts.EnableNativeHistograms.Load() {
+		return 0, storage.ErrNativeHistogramsDisabled
+	}
+
+	if t < a.minValidTime {
+		a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
+		return 0, storage.ErrOutOfBounds
+	}
+
+	if err := ValidateHistogram(h); err != nil {
+		return 0, err
+	}
+
+	s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
+	if s == nil {
+		// Ensure no empty labels have gotten through.
+		lset = lset.WithoutEmpty()
+		if len(lset) == 0 {
+			return 0, errors.Wrap(ErrInvalidSample, "empty labelset")
+		}
+
+		if l, dup := lset.HasDuplicateLabelNames(); dup {
+			return 0, errors.Wrap(ErrInvalidSample, fmt.Sprintf(`label name "%s" is not unique`, l))
+		}
+
+		var created bool
+		var err error
+		s, created, err = a.head.getOrCreate(lset.Hash(), lset)
+		if err != nil {
+			return 0, err
+		}
+		s.isHistogramSeries = true
+		if created {
+			a.series = append(a.series, record.RefSeries{
+				Ref:    s.ref,
+				Labels: lset,
+			})
+		}
+	}
+
+	s.Lock()
+	if err := s.appendableHistogram(t, h); err != nil {
+		s.Unlock()
+		if err == storage.ErrOutOfOrderSample {
+			a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
+		}
+		return 0, err
+	}
+	s.pendingCommit = true
+	s.Unlock()
+
+	if t < a.mint {
+		a.mint = t
+	}
+	if t > a.maxt {
+		a.maxt = t
+	}
+
+	a.histograms = append(a.histograms, record.RefHistogramSample{
+		Ref: s.ref,
+		T:   t,
+		H:   h,
+	})
+	a.histogramSeries = append(a.histogramSeries, s)
+	return storage.SeriesRef(s.ref), nil
+}
+
 // UpdateMetadata for headAppender assumes the series ref already exists, and so it doesn't
 // use getOrCreate or make any of the lset sanity checks that Append does.
 func (a *headAppender) UpdateMetadata(ref storage.SeriesRef, lset labels.Labels, meta metadata.Metadata) (storage.SeriesRef, error) {

@@ -453,6 +575,76 @@ func (a *headAppender) UpdateMetadata(ref storage.SeriesRef, lset labels.Labels,
 	return ref, nil
 }
 
+func ValidateHistogram(h *histogram.Histogram) error {
+	if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
+		return errors.Wrap(err, "negative side")
+	}
+	if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
+		return errors.Wrap(err, "positive side")
+	}
+
+	negativeCount, err := checkHistogramBuckets(h.NegativeBuckets)
+	if err != nil {
+		return errors.Wrap(err, "negative side")
+	}
+	positiveCount, err := checkHistogramBuckets(h.PositiveBuckets)
+	if err != nil {
+		return errors.Wrap(err, "positive side")
+	}
+
+	if c := negativeCount + positiveCount; c > h.Count {
+		return errors.Wrap(
+			storage.ErrHistogramCountNotBigEnough,
+			fmt.Sprintf("%d observations found in buckets, but the Count field is %d", c, h.Count),
+		)
+	}
+
+	return nil
+}
+
+func checkHistogramSpans(spans []histogram.Span, numBuckets int) error {
+	var spanBuckets int
+	for n, span := range spans {
+		if n > 0 && span.Offset < 0 {
+			return errors.Wrap(
+				storage.ErrHistogramSpanNegativeOffset,
+				fmt.Sprintf("span number %d with offset %d", n+1, span.Offset),
+			)
+		}
+		spanBuckets += int(span.Length)
+	}
+	if spanBuckets != numBuckets {
+		return errors.Wrap(
+			storage.ErrHistogramSpansBucketsMismatch,
+			fmt.Sprintf("spans need %d buckets, have %d buckets", spanBuckets, numBuckets),
+		)
+	}
+	return nil
+}
+
+func checkHistogramBuckets(buckets []int64) (uint64, error) {
+	if len(buckets) == 0 {
+		return 0, nil
+	}
+
+	var count uint64
+	var last int64
+
+	for i := 0; i < len(buckets); i++ {
+		c := last + buckets[i]
+		if c < 0 {
+			return 0, errors.Wrap(
+				storage.ErrHistogramNegativeBucketCount,
+				fmt.Sprintf("bucket number %d has observation count of %d", i+1, c),
+			)
+		}
+		last = c
+		count += uint64(c)
+	}
+
+	return count, nil
+}
+
 var _ storage.GetRef = &headAppender{}
 
 func (a *headAppender) GetRef(lset labels.Labels) (storage.SeriesRef, labels.Labels) {
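To make the span rules above concrete: checkHistogramSpans only forbids a negative Offset after the first span, and requires the span Lengths to account for every bucket. A standalone re-implementation of just those two rules, checked against the spans used by GenerateTestHistograms (the span type here is a local stand-in for histogram.Span):

    package main

    import "fmt"

    // span mirrors histogram.Span for this standalone sketch.
    type span struct {
    	Offset int32
    	Length uint32
    }

    // checkSpans re-implements the checkHistogramSpans rules shown above:
    // only the first span may have a negative offset, and the span lengths
    // must cover exactly numBuckets buckets.
    func checkSpans(spans []span, numBuckets int) error {
    	var spanBuckets int
    	for n, s := range spans {
    		if n > 0 && s.Offset < 0 {
    			return fmt.Errorf("span number %d with offset %d", n+1, s.Offset)
    		}
    		spanBuckets += int(s.Length)
    	}
    	if spanBuckets != numBuckets {
    		return fmt.Errorf("spans need %d buckets, have %d buckets", spanBuckets, numBuckets)
    	}
    	return nil
    }

    func main() {
    	spans := []span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}
    	fmt.Println(checkSpans(spans, 4)) // <nil>: lengths 2+2 cover all 4 buckets
    	fmt.Println(checkSpans(spans, 3)) // error: spans need 4 buckets, have 3 buckets
    }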
@@ -508,6 +700,13 @@ func (a *headAppender) log() error {
 			return errors.Wrap(err, "log exemplars")
 		}
 	}
+	if len(a.histograms) > 0 {
+		rec = enc.HistogramSamples(a.histograms, buf)
+		buf = rec[:0]
+		if err := a.head.wal.Log(rec); err != nil {
+			return errors.Wrap(err, "log histograms")
+		}
+	}
 	return nil
 }
 

@@ -553,6 +752,7 @@ func (a *headAppender) Commit() (err error) {
 	defer a.head.putAppendBuffer(a.samples)
 	defer a.head.putSeriesBuffer(a.sampleSeries)
 	defer a.head.putExemplarBuffer(a.exemplars)
+	defer a.head.putHistogramBuffer(a.histograms)
 	defer a.head.putMetadataBuffer(a.metadata)
 	defer a.head.iso.closeAppend(a.appendID)
 

@@ -697,6 +897,33 @@ func (a *headAppender) Commit() (err error) {
 		series.Unlock()
 	}
 
+	histogramsTotal := len(a.histograms)
+	histoOOORejected := 0
+	for i, s := range a.histograms {
+		series = a.histogramSeries[i]
+		series.Lock()
+		ok, chunkCreated := series.appendHistogram(s.T, s.H, a.appendID, a.head.chunkDiskMapper, chunkRange)
+		series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
+		series.pendingCommit = false
+		series.Unlock()
+
+		if ok {
+			if s.T < inOrderMint {
+				inOrderMint = s.T
+			}
+			if s.T > inOrderMaxt {
+				inOrderMaxt = s.T
+			}
+		} else {
+			histogramsTotal--
+			histoOOORejected++
+		}
+		if chunkCreated {
+			a.head.metrics.chunks.Inc()
+			a.head.metrics.chunksCreated.Inc()
+		}
+	}
+
 	for i, m := range a.metadata {
 		series = a.metadataSeries[i]
 		series.Lock()

@@ -704,10 +931,12 @@ func (a *headAppender) Commit() (err error) {
 		series.Unlock()
 	}
 
-	a.head.metrics.outOfOrderSamples.Add(float64(oooRejected))
-	a.head.metrics.outOfBoundSamples.Add(float64(oobRejected))
-	a.head.metrics.tooOldSamples.Add(float64(tooOldRejected))
-	a.head.metrics.samplesAppended.Add(float64(samplesAppended))
+	a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(oooRejected))
+	a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Add(float64(histoOOORejected))
+	a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(oobRejected))
+	a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(tooOldRejected))
+	a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(samplesAppended))
+	a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(histogramsTotal))
 	a.head.metrics.outOfOrderSamplesAppended.Add(float64(oooAccepted))
 	a.head.updateMinMaxTime(inOrderMint, inOrderMaxt)
 	a.head.updateMinOOOMaxOOOTime(ooomint, ooomaxt)

@@ -751,26 +980,126 @@ func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDisk
 // isolation for this append.)
 // It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
 func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64) (sampleInOrder, chunkCreated bool) {
+	c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncXOR, chunkDiskMapper, chunkRange)
+	if !sampleInOrder {
+		return sampleInOrder, chunkCreated
+	}
+	s.app.Append(t, v)
+	s.isHistogramSeries = false
+
+	c.maxTime = t
+
+	s.lastValue = v
+
+	if appendID > 0 {
+		s.txs.add(appendID)
+	}
+
+	return true, chunkCreated
+}
+
+// appendHistogram adds the histogram.
+// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
+func (s *memSeries) appendHistogram(t int64, h *histogram.Histogram, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64) (sampleInOrder, chunkCreated bool) {
+	// Head controls the execution of recoding, so that we own the proper
+	// chunk reference afterwards. We check for Appendable before
+	// appendPreprocessor because in case it ends up creating a new chunk,
+	// we need to know if there was also a counter reset or not to set the
+	// meta properly.
+	app, _ := s.app.(*chunkenc.HistogramAppender)
+	var (
+		positiveInterjections, negativeInterjections []chunkenc.Interjection
+		okToAppend, counterReset                     bool
+	)
+	c, sampleInOrder, chunkCreated := s.appendPreprocessor(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange)
+	if !sampleInOrder {
+		return sampleInOrder, chunkCreated
+	}
+
+	if app != nil {
+		positiveInterjections, negativeInterjections, okToAppend, counterReset = app.Appendable(h)
+	}
+
+	if !chunkCreated {
+		// We have 3 cases here
+		// - !okToAppend -> We need to cut a new chunk.
+		// - okToAppend but we have interjections → Existing chunk needs
+		//   recoding before we can append our histogram.
+		// - okToAppend and no interjections → Chunk is ready to support our histogram.
+		if !okToAppend || counterReset {
+			c = s.cutNewHeadChunk(t, chunkenc.EncHistogram, chunkDiskMapper, chunkRange)
+			chunkCreated = true
+		} else if len(positiveInterjections) > 0 || len(negativeInterjections) > 0 {
+			// New buckets have appeared. We need to recode all
+			// prior histogram samples within the chunk before we
+			// can process this one.
+			chunk, app := app.Recode(
+				positiveInterjections, negativeInterjections,
+				h.PositiveSpans, h.NegativeSpans,
+			)
+			c.chunk = chunk
+			s.app = app
+		}
+	}
+
+	if chunkCreated {
+		hc := s.headChunk.chunk.(*chunkenc.HistogramChunk)
+		header := chunkenc.UnknownCounterReset
+		if counterReset {
+			header = chunkenc.CounterReset
+		} else if okToAppend {
+			header = chunkenc.NotCounterReset
+		}
+		hc.SetCounterResetHeader(header)
+	}
+
+	s.app.AppendHistogram(t, h)
+	s.isHistogramSeries = true
+
+	c.maxTime = t
+
+	s.lastHistogramValue = h
+
+	if appendID > 0 {
+		s.txs.add(appendID)
+	}
+
+	return true, chunkCreated
+}
+
+// appendPreprocessor takes care of cutting new chunks and m-mapping old chunks.
+// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock.
+// This should be called only when appending data.
+func (s *memSeries) appendPreprocessor(
+	t int64, e chunkenc.Encoding, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64,
+) (c *memChunk, sampleInOrder, chunkCreated bool) {
 	// Based on Gorilla white papers this offers near-optimal compression ratio
 	// so anything bigger that this has diminishing returns and increases
 	// the time range within which we have to decompress all samples.
 	const samplesPerChunk = 120
 
-	c := s.head()
+	c = s.head()
 
 	if c == nil {
 		if len(s.mmappedChunks) > 0 && s.mmappedChunks[len(s.mmappedChunks)-1].maxTime >= t {
 			// Out of order sample. Sample timestamp is already in the mmapped chunks, so ignore it.
-			return false, false
+			return c, false, false
 		}
 		// There is no head chunk in this series yet, create the first chunk for the sample.
-		c = s.cutNewHeadChunk(t, chunkDiskMapper, chunkRange)
+		c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange)
 		chunkCreated = true
 	}
 
 	// Out of order sample.
 	if c.maxTime >= t {
-		return false, chunkCreated
+		return c, false, chunkCreated
 	}
 
+	if c.chunk.Encoding() != e {
+		// The chunk encoding expected by this append is different than the head chunk's
+		// encoding. So we cut a new chunk with the expected encoding.
+		c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange)
+		chunkCreated = true
+	}
+
 	numSamples := c.chunk.NumSamples()

@@ -794,19 +1123,11 @@ func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper
 	// as we expect more chunks to come.
 	// Note that next chunk will have its nextAt recalculated for the new rate.
 	if t >= s.nextAt || numSamples >= samplesPerChunk*2 {
-		c = s.cutNewHeadChunk(t, chunkDiskMapper, chunkRange)
+		c = s.cutNewHeadChunk(t, e, chunkDiskMapper, chunkRange)
 		chunkCreated = true
 	}
-	s.app.Append(t, v)
 
-	c.maxTime = t
-	s.lastValue = v
-
-	if appendID > 0 && s.txs != nil {
-		s.txs.add(appendID)
-	}
-
-	return true, chunkCreated
+	return c, true, chunkCreated
 }
 
 // computeChunkEndTime estimates the end timestamp based the beginning of a

@@ -822,15 +1143,26 @@ func computeChunkEndTime(start, cur, max int64) int64 {
 	return start + (max-start)/n
 }
 
-func (s *memSeries) cutNewHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64) *memChunk {
+func (s *memSeries) cutNewHeadChunk(
+	mint int64, e chunkenc.Encoding, chunkDiskMapper *chunks.ChunkDiskMapper, chunkRange int64,
+) *memChunk {
 	s.mmapCurrentHeadChunk(chunkDiskMapper)
 
 	s.headChunk = &memChunk{
-		chunk:   chunkenc.NewXORChunk(),
 		minTime: mint,
 		maxTime: math.MinInt64,
 	}
 
+	if chunkenc.IsValidEncoding(e) {
+		var err error
+		s.headChunk.chunk, err = chunkenc.NewEmptyChunk(e)
+		if err != nil {
+			panic(err) // This should never happen.
+		}
+	} else {
+		s.headChunk.chunk = chunkenc.NewXORChunk()
+	}
+
 	// Set upper bound on when the next chunk must be started. An earlier timestamp
 	// may be chosen dynamically at a later point.
 	s.nextAt = rangeForTimestamp(mint, chunkRange)

@@ -874,7 +1206,7 @@ func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMap
 }
 
 func (s *memSeries) mmapCurrentHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) {
-	if s.headChunk == nil {
+	if s.headChunk == nil || s.headChunk.chunk.NumSamples() == 0 {
 		// There is no head chunk, so nothing to m-map here.
 		return
 	}

@@ -912,11 +1244,20 @@ func (a *headAppender) Rollback() (err error) {
 		series.pendingCommit = false
 		series.Unlock()
 	}
+	for i := range a.histograms {
+		series = a.histogramSeries[i]
+		series.Lock()
+		series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
+		series.pendingCommit = false
+		series.Unlock()
+	}
 	a.head.putAppendBuffer(a.samples)
 	a.head.putExemplarBuffer(a.exemplars)
+	a.head.putHistogramBuffer(a.histograms)
 	a.head.putMetadataBuffer(a.metadata)
 	a.samples = nil
 	a.exemplars = nil
+	a.histograms = nil
 	a.metadata = nil
 
 	// Series are created in the head memory regardless of rollback. Thus we have
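appendPreprocessor cuts a new head chunk in three situations visible above: there is no head chunk yet, the head chunk has the wrong encoding for the incoming sample, or the chunk is full (t >= nextAt, or twice the 120-sample target). A sketch of just the cut decision against a live chunkenc chunk; the real function also tracks maxTime ordering and recomputes nextAt, and the import path is assumed to match this vendored version:

    package main

    import (
    	"fmt"

    	"github.com/prometheus/prometheus/tsdb/chunkenc"
    )

    const samplesPerChunk = 120 // same constant as appendPreprocessor above

    // needsCut reproduces the cut conditions from appendPreprocessor for an
    // existing head chunk: wrong encoding, timestamp past nextAt, or the chunk
    // has grown to twice the target sample count.
    func needsCut(c chunkenc.Chunk, e chunkenc.Encoding, t, nextAt int64) bool {
    	return c.Encoding() != e || t >= nextAt || c.NumSamples() >= samplesPerChunk*2
    }

    func main() {
    	c, err := chunkenc.NewEmptyChunk(chunkenc.EncXOR)
    	if err != nil {
    		panic(err)
    	}
    	app, err := c.Appender()
    	if err != nil {
    		panic(err)
    	}
    	app.Append(1000, 1.5) // one float sample

    	// A float append fits; a histogram append forces a new chunk.
    	fmt.Println(needsCut(c, chunkenc.EncXOR, 2000, 1_000_000))       // false
    	fmt.Println(needsCut(c, chunkenc.EncHistogram, 2000, 1_000_000)) // true
    }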
vendor/github.com/prometheus/prometheus/tsdb/head_read.go (70 changes, generated, vendored)

@@ -21,6 +21,7 @@ import (
 
 	"github.com/go-kit/log/level"
 	"github.com/pkg/errors"
+	"golang.org/x/exp/slices"
 
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/storage"

@@ -65,7 +66,7 @@ func (h *headIndexReader) Symbols() index.StringIter {
 func (h *headIndexReader) SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) {
 	values, err := h.LabelValues(name, matchers...)
 	if err == nil {
-		sort.Strings(values)
+		slices.Sort(values)
 	}
 	return values, err
 }

@@ -95,7 +96,7 @@ func (h *headIndexReader) LabelNames(matchers ...*labels.Matcher) ([]string, err
 
 	if len(matchers) == 0 {
 		labelNames := h.head.postings.LabelNames()
-		sort.Strings(labelNames)
+		slices.Sort(labelNames)
 		return labelNames, nil
 	}
 

@@ -229,7 +230,7 @@ func (h *headIndexReader) LabelNamesFor(ids ...storage.SeriesRef) ([]string, err
 	for name := range namesMap {
 		names = append(names, name)
 	}
-	sort.Strings(names)
+	slices.Sort(names)
 	return names, nil
 }
 

@@ -485,7 +486,7 @@ func (o mergedOOOChunks) Bytes() []byte {
 		panic(err)
 	}
 	it := o.Iterator(nil)
-	for it.Next() {
+	for it.Next() == chunkenc.ValFloat {
 		t, v := it.At()
 		app.Append(t, v)
 	}

@@ -534,7 +535,7 @@ func (b boundedChunk) Bytes() []byte {
 	xor := chunkenc.NewXORChunk()
 	a, _ := xor.Appender()
 	it := b.Iterator(nil)
-	for it.Next() {
+	for it.Next() == chunkenc.ValFloat {
 		t, v := it.At()
 		a.Append(t, v)
 	}

@@ -563,33 +564,35 @@ type boundedIterator struct {
 // until its able to find a sample within the bounds minT and maxT.
 // If there are samples within bounds it will advance one by one amongst them.
 // If there are no samples within bounds it will return false.
-func (b boundedIterator) Next() bool {
-	for b.Iterator.Next() {
+func (b boundedIterator) Next() chunkenc.ValueType {
+	for b.Iterator.Next() == chunkenc.ValFloat {
 		t, _ := b.Iterator.At()
 		if t < b.minT {
 			continue
 		} else if t > b.maxT {
-			return false
+			return chunkenc.ValNone
 		}
-		return true
+		return chunkenc.ValFloat
 	}
-	return false
+	return chunkenc.ValNone
 }
 
-func (b boundedIterator) Seek(t int64) bool {
+func (b boundedIterator) Seek(t int64) chunkenc.ValueType {
 	if t < b.minT {
 		// We must seek at least up to b.minT if it is asked for something before that.
-		ok := b.Iterator.Seek(b.minT)
-		if !ok {
-			return false
+		val := b.Iterator.Seek(b.minT)
+		if !(val == chunkenc.ValFloat) {
+			return chunkenc.ValNone
 		}
 		t, _ := b.Iterator.At()
-		return t <= b.maxT
+		if t <= b.maxT {
+			return chunkenc.ValFloat
+		}
 	}
 	if t > b.maxT {
 		// We seek anyway so that the subsequent Next() calls will also return false.
 		b.Iterator.Seek(t)
-		return false
+		return chunkenc.ValNone
 	}
 	return b.Iterator.Seek(t)
 }

@@ -683,21 +686,6 @@ func (s *memSeries) iterator(id chunks.HeadChunkID, isoState *isolationState, ch
 	return makeStopIterator(c.chunk, it, stopAfter)
 }
 
-func makeStopIterator(c chunkenc.Chunk, it chunkenc.Iterator, stopAfter int) chunkenc.Iterator {
-	// Re-use the Iterator object if it is a stopIterator.
-	if stopIter, ok := it.(*stopIterator); ok {
-		stopIter.Iterator = c.Iterator(stopIter.Iterator)
-		stopIter.i = -1
-		stopIter.stopAfter = stopAfter
-		return stopIter
-	}
-	return &stopIterator{
-		Iterator:  c.Iterator(it),
-		i:         -1,
-		stopAfter: stopAfter,
-	}
-}
-
 // stopIterator wraps an Iterator, but only returns the first
 // stopAfter values, if initialized with i=-1.
 type stopIterator struct {

@@ -706,10 +694,26 @@ type stopIterator struct {
 	i, stopAfter int
 }
 
-func (it *stopIterator) Next() bool {
+func (it *stopIterator) Next() chunkenc.ValueType {
 	if it.i+1 >= it.stopAfter {
-		return false
+		return chunkenc.ValNone
 	}
 	it.i++
 	return it.Iterator.Next()
 }
+
+func makeStopIterator(c chunkenc.Chunk, it chunkenc.Iterator, stopAfter int) chunkenc.Iterator {
+	// Re-use the Iterator object if it is a stopIterator.
+	if stopIter, ok := it.(*stopIterator); ok {
+		stopIter.Iterator = c.Iterator(stopIter.Iterator)
+		stopIter.i = -1
+		stopIter.stopAfter = stopAfter
+		return stopIter
+	}
+
+	return &stopIterator{
+		Iterator:  c.Iterator(it),
+		i:         -1,
+		stopAfter: stopAfter,
+	}
+}
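The head_read.go changes track the new chunkenc.Iterator contract: Next and Seek now return a chunkenc.ValueType instead of a bool, with ValNone signalling exhaustion and the concrete type telling the caller which accessor to use. A minimal sketch of consuming an iterator under that contract (the histogram accessor named in the comment is an assumption about this vendored API version):

    package main

    import (
    	"fmt"

    	"github.com/prometheus/prometheus/tsdb/chunkenc"
    )

    func main() {
    	c := chunkenc.NewXORChunk()
    	app, err := c.Appender()
    	if err != nil {
    		panic(err)
    	}
    	app.Append(1000, 1.0)
    	app.Append(2000, 2.0)

    	// Next() reports what kind of value the iterator is positioned on;
    	// chunkenc.ValNone means the iterator is exhausted.
    	it := c.Iterator(nil)
    	for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() {
    		switch typ {
    		case chunkenc.ValFloat:
    			t, v := it.At()
    			fmt.Println("float", t, v)
    		default:
    			// Histogram-typed values would be read with the matching
    			// accessor (AtHistogram / AtFloatHistogram in this version).
    			fmt.Println("other value type:", typ)
    		}
    	}
    	if err := it.Err(); err != nil {
    		panic(err)
    	}
    }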
vendor/github.com/prometheus/prometheus/tsdb/head_wal.go (173 changes, generated, vendored)

@@ -39,14 +39,15 @@ import (
 	"github.com/prometheus/prometheus/tsdb/fileutil"
 	"github.com/prometheus/prometheus/tsdb/record"
 	"github.com/prometheus/prometheus/tsdb/tombstones"
-	"github.com/prometheus/prometheus/tsdb/wal"
+	"github.com/prometheus/prometheus/tsdb/wlog"
 )
 
-func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, mmappedChunks, oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (err error) {
+func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, mmappedChunks, oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (err error) {
 	// Track number of samples that referenced a series we don't know about
 	// for error reporting.
 	var unknownRefs atomic.Uint64
 	var unknownExemplarRefs atomic.Uint64
+	var unknownHistogramRefs atomic.Uint64
 	var unknownMetadataRefs atomic.Uint64
 	// Track number of series records that had overlapping m-map chunks.
 	var mmapOverlappingChunks atomic.Uint64

@@ -58,8 +59,9 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
 		processors     = make([]walSubsetProcessor, n)
 		exemplarsInput chan record.RefExemplar
 
-		dec    record.Decoder
-		shards = make([][]record.RefSample, n)
+		dec             record.Decoder
+		shards          = make([][]record.RefSample, n)
+		histogramShards = make([][]record.RefHistogramSample, n)
 
 		decoded                      = make(chan interface{}, 10)
 		decodeErr, seriesCreationErr error

@@ -83,6 +85,11 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
 				return []record.RefExemplar{}
 			},
 		}
+		histogramsPool = sync.Pool{
+			New: func() interface{} {
+				return []record.RefHistogramSample{}
+			},
+		}
 		metadataPool = sync.Pool{
 			New: func() interface{} {
 				return []record.RefMetadata{}

@@ -92,7 +99,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
 
 	defer func() {
 		// For CorruptionErr ensure to terminate all workers before exiting.
-		_, ok := err.(*wal.CorruptionErr)
+		_, ok := err.(*wlog.CorruptionErr)
 		if ok || seriesCreationErr != nil {
 			for i := 0; i < n; i++ {
 				processors[i].closeAndDrain()

@@ -107,9 +114,10 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
 		processors[i].setup()
 
 		go func(wp *walSubsetProcessor) {
-			unknown, overlapping := wp.processWALSamples(h, mmappedChunks, oooMmappedChunks)
+			unknown, unknownHistograms, overlapping := wp.processWALSamples(h, mmappedChunks, oooMmappedChunks)
 			unknownRefs.Add(unknown)
 			mmapOverlappingChunks.Add(overlapping)
+			unknownHistogramRefs.Add(unknownHistograms)
 			wg.Done()
 		}(&processors[i])
 	}

@@ -148,7 +156,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
 			series := seriesPool.Get().([]record.RefSeries)[:0]
 			series, err = dec.Series(rec, series)
 			if err != nil {
-				decodeErr = &wal.CorruptionErr{
+				decodeErr = &wlog.CorruptionErr{
 					Err:     errors.Wrap(err, "decode series"),
 					Segment: r.Segment(),
 					Offset:  r.Offset(),

@@ -160,7 +168,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
 			samples := samplesPool.Get().([]record.RefSample)[:0]
 			samples, err = dec.Samples(rec, samples)
 			if err != nil {
-				decodeErr = &wal.CorruptionErr{
+				decodeErr = &wlog.CorruptionErr{
 					Err:     errors.Wrap(err, "decode samples"),
 					Segment: r.Segment(),
 					Offset:  r.Offset(),

@@ -172,7 +180,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
 			tstones := tstonesPool.Get().([]tombstones.Stone)[:0]
 			tstones, err = dec.Tombstones(rec, tstones)
 			if err != nil {
-				decodeErr = &wal.CorruptionErr{
+				decodeErr = &wlog.CorruptionErr{
 					Err:     errors.Wrap(err, "decode tombstones"),
 					Segment: r.Segment(),
 					Offset:  r.Offset(),

@@ -184,7 +192,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
 			exemplars := exemplarsPool.Get().([]record.RefExemplar)[:0]
 			exemplars, err = dec.Exemplars(rec, exemplars)
 			if err != nil {
-				decodeErr = &wal.CorruptionErr{
+				decodeErr = &wlog.CorruptionErr{
 					Err:     errors.Wrap(err, "decode exemplars"),
 					Segment: r.Segment(),
 					Offset:  r.Offset(),

@@ -192,11 +200,23 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
 				return
 			}
 			decoded <- exemplars
+		case record.HistogramSamples:
+			hists := histogramsPool.Get().([]record.RefHistogramSample)[:0]
+			hists, err = dec.HistogramSamples(rec, hists)
+			if err != nil {
+				decodeErr = &wlog.CorruptionErr{
+					Err:     errors.Wrap(err, "decode histograms"),
+					Segment: r.Segment(),
+					Offset:  r.Offset(),
+				}
+				return
+			}
+			decoded <- hists
 		case record.Metadata:
 			meta := metadataPool.Get().([]record.RefMetadata)[:0]
 			meta, err := dec.Metadata(rec, meta)
 			if err != nil {
-				decodeErr = &wal.CorruptionErr{
+				decodeErr = &wlog.CorruptionErr{
 					Err:     errors.Wrap(err, "decode metadata"),
 					Segment: r.Segment(),
 					Offset:  r.Offset(),

@@ -292,6 +312,43 @@ Outer:
 			}
 			//nolint:staticcheck // Ignore SA6002 relax staticcheck verification.
 			exemplarsPool.Put(v)
+		case []record.RefHistogramSample:
+			samples := v
+			minValidTime := h.minValidTime.Load()
+			// We split up the samples into chunks of 5000 samples or less.
+			// With O(300 * #cores) in-flight sample batches, large scrapes could otherwise
+			// cause thousands of very large in flight buffers occupying large amounts
+			// of unused memory.
+			for len(samples) > 0 {
+				m := 5000
+				if len(samples) < m {
+					m = len(samples)
+				}
+				for i := 0; i < n; i++ {
+					if histogramShards[i] == nil {
+						histogramShards[i] = processors[i].reuseHistogramBuf()
+					}
+				}
+				for _, sam := range samples[:m] {
+					if sam.T < minValidTime {
+						continue // Before minValidTime: discard.
+					}
+					if r, ok := multiRef[sam.Ref]; ok {
+						sam.Ref = r
+					}
+					mod := uint64(sam.Ref) % uint64(n)
+					histogramShards[mod] = append(histogramShards[mod], sam)
+				}
+				for i := 0; i < n; i++ {
+					if len(histogramShards[i]) > 0 {
+						processors[i].input <- walSubsetProcessorInputItem{histogramSamples: histogramShards[i]}
+						histogramShards[i] = nil
+					}
+				}
+				samples = samples[m:]
+			}
+			//nolint:staticcheck // Ignore SA6002 relax staticcheck verification.
+			histogramsPool.Put(v)
 		case []record.RefMetadata:
 			for _, m := range v {
 				s := h.series.getByID(chunks.HeadSeriesRef(m.Ref))
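WAL replay above batches decoded histogram records into groups of at most 5000 samples and shards them across n workers by series reference (uint64(sam.Ref) % uint64(n)), so every sample of a given series lands on the same worker. A standalone sketch of that fan-out with illustrative types:

    package main

    import (
    	"fmt"
    	"sync"
    )

    type refSample struct {
    	Ref uint64
    	T   int64
    	V   float64
    }

    // shardSamples fans samples out to n worker channels by series ref, in
    // batches of at most batchSize, mirroring the WAL replay loop above.
    func shardSamples(samples []refSample, workers []chan []refSample, batchSize int) {
    	n := len(workers)
    	shards := make([][]refSample, n)
    	for len(samples) > 0 {
    		m := batchSize
    		if len(samples) < m {
    			m = len(samples)
    		}
    		for _, s := range samples[:m] {
    			mod := s.Ref % uint64(n)
    			shards[mod] = append(shards[mod], s)
    		}
    		for i := 0; i < n; i++ {
    			if len(shards[i]) > 0 {
    				workers[i] <- shards[i]
    				shards[i] = nil
    			}
    		}
    		samples = samples[m:]
    	}
    }

    func main() {
    	workers := make([]chan []refSample, 2)
    	var wg sync.WaitGroup
    	for i := range workers {
    		workers[i] = make(chan []refSample, 4)
    		wg.Add(1)
    		go func(i int) {
    			defer wg.Done()
    			for batch := range workers[i] {
    				fmt.Println("worker", i, "got", len(batch), "samples")
    			}
    		}(i)
    	}

    	samples := []refSample{{Ref: 1}, {Ref: 2}, {Ref: 3}, {Ref: 4}}
    	shardSamples(samples, workers, 5000)
    	for i := range workers {
    		close(workers[i])
    	}
    	wg.Wait()
    }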
@@ -333,8 +390,14 @@ Outer:
 		return errors.Wrap(r.Err(), "read records")
 	}
 
-	if unknownRefs.Load() > 0 || unknownExemplarRefs.Load() > 0 || unknownMetadataRefs.Load() > 0 {
-		level.Warn(h.logger).Log("msg", "Unknown series references", "samples", unknownRefs.Load(), "exemplars", unknownExemplarRefs.Load(), "metadata", unknownMetadataRefs.Load())
+	if unknownRefs.Load()+unknownExemplarRefs.Load()+unknownHistogramRefs.Load()+unknownMetadataRefs.Load() > 0 {
+		level.Warn(h.logger).Log(
+			"msg", "Unknown series references",
+			"samples", unknownRefs.Load(),
+			"exemplars", unknownExemplarRefs.Load(),
+			"histograms", unknownHistogramRefs.Load(),
+			"metadata", unknownMetadataRefs.Load(),
+		)
 	}
 	if count := mmapOverlappingChunks.Load(); count > 0 {
 		level.Info(h.logger).Log("msg", "Overlapping m-map chunks on duplicate series records", "count", count)

@@ -402,25 +465,30 @@ func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*m
 }
 
 type walSubsetProcessor struct {
-	input  chan walSubsetProcessorInputItem
-	output chan []record.RefSample
+	input            chan walSubsetProcessorInputItem
+	output           chan []record.RefSample
+	histogramsOutput chan []record.RefHistogramSample
 }
 
 type walSubsetProcessorInputItem struct {
-	samples        []record.RefSample
-	existingSeries *memSeries
-	walSeriesRef   chunks.HeadSeriesRef
+	samples          []record.RefSample
+	histogramSamples []record.RefHistogramSample
+	existingSeries   *memSeries
+	walSeriesRef     chunks.HeadSeriesRef
 }
 
 func (wp *walSubsetProcessor) setup() {
-	wp.output = make(chan []record.RefSample, 300)
 	wp.input = make(chan walSubsetProcessorInputItem, 300)
+	wp.output = make(chan []record.RefSample, 300)
+	wp.histogramsOutput = make(chan []record.RefHistogramSample, 300)
 }
 
 func (wp *walSubsetProcessor) closeAndDrain() {
 	close(wp.input)
 	for range wp.output {
 	}
+	for range wp.histogramsOutput {
+	}
 }
 
 // If there is a buffer in the output chan, return it for reuse, otherwise return nil.

@@ -433,11 +501,24 @@ func (wp *walSubsetProcessor) reuseBuf() []record.RefSample {
 	return nil
 }
 
+// If there is a buffer in the output chan, return it for reuse, otherwise return nil.
+func (wp *walSubsetProcessor) reuseHistogramBuf() []record.RefHistogramSample {
+	select {
+	case buf := <-wp.histogramsOutput:
+		return buf[:0]
+	default:
+	}
+	return nil
+}
+
 // processWALSamples adds the samples it receives to the head and passes
 // the buffer received to an output channel for reuse.
-func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (unknownRefs, mmapOverlappingChunks uint64) {
+// Samples before the minValidTime timestamp are discarded.
+func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (unknownRefs, unknownHistogramRefs, mmapOverlappingChunks uint64) {
 	defer close(wp.output)
+	defer close(wp.histogramsOutput)
 
 	minValidTime := h.minValidTime.Load()
 	mint, maxt := int64(math.MaxInt64), int64(math.MinInt64)
 	chunkRange := h.chunkRange.Load()

@@ -460,6 +541,10 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp
 			if s.T <= ms.mmMaxTime {
 				continue
 			}
+			ms.isHistogramSeries = false
+			if s.T <= ms.mmMaxTime {
+				continue
+			}
 			if _, chunkCreated := ms.append(s.T, s.V, 0, h.chunkDiskMapper, chunkRange); chunkCreated {
 				h.metrics.chunksCreated.Inc()
 				h.metrics.chunks.Inc()

@@ -475,13 +560,43 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp
 		case wp.output <- in.samples:
 		default:
 		}
+
+		for _, s := range in.histogramSamples {
+			if s.T < minValidTime {
+				continue
+			}
+			ms := h.series.getByID(s.Ref)
+			if ms == nil {
+				unknownHistogramRefs++
+				continue
+			}
+			ms.isHistogramSeries = true
+			if s.T <= ms.mmMaxTime {
+				continue
+			}
+			if _, chunkCreated := ms.appendHistogram(s.T, s.H, 0, h.chunkDiskMapper, chunkRange); chunkCreated {
+				h.metrics.chunksCreated.Inc()
+				h.metrics.chunks.Inc()
+			}
+			if s.T > maxt {
+				maxt = s.T
+			}
+			if s.T < mint {
+				mint = s.T
+			}
+		}
+
+		select {
+		case wp.histogramsOutput <- in.histogramSamples:
+		default:
+		}
 	}
 	h.updateMinMaxTime(mint, maxt)
 
-	return unknownRefs, mmapOverlappingChunks
+	return unknownRefs, unknownHistogramRefs, mmapOverlappingChunks
 }
 
-func (h *Head) loadWBL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, lastMmapRef chunks.ChunkDiskMapperRef) (err error) {
+func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, lastMmapRef chunks.ChunkDiskMapperRef) (err error) {
 	// Track number of samples, m-map markers, that referenced a series we don't know about
 	// for error reporting.
 	var unknownRefs, mmapMarkerUnknownRefs atomic.Uint64

@@ -513,7 +628,7 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
 	defer func() {
 		// For CorruptionErr ensure to terminate all workers before exiting.
 		// We also wrap it to identify OOO WBL corruption.
-		_, ok := err.(*wal.CorruptionErr)
+		_, ok := err.(*wlog.CorruptionErr)
 		if ok {
 			err = &errLoadWbl{err: err}
 			for i := 0; i < n; i++ {

@@ -543,7 +658,7 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
 			samples := samplesPool.Get().([]record.RefSample)[:0]
 			samples, err = dec.Samples(rec, samples)
 			if err != nil {
-				decodeErr = &wal.CorruptionErr{
+				decodeErr = &wlog.CorruptionErr{
 					Err:     errors.Wrap(err, "decode samples"),
 					Segment: r.Segment(),
 					Offset:  r.Offset(),

@@ -555,7 +670,7 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
 			markers := markersPool.Get().([]record.RefMmapMarker)[:0]
 			markers, err = dec.MmapMarkers(rec, markers)
 			if err != nil {
-				decodeErr = &wal.CorruptionErr{
+				decodeErr = &wlog.CorruptionErr{
 					Err:     errors.Wrap(err, "decode mmap markers"),
 					Segment: r.Segment(),
 					Offset:  r.Offset(),

@@ -745,7 +860,7 @@ func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs uint64) {
 			}
 		}
 		wp.mx.Unlock()
 		wp.output <- samples
 	}
 
 	h.updateMinOOOMaxOOOTime(mint, maxt)

@@ -931,7 +1046,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) {
 	if err := os.MkdirAll(cpdirtmp, 0o777); err != nil {
 		return stats, errors.Wrap(err, "create chunk snapshot dir")
 	}
-	cp, err := wal.New(nil, nil, cpdirtmp, h.wal.CompressionEnabled())
+	cp, err := wlog.New(nil, nil, cpdirtmp, h.wal.CompressionEnabled())
 	if err != nil {
 		return stats, errors.Wrap(err, "open chunk snapshot")
 	}

@@ -1170,7 +1285,7 @@ func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSerie
 	}
 
 	start := time.Now()
-	sr, err := wal.NewSegmentsReader(dir)
+	sr, err := wlog.NewSegmentsReader(dir)
 	if err != nil {
 		return snapIdx, snapOffset, nil, errors.Wrap(err, "open chunk snapshot")
 	}

@@ -1241,7 +1356,7 @@ func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSerie
 		}(i, recordChan)
 	}
 
-	r := wal.NewReader(sr)
+	r := wlog.NewReader(sr)
 	var loopErr error
 Outer:
 	for r.Next() {
9 vendor/github.com/prometheus/prometheus/tsdb/index/index.go generated vendored

@@ -29,6 +29,7 @@ import (
 	"unsafe"

 	"github.com/pkg/errors"
+	"golang.org/x/exp/slices"

 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/storage"

@@ -819,7 +820,7 @@ func (w *Writer) writePostingsToTmpFiles() error {
 	for n := range w.labelNames {
 		names = append(names, n)
 	}
-	sort.Strings(names)
+	slices.Sort(names)

 	if err := w.f.Flush(); err != nil {
 		return err

@@ -1469,7 +1470,7 @@ func (r *Reader) SymbolTableSize() uint64 {
 func (r *Reader) SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) {
 	values, err := r.LabelValues(name, matchers...)
 	if err == nil && r.version == FormatV1 {
-		sort.Strings(values)
+		slices.Sort(values)
 	}
 	return values, err
 }

@@ -1571,7 +1572,7 @@ func (r *Reader) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) {
 		names = append(names, name)
 	}

-	sort.Strings(names)
+	slices.Sort(names)

 	return names, nil
 }

@@ -1743,7 +1744,7 @@ func (r *Reader) LabelNames(matchers ...*labels.Matcher) ([]string, error) {
 		}
 		labelNames = append(labelNames, name)
 	}
-	sort.Strings(labelNames)
+	slices.Sort(labelNames)
 	return labelNames, nil
 }
14 vendor/github.com/prometheus/prometheus/tsdb/index/postings.go generated vendored

@@ -21,6 +21,7 @@ import (
 	"sync"

 	"github.com/pkg/errors"
+	"golang.org/x/exp/slices"

 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/storage"

@@ -90,7 +91,7 @@ func (p *MemPostings) Symbols() StringIter {
 		res = append(res, k)
 	}

-	sort.Strings(res)
+	slices.Sort(res)
 	return NewStringListIter(res)
 }

@@ -239,11 +240,9 @@ func (p *MemPostings) EnsureOrder() {

 	for i := 0; i < n; i++ {
 		go func() {
-			var sortable seriesRefSlice
 			for job := range workc {
 				for _, l := range *job {
-					sortable = l
-					sort.Sort(&sortable)
+					slices.Sort(l)
 				}

 				*job = (*job)[:0]

@@ -830,13 +829,6 @@ func (it *bigEndianPostings) Err() error {
 	return nil
 }

-// seriesRefSlice attaches the methods of sort.Interface to []storage.SeriesRef, sorting in increasing order.
-type seriesRefSlice []storage.SeriesRef
-
-func (x seriesRefSlice) Len() int           { return len(x) }
-func (x seriesRefSlice) Less(i, j int) bool { return x[i] < x[j] }
-func (x seriesRefSlice) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
-
 // FindIntersectingPostings checks the intersection of p and candidates[i] for each i in candidates,
 // if intersection is non empty, then i is added to the indexes returned.
 // Returned indexes are not sorted.
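The hunk above drops the hand-rolled sort.Interface shim in favor of a generic sort. A minimal sketch of why that works (SeriesRef below is a stand-in for storage.SeriesRef, which is likewise an unsigned integer type and therefore satisfies the ordered constraint of slices.Sort):

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

type SeriesRef uint64 // stand-in for storage.SeriesRef

func main() {
	refs := []SeriesRef{42, 7, 19}
	slices.Sort(refs) // no Len/Less/Swap boilerplate needed
	fmt.Println(refs) // [7 19 42]
}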
4 vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go generated vendored

@@ -41,7 +41,7 @@ func (o *OOOChunk) Insert(t int64, v float64) bool {

 	if i >= len(o.samples) {
 		// none found. append it at the end
-		o.samples = append(o.samples, sample{t, v})
+		o.samples = append(o.samples, sample{t, v, nil, nil})
 		return true
 	}

@@ -52,7 +52,7 @@ func (o *OOOChunk) Insert(t int64, v float64) bool {
 	// Expand length by 1 to make room. use a zero sample, we will overwrite it anyway.
 	o.samples = append(o.samples, sample{})
 	copy(o.samples[i+1:], o.samples[i:])
-	o.samples[i] = sample{t, v}
+	o.samples[i] = sample{t, v, nil, nil}

 	return true
 }
216 vendor/github.com/prometheus/prometheus/tsdb/querier.go generated vendored

@@ -14,13 +14,15 @@
 package tsdb

 import (
+	"fmt"
 	"math"
-	"sort"
 	"strings"
 	"unicode/utf8"

 	"github.com/pkg/errors"
+	"golang.org/x/exp/slices"

+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"

@@ -317,7 +319,7 @@ func postingsForMatcher(ix IndexReader, m *labels.Matcher) (index.Postings, erro
 	if m.Type == labels.MatchRegexp {
 		setMatches := findSetMatches(m.GetRegexString())
 		if len(setMatches) > 0 {
-			sort.Strings(setMatches)
+			slices.Sort(setMatches)
 			return ix.Postings(m.Name, setMatches...)
 		}
 	}

@@ -344,7 +346,7 @@ func postingsForMatcher(ix IndexReader, m *labels.Matcher) (index.Postings, erro
 	}

 	if !isSorted {
-		sort.Strings(res)
+		slices.Sort(res)
 	}
 	return ix.Postings(m.Name, res...)
 }

@@ -369,7 +371,7 @@ func inversePostingsForMatcher(ix IndexReader, m *labels.Matcher) (index.Posting
 	}

 	if !isSorted {
-		sort.Strings(res)
+		slices.Sort(res)
 	}
 	return ix.Postings(m.Name, res...)
 }

@@ -526,16 +528,20 @@ func (b *blockBaseSeriesSet) Err() error {

 func (b *blockBaseSeriesSet) Warnings() storage.Warnings { return nil }

-// populateWithDelGenericSeriesIterator allows to iterate over given chunk metas. In each iteration it ensures
-// that chunks are trimmed based on given tombstones interval if any.
+// populateWithDelGenericSeriesIterator allows to iterate over given chunk
+// metas. In each iteration it ensures that chunks are trimmed based on given
+// tombstones interval if any.
 //
-// populateWithDelGenericSeriesIterator assumes that chunks that would be fully removed by intervals are filtered out in previous phase.
+// populateWithDelGenericSeriesIterator assumes that chunks that would be fully
+// removed by intervals are filtered out in previous phase.
 //
-// On each iteration currChkMeta is available. If currDelIter is not nil, it means that chunk iterator in currChkMeta
-// is invalid and chunk rewrite is needed, currDelIter should be used.
+// On each iteration currChkMeta is available. If currDelIter is not nil, it
+// means that the chunk iterator in currChkMeta is invalid and a chunk rewrite
+// is needed, for which currDelIter should be used.
 type populateWithDelGenericSeriesIterator struct {
 	chunks ChunkReader
-	// chks are expected to be sorted by minTime and should be related to the same, single series.
+	// chks are expected to be sorted by minTime and should be related to
+	// the same, single series.
 	chks []chunks.Meta

 	i int

@@ -587,15 +593,17 @@ func (p *populateWithDelGenericSeriesIterator) next() bool {
 	// The chunk.Bytes() method is not safe for open chunks hence the re-encoding.
 	// This happens when snapshotting the head block or just fetching chunks from TSDB.
 	//
-	// TODO think how to avoid the typecasting to verify when it is head block.
+	// TODO(codesome): think how to avoid the typecasting to verify when it is head block.
 	_, isSafeChunk := p.currChkMeta.Chunk.(*safeChunk)
 	if len(p.bufIter.Intervals) == 0 && !(isSafeChunk && p.currChkMeta.MaxTime == math.MaxInt64) {
-		// If there are no overlap with deletion intervals AND it's NOT an "open" head chunk, we can take chunk as it is.
+		// If there is no overlap with deletion intervals AND it's NOT
+		// an "open" head chunk, we can take chunk as it is.
 		p.currDelIter = nil
 		return true
 	}

-	// We don't want full chunk or it's potentially still opened, take just part of it.
+	// We don't want the full chunk, or it's potentially still opened, take
+	// just a part of it.
 	p.bufIter.Iter = p.currChkMeta.Chunk.Iterator(nil)
 	p.currDelIter = p.bufIter
 	return true

@@ -618,9 +626,11 @@ type populateWithDelSeriesIterator struct {
 	curr chunkenc.Iterator
 }

-func (p *populateWithDelSeriesIterator) Next() bool {
-	if p.curr != nil && p.curr.Next() {
-		return true
+func (p *populateWithDelSeriesIterator) Next() chunkenc.ValueType {
+	if p.curr != nil {
+		if valueType := p.curr.Next(); valueType != chunkenc.ValNone {
+			return valueType
+		}
 	}

 	for p.next() {

@@ -629,26 +639,42 @@ func (p *populateWithDelSeriesIterator) Next() bool {
 		} else {
 			p.curr = p.currChkMeta.Chunk.Iterator(nil)
 		}
-		if p.curr.Next() {
-			return true
+		if valueType := p.curr.Next(); valueType != chunkenc.ValNone {
+			return valueType
 		}
 	}
-	return false
+	return chunkenc.ValNone
 }

-func (p *populateWithDelSeriesIterator) Seek(t int64) bool {
-	if p.curr != nil && p.curr.Seek(t) {
-		return true
-	}
-	for p.Next() {
-		if p.curr.Seek(t) {
-			return true
+func (p *populateWithDelSeriesIterator) Seek(t int64) chunkenc.ValueType {
+	if p.curr != nil {
+		if valueType := p.curr.Seek(t); valueType != chunkenc.ValNone {
+			return valueType
 		}
 	}
-	return false
+	for p.Next() != chunkenc.ValNone {
+		if valueType := p.curr.Seek(t); valueType != chunkenc.ValNone {
+			return valueType
+		}
+	}
+	return chunkenc.ValNone
 }

-func (p *populateWithDelSeriesIterator) At() (int64, float64) { return p.curr.At() }
+func (p *populateWithDelSeriesIterator) At() (int64, float64) {
+	return p.curr.At()
+}
+
+func (p *populateWithDelSeriesIterator) AtHistogram() (int64, *histogram.Histogram) {
+	return p.curr.AtHistogram()
+}
+
+func (p *populateWithDelSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
+	return p.curr.AtFloatHistogram()
+}
+
+func (p *populateWithDelSeriesIterator) AtT() int64 {
+	return p.curr.AtT()
+}

 func (p *populateWithDelSeriesIterator) Err() error {
 	if err := p.populateWithDelGenericSeriesIterator.Err(); err != nil {

@@ -670,38 +696,94 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool {
 	if !p.next() {
 		return false
 	}

 	p.curr = p.currChkMeta
 	if p.currDelIter == nil {
 		return true
 	}

-	// Re-encode the chunk if iterator is provider. This means that it has some samples to be deleted or chunk is opened.
-	newChunk := chunkenc.NewXORChunk()
-	app, err := newChunk.Appender()
-	if err != nil {
-		p.err = err
-		return false
-	}
-
-	if !p.currDelIter.Next() {
+	valueType := p.currDelIter.Next()
+	if valueType == chunkenc.ValNone {
 		if err := p.currDelIter.Err(); err != nil {
 			p.err = errors.Wrap(err, "iterate chunk while re-encoding")
 			return false
 		}

 		// Empty chunk, this should not happen, as we assume full deletions being filtered before this iterator.
 		p.err = errors.Wrap(err, "populateWithDelChunkSeriesIterator: unexpected empty chunk found while rewriting chunk")
 		return false
 	}

-	t, v := p.currDelIter.At()
-	p.curr.MinTime = t
-	app.Append(t, v)
+	// Re-encode the chunk if iterator is provider. This means that it has
+	// some samples to be deleted or chunk is opened.
+	var (
+		newChunk chunkenc.Chunk
+		app      chunkenc.Appender
+		t        int64
+		err      error
+	)
+	switch valueType {
+	case chunkenc.ValHistogram:
+		newChunk = chunkenc.NewHistogramChunk()
+		if app, err = newChunk.Appender(); err != nil {
+			break
+		}
+		if hc, ok := p.currChkMeta.Chunk.(*chunkenc.HistogramChunk); ok {
+			newChunk.(*chunkenc.HistogramChunk).SetCounterResetHeader(hc.GetCounterResetHeader())
+		}
+		var h *histogram.Histogram
+		t, h = p.currDelIter.AtHistogram()
+		p.curr.MinTime = t

-	for p.currDelIter.Next() {
+		app.AppendHistogram(t, h)
+		for vt := p.currDelIter.Next(); vt != chunkenc.ValNone; vt = p.currDelIter.Next() {
+			if vt != chunkenc.ValHistogram {
+				err = fmt.Errorf("found value type %v in histogram chunk", vt)
+				break
+			}
+			t, h = p.currDelIter.AtHistogram()
+
+			// Defend against corrupted chunks.
+			pI, nI, okToAppend, counterReset := app.(*chunkenc.HistogramAppender).Appendable(h)
+			if len(pI)+len(nI) > 0 {
+				err = fmt.Errorf(
+					"bucket layout has changed unexpectedly: %d positive and %d negative bucket interjections required",
+					len(pI), len(nI),
+				)
+				break
+			}
+			if counterReset {
+				err = errors.New("detected unexpected counter reset in histogram")
+				break
+			}
+			if !okToAppend {
+				err = errors.New("unable to append histogram due to unexpected schema change")
+				break
+			}
+
+			app.AppendHistogram(t, h)
+		}
+	case chunkenc.ValFloat:
+		newChunk = chunkenc.NewXORChunk()
+		if app, err = newChunk.Appender(); err != nil {
+			break
+		}
+		var v float64
+		t, v = p.currDelIter.At()
+		p.curr.MinTime = t
+		app.Append(t, v)
+		for vt := p.currDelIter.Next(); vt != chunkenc.ValNone; vt = p.currDelIter.Next() {
+			if vt != chunkenc.ValFloat {
+				err = fmt.Errorf("found value type %v in float chunk", vt)
+				break
+			}
+			t, v = p.currDelIter.At()
+			app.Append(t, v)
+		}
+
+	default:
+		// TODO(beorn7): Need FloatHistogram eventually.
+		err = fmt.Errorf("populateWithDelChunkSeriesIterator: value type %v unsupported", valueType)
+	}
+
+	if err != nil {
+		p.err = errors.Wrap(err, "iterate chunk while re-encoding")
+		return false
+	}
 	if err := p.currDelIter.Err(); err != nil {
 		p.err = errors.Wrap(err, "iterate chunk while re-encoding")

@@ -838,19 +920,34 @@ func (it *DeletedIterator) At() (int64, float64) {
 	return it.Iter.At()
 }

-func (it *DeletedIterator) Seek(t int64) bool {
+func (it *DeletedIterator) AtHistogram() (int64, *histogram.Histogram) {
+	t, h := it.Iter.AtHistogram()
+	return t, h
+}
+
+func (it *DeletedIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) {
+	t, h := it.Iter.AtFloatHistogram()
+	return t, h
+}
+
+func (it *DeletedIterator) AtT() int64 {
+	return it.Iter.AtT()
+}
+
+func (it *DeletedIterator) Seek(t int64) chunkenc.ValueType {
 	if it.Iter.Err() != nil {
-		return false
+		return chunkenc.ValNone
 	}
-	if ok := it.Iter.Seek(t); !ok {
-		return false
+	valueType := it.Iter.Seek(t)
+	if valueType == chunkenc.ValNone {
+		return chunkenc.ValNone
 	}

 	// Now double check if the entry falls into a deleted interval.
-	ts, _ := it.At()
+	ts := it.AtT()
 	for _, itv := range it.Intervals {
 		if ts < itv.Mint {
-			return true
+			return valueType
 		}

 		if ts > itv.Maxt {

@@ -863,27 +960,26 @@ func (it *DeletedIterator) Seek(t int64) bool {
 	}

 	// The timestamp is greater than all the deleted intervals.
-	return true
+	return valueType
 }

-func (it *DeletedIterator) Next() bool {
+func (it *DeletedIterator) Next() chunkenc.ValueType {
 Outer:
-	for it.Iter.Next() {
-		ts, _ := it.Iter.At()
-
+	for valueType := it.Iter.Next(); valueType != chunkenc.ValNone; valueType = it.Iter.Next() {
+		ts := it.AtT()
 		for _, tr := range it.Intervals {
 			if tr.InBounds(ts) {
 				continue Outer
 			}

 			if ts <= tr.Maxt {
-				return true
+				return valueType
 			}
 			it.Intervals = it.Intervals[1:]
 		}
-		return true
+		return valueType
 	}
-	return false
+	return chunkenc.ValNone
 }

 func (it *DeletedIterator) Err() error { return it.Iter.Err() }
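With Next and Seek now returning a chunkenc.ValueType instead of a bool, consumers of DeletedIterator switch on the returned type rather than checking truthiness. A small sketch under that contract (printUndeleted and inner are illustrative, not part of the commit):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/tombstones"
)

// printUndeleted drains an iterator while masking the deleted range
// [100, 200]; inner is any chunkenc.Iterator over the raw chunk.
func printUndeleted(inner chunkenc.Iterator) error {
	it := &tsdb.DeletedIterator{
		Iter:      inner,
		Intervals: tombstones.Intervals{{Mint: 100, Maxt: 200}},
	}
	for vt := it.Next(); vt != chunkenc.ValNone; vt = it.Next() {
		switch vt {
		case chunkenc.ValFloat:
			t, v := it.At()
			fmt.Println(t, v) // samples inside [100, 200] are skipped
		case chunkenc.ValHistogram:
			t, h := it.AtHistogram()
			fmt.Println(t, h)
		}
	}
	return it.Err()
}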
172 vendor/github.com/prometheus/prometheus/tsdb/record/record.go generated vendored

@@ -21,6 +21,7 @@ import (

 	"github.com/pkg/errors"

+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/textparse"
 	"github.com/prometheus/prometheus/storage"

@@ -47,6 +48,8 @@ const (
 	MmapMarkers Type = 5
 	// Metadata is used to match WAL records of type Metadata.
 	Metadata Type = 6
+	// HistogramSamples is used to match WAL records of type Histograms.
+	HistogramSamples Type = 7
 )

 func (rt Type) String() string {

@@ -55,10 +58,12 @@ func (rt Type) String() string {
 		return "series"
 	case Samples:
 		return "samples"
-	case Exemplars:
-		return "exemplars"
 	case Tombstones:
 		return "tombstones"
+	case Exemplars:
+		return "exemplars"
+	case HistogramSamples:
+		return "histogram_samples"
 	case MmapMarkers:
 		return "mmapmarkers"
 	case Metadata:

@@ -72,14 +77,14 @@ func (rt Type) String() string {
 type MetricType uint8

 const (
-	UnknownMT      MetricType = 0
-	Counter        MetricType = 1
-	Gauge          MetricType = 2
-	Histogram      MetricType = 3
-	GaugeHistogram MetricType = 4
-	Summary        MetricType = 5
-	Info           MetricType = 6
-	Stateset       MetricType = 7
+	UnknownMT       MetricType = 0
+	Counter         MetricType = 1
+	Gauge           MetricType = 2
+	HistogramSample MetricType = 3
+	GaugeHistogram  MetricType = 4
+	Summary         MetricType = 5
+	Info            MetricType = 6
+	Stateset        MetricType = 7
 )

 func GetMetricType(t textparse.MetricType) uint8 {

@@ -89,7 +94,7 @@ func GetMetricType(t textparse.MetricType) uint8 {
 	case textparse.MetricTypeGauge:
 		return uint8(Gauge)
 	case textparse.MetricTypeHistogram:
-		return uint8(Histogram)
+		return uint8(HistogramSample)
 	case textparse.MetricTypeGaugeHistogram:
 		return uint8(GaugeHistogram)
 	case textparse.MetricTypeSummary:

@@ -109,7 +114,7 @@ func ToTextparseMetricType(m uint8) textparse.MetricType {
 		return textparse.MetricTypeCounter
 	case uint8(Gauge):
 		return textparse.MetricTypeGauge
-	case uint8(Histogram):
+	case uint8(HistogramSample):
 		return textparse.MetricTypeHistogram
 	case uint8(GaugeHistogram):
 		return textparse.MetricTypeGaugeHistogram

@@ -139,6 +144,7 @@ type RefSeries struct {
 }

 // RefSample is a timestamp/value pair associated with a reference to a series.
+// TODO(beorn7): Perhaps make this "polymorphic", including histogram and float-histogram pointers? Then get rid of RefHistogramSample.
 type RefSample struct {
 	Ref chunks.HeadSeriesRef
 	T   int64

@@ -161,6 +167,13 @@ type RefExemplar struct {
 	Labels labels.Labels
 }

+// RefHistogramSample is a histogram.
+type RefHistogramSample struct {
+	Ref chunks.HeadSeriesRef
+	T   int64
+	H   *histogram.Histogram
+}
+
 // RefMmapMarker marks that the all the samples of the given series until now have been m-mapped to disk.
 type RefMmapMarker struct {
 	Ref chunks.HeadSeriesRef

@@ -178,7 +191,7 @@ func (d *Decoder) Type(rec []byte) Type {
 		return Unknown
 	}
 	switch t := Type(rec[0]); t {
-	case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata:
+	case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata, HistogramSamples:
 		return t
 	}
 	return Unknown

@@ -392,6 +405,88 @@ func (d *Decoder) MmapMarkers(rec []byte, markers []RefMmapMarker) ([]RefMmapMar
 	return markers, nil
 }

+func (d *Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample) ([]RefHistogramSample, error) {
+	dec := encoding.Decbuf{B: rec}
+	t := Type(dec.Byte())
+	if t != HistogramSamples {
+		return nil, errors.New("invalid record type")
+	}
+	if dec.Len() == 0 {
+		return histograms, nil
+	}
+	var (
+		baseRef  = dec.Be64()
+		baseTime = dec.Be64int64()
+	)
+	for len(dec.B) > 0 && dec.Err() == nil {
+		dref := dec.Varint64()
+		dtime := dec.Varint64()
+
+		rh := RefHistogramSample{
+			Ref: chunks.HeadSeriesRef(baseRef + uint64(dref)),
+			T:   baseTime + dtime,
+			H: &histogram.Histogram{
+				Schema:        0,
+				ZeroThreshold: 0,
+				ZeroCount:     0,
+				Count:         0,
+				Sum:           0,
+			},
+		}
+
+		rh.H.Schema = int32(dec.Varint64())
+		rh.H.ZeroThreshold = math.Float64frombits(dec.Be64())
+
+		rh.H.ZeroCount = dec.Uvarint64()
+		rh.H.Count = dec.Uvarint64()
+		rh.H.Sum = math.Float64frombits(dec.Be64())
+
+		l := dec.Uvarint()
+		if l > 0 {
+			rh.H.PositiveSpans = make([]histogram.Span, l)
+		}
+		for i := range rh.H.PositiveSpans {
+			rh.H.PositiveSpans[i].Offset = int32(dec.Varint64())
+			rh.H.PositiveSpans[i].Length = dec.Uvarint32()
+		}
+
+		l = dec.Uvarint()
+		if l > 0 {
+			rh.H.NegativeSpans = make([]histogram.Span, l)
+		}
+		for i := range rh.H.NegativeSpans {
+			rh.H.NegativeSpans[i].Offset = int32(dec.Varint64())
+			rh.H.NegativeSpans[i].Length = dec.Uvarint32()
+		}
+
+		l = dec.Uvarint()
+		if l > 0 {
+			rh.H.PositiveBuckets = make([]int64, l)
+		}
+		for i := range rh.H.PositiveBuckets {
+			rh.H.PositiveBuckets[i] = dec.Varint64()
+		}
+
+		l = dec.Uvarint()
+		if l > 0 {
+			rh.H.NegativeBuckets = make([]int64, l)
+		}
+		for i := range rh.H.NegativeBuckets {
+			rh.H.NegativeBuckets[i] = dec.Varint64()
+		}
+
+		histograms = append(histograms, rh)
+	}
+
+	if dec.Err() != nil {
+		return nil, errors.Wrapf(dec.Err(), "decode error after %d histograms", len(histograms))
+	}
+	if len(dec.B) > 0 {
+		return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B))
+	}
+	return histograms, nil
+}
+
 // Encoder encodes series, sample, and tombstones records.
 // The zero value is ready to use.
 type Encoder struct{}

@@ -517,3 +612,54 @@ func (e *Encoder) MmapMarkers(markers []RefMmapMarker, b []byte) []byte {

 	return buf.Get()
 }
+
+func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) []byte {
+	buf := encoding.Encbuf{B: b}
+	buf.PutByte(byte(HistogramSamples))
+
+	if len(histograms) == 0 {
+		return buf.Get()
+	}
+
+	// Store base timestamp and base reference number of first histogram.
+	// All histograms encode their timestamp and ref as delta to those.
+	first := histograms[0]
+	buf.PutBE64(uint64(first.Ref))
+	buf.PutBE64int64(first.T)
+
+	for _, h := range histograms {
+		buf.PutVarint64(int64(h.Ref) - int64(first.Ref))
+		buf.PutVarint64(h.T - first.T)
+
+		buf.PutVarint64(int64(h.H.Schema))
+		buf.PutBE64(math.Float64bits(h.H.ZeroThreshold))
+
+		buf.PutUvarint64(h.H.ZeroCount)
+		buf.PutUvarint64(h.H.Count)
+		buf.PutBE64(math.Float64bits(h.H.Sum))
+
+		buf.PutUvarint(len(h.H.PositiveSpans))
+		for _, s := range h.H.PositiveSpans {
+			buf.PutVarint64(int64(s.Offset))
+			buf.PutUvarint32(s.Length)
+		}
+
+		buf.PutUvarint(len(h.H.NegativeSpans))
+		for _, s := range h.H.NegativeSpans {
+			buf.PutVarint64(int64(s.Offset))
+			buf.PutUvarint32(s.Length)
+		}
+
+		buf.PutUvarint(len(h.H.PositiveBuckets))
+		for _, b := range h.H.PositiveBuckets {
+			buf.PutVarint64(b)
+		}
+
+		buf.PutUvarint(len(h.H.NegativeBuckets))
+		for _, b := range h.H.NegativeBuckets {
+			buf.PutVarint64(b)
+		}
+	}
+
+	return buf.Get()
+}
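The new encoder and decoder are symmetric, so a histogram record can be round-tripped in memory. A minimal sketch using only the APIs added above (the concrete histogram values are made up for illustration):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/tsdb/chunks"
	"github.com/prometheus/prometheus/tsdb/record"
)

func main() {
	var (
		enc record.Encoder
		dec record.Decoder
	)
	in := []record.RefHistogramSample{{
		Ref: chunks.HeadSeriesRef(1),
		T:   1000,
		H: &histogram.Histogram{
			Schema:          0,
			Count:           4,
			Sum:             18.4,
			PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
			PositiveBuckets: []int64{1, 1}, // delta-encoded bucket counts
		},
	}}

	rec := enc.HistogramSamples(in, nil) // first byte is record.HistogramSamples
	out, err := dec.HistogramSamples(rec, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].T, out[0].H.Sum) // 1000 18.4
}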
28 vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go generated vendored

@@ -21,6 +21,7 @@ import (
 	"github.com/go-kit/log"

 	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
 )

 var ErrInvalidTimes = fmt.Errorf("max time is lesser than min time")

@@ -53,13 +54,34 @@ func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger l
 		ref := storage.SeriesRef(0)
 		it := s.Iterator()
 		lset := s.Labels()
-		for it.Next() {
-			t, v := it.At()
-			ref, err = app.Append(ref, lset, t, v)
+		typ := it.Next()
+		lastTyp := typ
+		for ; typ != chunkenc.ValNone; typ = it.Next() {
+			if lastTyp != typ {
+				// The behaviour of appender is undefined if samples of different types
+				// are appended to the same series in a single Commit().
+				if err = app.Commit(); err != nil {
+					return "", err
+				}
+				app = w.Appender(ctx)
+				sampleCount = 0
+			}
+
+			switch typ {
+			case chunkenc.ValFloat:
+				t, v := it.At()
+				ref, err = app.Append(ref, lset, t, v)
+			case chunkenc.ValHistogram:
+				t, h := it.AtHistogram()
+				ref, err = app.AppendHistogram(ref, lset, t, h)
+			default:
+				return "", fmt.Errorf("unknown sample type %s", typ.String())
+			}
 			if err != nil {
 				return "", err
 			}
 			sampleCount++
+			lastTyp = typ
 		}
 		if it.Err() != nil {
 			return "", it.Err()
57 vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go generated vendored

@@ -14,6 +14,9 @@
 package tsdbutil

 import (
+	"fmt"
+
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/tsdb/chunks"
 )

@@ -26,6 +29,9 @@ type Samples interface {
 type Sample interface {
 	T() int64
 	V() float64
+	H() *histogram.Histogram
+	FH() *histogram.FloatHistogram
+	Type() chunkenc.ValueType
 }

 type SampleSlice []Sample

@@ -33,10 +39,12 @@ type SampleSlice []Sample
 func (s SampleSlice) Get(i int) Sample { return s[i] }
 func (s SampleSlice) Len() int         { return len(s) }

+// ChunkFromSamples requires all samples to have the same type.
 func ChunkFromSamples(s []Sample) chunks.Meta {
 	return ChunkFromSamplesGeneric(SampleSlice(s))
 }

+// ChunkFromSamplesGeneric requires all samples to have the same type.
 func ChunkFromSamplesGeneric(s Samples) chunks.Meta {
 	mint, maxt := int64(0), int64(0)

@@ -44,11 +52,29 @@ func ChunkFromSamplesGeneric(s Samples) chunks.Meta {
 		mint, maxt = s.Get(0).T(), s.Get(s.Len()-1).T()
 	}

-	c := chunkenc.NewXORChunk()
+	if s.Len() == 0 {
+		return chunks.Meta{
+			Chunk: chunkenc.NewXORChunk(),
+		}
+	}
+
+	sampleType := s.Get(0).Type()
+	c, err := chunkenc.NewEmptyChunk(sampleType.ChunkEncoding())
+	if err != nil {
+		panic(err) // TODO(codesome): dont panic.
+	}
+
 	ca, _ := c.Appender()

 	for i := 0; i < s.Len(); i++ {
-		ca.Append(s.Get(i).T(), s.Get(i).V())
+		switch sampleType {
+		case chunkenc.ValFloat:
+			ca.Append(s.Get(i).T(), s.Get(i).V())
+		case chunkenc.ValHistogram:
+			ca.AppendHistogram(s.Get(i).T(), s.Get(i).H())
+		default:
+			panic(fmt.Sprintf("unknown sample type %s", sampleType.String()))
+		}
 	}
 	return chunks.Meta{
 		MinTime: mint,

@@ -58,8 +84,10 @@ func ChunkFromSamplesGeneric(s Samples) chunks.Meta {
 }

 type sample struct {
-	t int64
-	v float64
+	t  int64
+	v  float64
+	h  *histogram.Histogram
+	fh *histogram.FloatHistogram
 }

 func (s sample) T() int64 {

@@ -70,11 +98,30 @@ func (s sample) V() float64 {
 	return s.v
 }

+func (s sample) H() *histogram.Histogram {
+	return s.h
+}
+
+func (s sample) FH() *histogram.FloatHistogram {
+	return s.fh
+}
+
+func (s sample) Type() chunkenc.ValueType {
+	switch {
+	case s.h != nil:
+		return chunkenc.ValHistogram
+	case s.fh != nil:
+		return chunkenc.ValFloatHistogram
+	default:
+		return chunkenc.ValFloat
+	}
+}
+
 // PopulatedChunk creates a chunk populated with samples every second starting at minTime
 func PopulatedChunk(numSamples int, minTime int64) chunks.Meta {
 	samples := make([]Sample, numSamples)
 	for i := 0; i < numSamples; i++ {
-		samples[i] = sample{minTime + int64(i*1000), 1.0}
+		samples[i] = sample{t: minTime + int64(i*1000), v: 1.0}
 	}
 	return ChunkFromSamples(samples)
 }
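Since tsdbutil.Sample gained H(), FH() and Type(), any external implementation must now provide all five methods; ChunkFromSamplesGeneric then picks the chunk encoding from the first sample's type. A sketch with a hypothetical floatSample type:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/tsdbutil"
)

// floatSample is a hypothetical implementation of the widened
// tsdbutil.Sample interface; only the float accessors carry data.
type floatSample struct {
	t int64
	v float64
}

func (s floatSample) T() int64                      { return s.t }
func (s floatSample) V() float64                    { return s.v }
func (s floatSample) H() *histogram.Histogram       { return nil }
func (s floatSample) FH() *histogram.FloatHistogram { return nil }
func (s floatSample) Type() chunkenc.ValueType      { return chunkenc.ValFloat }

func main() {
	meta := tsdbutil.ChunkFromSamples([]tsdbutil.Sample{
		floatSample{t: 0, v: 1},
		floatSample{t: 1000, v: 2},
	})
	fmt.Println(meta.MinTime, meta.Chunk.NumSamples()) // 0 2
}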
8 vendor/github.com/prometheus/prometheus/tsdb/wal.go generated vendored

@@ -37,7 +37,7 @@ import (
 	"github.com/prometheus/prometheus/tsdb/fileutil"
 	"github.com/prometheus/prometheus/tsdb/record"
 	"github.com/prometheus/prometheus/tsdb/tombstones"
-	"github.com/prometheus/prometheus/tsdb/wal"
+	"github.com/prometheus/prometheus/tsdb/wlog"
 )

 // WALEntryType indicates what data a WAL entry contains.

@@ -89,7 +89,7 @@ func newWalMetrics(r prometheus.Registerer) *walMetrics {
 // WAL is a write ahead log that can log new series labels and samples.
 // It must be completely read before new entries are logged.
 //
-// DEPRECATED: use wal pkg combined with the record codex instead.
+// DEPRECATED: use wlog pkg combined with the record codex instead.
 type WAL interface {
 	Reader() WALReader
 	LogSeries([]record.RefSeries) error

@@ -146,7 +146,7 @@ func newCRC32() hash.Hash32 {

 // SegmentWAL is a write ahead log for series data.
 //
-// DEPRECATED: use wal pkg combined with the record coders instead.
+// DEPRECATED: use wlog pkg combined with the record coders instead.
 type SegmentWAL struct {
 	mtx     sync.Mutex
 	metrics *walMetrics

@@ -1229,7 +1229,7 @@ func MigrateWAL(logger log.Logger, dir string) (err error) {
 	if err := os.RemoveAll(tmpdir); err != nil {
 		return errors.Wrap(err, "cleanup replacement dir")
 	}
-	repl, err := wal.New(logger, nil, tmpdir, false)
+	repl, err := wlog.New(logger, nil, tmpdir, false)
 	if err != nil {
 		return errors.Wrap(err, "open new WAL")
 	}
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-package wal
+package wlog

 import (
 	"fmt"

@@ -38,12 +38,12 @@
 // CheckpointStats returns stats about a created checkpoint.
 type CheckpointStats struct {
 	DroppedSeries     int
-	DroppedSamples    int
+	DroppedSamples    int // Includes histograms.
 	DroppedTombstones int
 	DroppedExemplars  int
 	DroppedMetadata   int
 	TotalSeries       int // Processed series including dropped ones.
-	TotalSamples      int // Processed samples including dropped ones.
+	TotalSamples      int // Processed float and histogram samples including dropped ones.
 	TotalTombstones   int // Processed tombstones including dropped ones.
 	TotalExemplars    int // Processed exemplars including dropped ones.
 	TotalMetadata     int // Processed metadata including dropped ones.

@@ -93,7 +93,7 @@ const checkpointPrefix = "checkpoint."
 // segmented format as the original WAL itself.
 // This makes it easy to read it through the WAL package and concatenate
 // it with the original WAL.
-func Checkpoint(logger log.Logger, w *WAL, from, to int, keep func(id chunks.HeadSeriesRef) bool, mint int64) (*CheckpointStats, error) {
+func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.HeadSeriesRef) bool, mint int64) (*CheckpointStats, error) {
 	stats := &CheckpointStats{}
 	var sgmReader io.ReadCloser

@@ -148,20 +148,21 @@ func Checkpoint(logger log.Logger, w *WAL, from, to int, keep func(id chunks.Hea
 	r := NewReader(sgmReader)

 	var (
-		series    []record.RefSeries
-		samples   []record.RefSample
-		tstones   []tombstones.Stone
-		exemplars []record.RefExemplar
-		metadata  []record.RefMetadata
-		dec       record.Decoder
-		enc       record.Encoder
-		buf       []byte
-		recs      [][]byte
+		series           []record.RefSeries
+		samples          []record.RefSample
+		histogramSamples []record.RefHistogramSample
+		tstones          []tombstones.Stone
+		exemplars        []record.RefExemplar
+		metadata         []record.RefMetadata
+		dec              record.Decoder
+		enc              record.Encoder
+		buf              []byte
+		recs             [][]byte

 		latestMetadataMap = make(map[chunks.HeadSeriesRef]record.RefMetadata)
 	)
 	for r.Next() {
-		series, samples, tstones, exemplars, metadata = series[:0], samples[:0], tstones[:0], exemplars[:0], metadata[:0]
+		series, samples, histogramSamples, tstones, exemplars, metadata = series[:0], samples[:0], histogramSamples[:0], tstones[:0], exemplars[:0], metadata[:0]

 		// We don't reset the buffer since we batch up multiple records
 		// before writing them to the checkpoint.

@@ -206,6 +207,24 @@ func Checkpoint(logger log.Logger, w *WAL, from, to int, keep func(id chunks.Hea
 			stats.TotalSamples += len(samples)
 			stats.DroppedSamples += len(samples) - len(repl)

+		case record.HistogramSamples:
+			histogramSamples, err = dec.HistogramSamples(rec, histogramSamples)
+			if err != nil {
+				return nil, errors.Wrap(err, "decode histogram samples")
+			}
+			// Drop irrelevant histogramSamples in place.
+			repl := histogramSamples[:0]
+			for _, h := range histogramSamples {
+				if h.T >= mint {
+					repl = append(repl, h)
+				}
+			}
+			if len(repl) > 0 {
+				buf = enc.HistogramSamples(repl, buf)
+			}
+			stats.TotalSamples += len(samples)
+			stats.DroppedSamples += len(samples) - len(repl)
+
 		case record.Tombstones:
 			tstones, err = dec.Tombstones(rec, tstones)
 			if err != nil {
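The new record.HistogramSamples case above filters in place with the usual Go slice-reuse idiom: repl shares the backing array of histogramSamples, so no allocation happens. As a standalone sketch (filterFromMint is illustrative, not part of the commit):

// filterFromMint keeps only samples at or after mint, reusing the input
// slice's backing array exactly like the checkpoint code above.
func filterFromMint(samples []record.RefHistogramSample, mint int64) []record.RefHistogramSample {
	repl := samples[:0] // same backing array, zero allocations
	for _, s := range samples {
		if s.T >= mint {
			repl = append(repl, s)
		}
	}
	return repl
}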
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-package wal
+package wlog

 import (
 	"encoding/binary"

@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-package wal
+package wlog

 import (
 	"encoding/binary"

@@ -11,7 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-package wal
+package wlog

 import (
 	"fmt"

@@ -19,7 +19,6 @@ import (
 	"math"
 	"os"
 	"path"
-	"sort"
 	"strconv"
 	"strings"
 	"time"

@@ -28,6 +27,7 @@ import (
 	"github.com/go-kit/log/level"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
+	"golang.org/x/exp/slices"

 	"github.com/prometheus/prometheus/model/timestamp"
 	"github.com/prometheus/prometheus/tsdb/record"

@@ -49,6 +49,7 @@ type WriteTo interface {
 	// Once returned, the WAL Watcher will not attempt to pass that data again.
 	Append([]record.RefSample) bool
 	AppendExemplars([]record.RefExemplar) bool
+	AppendHistograms([]record.RefHistogramSample) bool
 	StoreSeries([]record.RefSeries, int)

 	// Next two methods are intended for garbage-collection: first we call

@@ -74,6 +75,7 @@ type Watcher struct {
 	walDir         string
 	lastCheckpoint string
 	sendExemplars  bool
+	sendHistograms bool
 	metrics        *WatcherMetrics
 	readerMetrics  *LiveReaderMetrics

@@ -144,18 +146,19 @@ func NewWatcherMetrics(reg prometheus.Registerer) *WatcherMetrics {
 }

 // NewWatcher creates a new WAL watcher for a given WriteTo.
-func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger log.Logger, name string, writer WriteTo, dir string, sendExemplars bool) *Watcher {
+func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger log.Logger, name string, writer WriteTo, dir string, sendExemplars, sendHistograms bool) *Watcher {
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
 	return &Watcher{
-		logger:        logger,
-		writer:        writer,
-		metrics:       metrics,
-		readerMetrics: readerMetrics,
-		walDir:        path.Join(dir, "wal"),
-		name:          name,
-		sendExemplars: sendExemplars,
+		logger:         logger,
+		writer:         writer,
+		metrics:        metrics,
+		readerMetrics:  readerMetrics,
+		walDir:         path.Join(dir, "wal"),
+		name:           name,
+		sendExemplars:  sendExemplars,
+		sendHistograms: sendHistograms,

 		quit: make(chan struct{}),
 		done: make(chan struct{}),

@@ -301,7 +304,7 @@ func (w *Watcher) firstAndLast() (int, int, error) {
 	return refs[0], refs[len(refs)-1], nil
 }

-// Copied from tsdb/wal/wal.go so we do not have to open a WAL.
+// Copied from tsdb/wlog/wlog.go so we do not have to open a WAL.
 // Plan is to move WAL watcher to TSDB and dedupe these implementations.
 func (w *Watcher) segments(dir string) ([]int, error) {
 	files, err := os.ReadDir(dir)

@@ -317,7 +320,7 @@ func (w *Watcher) segments(dir string) ([]int, error) {
 		}
 		refs = append(refs, k)
 	}
-	sort.Ints(refs)
+	slices.Sort(refs)
 	for i := 0; i < len(refs)-1; i++ {
 		if refs[i]+1 != refs[i+1] {
 			return nil, errors.New("segments are not sequential")

@@ -473,11 +476,13 @@ func (w *Watcher) garbageCollectSeries(segmentNum int) error {
 // Also used with readCheckpoint - implements segmentReadFn.
 func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
 	var (
-		dec       record.Decoder
-		series    []record.RefSeries
-		samples   []record.RefSample
-		send      []record.RefSample
-		exemplars []record.RefExemplar
+		dec              record.Decoder
+		series           []record.RefSeries
+		samples          []record.RefSample
+		samplesToSend    []record.RefSample
+		exemplars        []record.RefExemplar
+		histograms       []record.RefHistogramSample
+		histogramsToSend []record.RefHistogramSample
 	)
 	for r.Next() && !isClosed(w.quit) {
 		rec := r.Record()

@@ -510,12 +515,12 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
 					duration := time.Since(w.startTime)
 					level.Info(w.logger).Log("msg", "Done replaying WAL", "duration", duration)
 				}
-				send = append(send, s)
+				samplesToSend = append(samplesToSend, s)
 				}
 			}
-			if len(send) > 0 {
-				w.writer.Append(send)
-				send = send[:0]
+			if len(samplesToSend) > 0 {
+				w.writer.Append(samplesToSend)
+				samplesToSend = samplesToSend[:0]
 			}

 		case record.Exemplars:

@@ -535,6 +540,34 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
 			}
 			w.writer.AppendExemplars(exemplars)

+		case record.HistogramSamples:
+			// Skip if experimental "histograms over remote write" is not enabled.
+			if !w.sendHistograms {
+				break
+			}
+			if !tail {
+				break
+			}
+			histograms, err := dec.HistogramSamples(rec, histograms[:0])
+			if err != nil {
+				w.recordDecodeFailsMetric.Inc()
+				return err
+			}
+			for _, h := range histograms {
+				if h.T > w.startTimestamp {
+					if !w.sendSamples {
+						w.sendSamples = true
+						duration := time.Since(w.startTime)
+						level.Info(w.logger).Log("msg", "Done replaying WAL", "duration", duration)
+					}
+					histogramsToSend = append(histogramsToSend, h)
+				}
+			}
+			if len(histogramsToSend) > 0 {
+				w.writer.AppendHistograms(histogramsToSend)
+				histogramsToSend = histogramsToSend[:0]
+			}
+
 		case record.Tombstones:

 		default:
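Callers of NewWatcher must now pass the extra sendHistograms flag, and any WriteTo implementation must additionally provide AppendHistograms. A hedged sketch of the updated call; all identifiers below are assumed pre-existing values, not names from this commit:

watcher := wlog.NewWatcher(
	watcherMetrics,    // *wlog.WatcherMetrics, assumed already constructed
	liveReaderMetrics, // *wlog.LiveReaderMetrics
	logger,            // log.Logger
	"remote-write-target", // queue name used in metrics and logs
	writeTo,           // WriteTo sink, now also implementing AppendHistograms
	dir,               // directory that contains the "wal" subdirectory
	true,              // sendExemplars
	true,              // sendHistograms: new flag, forward native histograms too
)
watcher.Start() // lifecycle methods assumed unchanged by this diff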
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-package wal
+package wlog

 import (
 	"bufio"

@@ -133,7 +133,7 @@ func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) {
 	// If it was torn mid-record, a full read (which the caller should do anyway
 	// to ensure integrity) will detect it as a corruption by the end.
 	if d := stat.Size() % pageSize; d != 0 {
-		level.Warn(logger).Log("msg", "Last page of the wal is torn, filling it with zeros", "segment", segName)
+		level.Warn(logger).Log("msg", "Last page of the wlog is torn, filling it with zeros", "segment", segName)
 		if _, err := f.Write(make([]byte, pageSize-d)); err != nil {
 			f.Close()
 			return nil, errors.Wrap(err, "zero-pad torn page")

@@ -164,7 +164,7 @@ func OpenReadSegment(fn string) (*Segment, error) {
 	return &Segment{SegmentFile: f, i: k, dir: filepath.Dir(fn)}, nil
 }

-// WAL is a write ahead log that stores records in segment files.
+// WL is a write log that stores records in segment files.
 // It must be read from start to end once before logging new data.
 // If an error occurs during read, the repair procedure must be called
 // before it's safe to do further writes.

@@ -174,7 +174,7 @@ func OpenReadSegment(fn string) (*Segment, error) {
 // Records are never split across segments to allow full segments to be
 // safely truncated. It also ensures that torn writes never corrupt records
 // beyond the most recent segment.
-type WAL struct {
+type WL struct {
 	dir         string
 	logger      log.Logger
 	segmentSize int

@@ -188,10 +188,10 @@ type WAL struct {
 	compress  bool
 	snappyBuf []byte

-	metrics *walMetrics
+	metrics *wlMetrics
 }

-type walMetrics struct {
+type wlMetrics struct {
 	fsyncDuration   prometheus.Summary
 	pageFlushes     prometheus.Counter
 	pageCompletions prometheus.Counter

@@ -201,12 +201,12 @@ type walMetrics struct {
 	writesFailed prometheus.Counter
 }

-func newWALMetrics(r prometheus.Registerer) *walMetrics {
-	m := &walMetrics{}
+func newWLMetrics(r prometheus.Registerer) *wlMetrics {
+	m := &wlMetrics{}

 	m.fsyncDuration = prometheus.NewSummary(prometheus.SummaryOpts{
 		Name:       "fsync_duration_seconds",
-		Help:       "Duration of WAL fsync.",
+		Help:       "Duration of write log fsync.",
 		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
 	})
 	m.pageFlushes = prometheus.NewCounter(prometheus.CounterOpts{

@@ -219,19 +219,19 @@ func newWALMetrics(r prometheus.Registerer) *walMetrics {
 	})
 	m.truncateFail = prometheus.NewCounter(prometheus.CounterOpts{
 		Name: "truncations_failed_total",
-		Help: "Total number of WAL truncations that failed.",
+		Help: "Total number of write log truncations that failed.",
 	})
 	m.truncateTotal = prometheus.NewCounter(prometheus.CounterOpts{
 		Name: "truncations_total",
-		Help: "Total number of WAL truncations attempted.",
+		Help: "Total number of write log truncations attempted.",
 	})
 	m.currentSegment = prometheus.NewGauge(prometheus.GaugeOpts{
 		Name: "segment_current",
-		Help: "WAL segment index that TSDB is currently writing to.",
+		Help: "Write log segment index that TSDB is currently writing to.",
 	})
 	m.writesFailed = prometheus.NewCounter(prometheus.CounterOpts{
 		Name: "writes_failed_total",
-		Help: "Total number of WAL writes that failed.",
+		Help: "Total number of write log writes that failed.",
 	})

 	if r != nil {

@@ -250,13 +250,13 @@ func newWALMetrics(r prometheus.Registerer) *walMetrics {
 }

 // New returns a new WAL over the given directory.
-func New(logger log.Logger, reg prometheus.Registerer, dir string, compress bool) (*WAL, error) {
+func New(logger log.Logger, reg prometheus.Registerer, dir string, compress bool) (*WL, error) {
 	return NewSize(logger, reg, dir, DefaultSegmentSize, compress)
 }

-// NewSize returns a new WAL over the given directory.
+// NewSize returns a new write log over the given directory.
 // New segments are created with the specified size.
-func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSize int, compress bool) (*WAL, error) {
+func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSize int, compress bool) (*WL, error) {
 	if segmentSize%pageSize != 0 {
 		return nil, errors.New("invalid segment size")
 	}

@@ -266,7 +266,7 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
-	w := &WAL{
+	w := &WL{
 		dir:         dir,
 		logger:      logger,
 		segmentSize: segmentSize,

@@ -277,9 +277,9 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi
 	}
 	prefix := "prometheus_tsdb_wal_"
 	if filepath.Base(dir) == WblDirName {
-		prefix = "prometheus_tsdb_out_of_order_wal_"
+		prefix = "prometheus_tsdb_out_of_order_wbl_"
 	}
-	w.metrics = newWALMetrics(prometheus.WrapRegistererWithPrefix(prefix, reg))
+	w.metrics = newWLMetrics(prometheus.WrapRegistererWithPrefix(prefix, reg))

 	_, last, err := Segments(w.Dir())
 	if err != nil {

@@ -308,11 +308,11 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi
 }

 // Open an existing WAL.
-func Open(logger log.Logger, dir string) (*WAL, error) {
+func Open(logger log.Logger, dir string) (*WL, error) {
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
-	w := &WAL{
+	w := &WL{
 		dir:    dir,
 		logger: logger,
 	}

@@ -321,16 +321,16 @@ func Open(logger log.Logger, dir string) (*WAL, error) {
 }

 // CompressionEnabled returns if compression is enabled on this WAL.
-func (w *WAL) CompressionEnabled() bool {
+func (w *WL) CompressionEnabled() bool {
 	return w.compress
 }

 // Dir returns the directory of the WAL.
-func (w *WAL) Dir() string {
+func (w *WL) Dir() string {
 	return w.dir
 }

-func (w *WAL) run() {
+func (w *WL) run() {
 Loop:
 	for {
 		select {

@@ -350,7 +350,7 @@ Loop:

 // Repair attempts to repair the WAL based on the error.
 // It discards all data after the corruption.
-func (w *WAL) Repair(origErr error) error {
+func (w *WL) Repair(origErr error) error {
 	// We could probably have a mode that only discards torn records right around
 	// the corruption to preserve as data much as possible.
 	// But that's not generally applicable if the records have any kind of causality.

@@ -466,7 +466,7 @@ func SegmentName(dir string, i int) string {

 // NextSegment creates the next segment and closes the previous one asynchronously.
 // It returns the file number of the new file.
-func (w *WAL) NextSegment() (int, error) {
+func (w *WL) NextSegment() (int, error) {
 	w.mtx.Lock()
 	defer w.mtx.Unlock()
 	return w.nextSegment(true)

@@ -474,7 +474,7 @@ func (w *WAL) NextSegment() (int, error) {

 // NextSegmentSync creates the next segment and closes the previous one in sync.
 // It returns the file number of the new file.
-func (w *WAL) NextSegmentSync() (int, error) {
+func (w *WL) NextSegmentSync() (int, error) {
 	w.mtx.Lock()
 	defer w.mtx.Unlock()
 	return w.nextSegment(false)

@@ -482,9 +482,9 @@ func (w *WAL) NextSegmentSync() (int, error) {

 // nextSegment creates the next segment and closes the previous one.
 // It returns the file number of the new file.
-func (w *WAL) nextSegment(async bool) (int, error) {
+func (w *WL) nextSegment(async bool) (int, error) {
 	if w.closed {
-		return 0, errors.New("wal is closed")
+		return 0, errors.New("wlog is closed")
 	}

 	// Only flush the current page if it actually holds data.

@@ -519,7 +519,7 @@ func (w *WAL) nextSegment(async bool) (int, error) {
 	return next.Index(), nil
 }

-func (w *WAL) setSegment(segment *Segment) error {
+func (w *WL) setSegment(segment *Segment) error {
 	w.segment = segment

 	// Correctly initialize donePages.

@@ -535,7 +535,7 @@ func (w *WAL) setSegment(segment *Segment) error {
 // flushPage writes the new contents of the page to disk. If no more records will fit into
 // the page, the remaining bytes will be set to zero and a new page will be started.
 // If clear is true, this is enforced regardless of how many bytes are left in the page.
-func (w *WAL) flushPage(clear bool) error {
+func (w *WL) flushPage(clear bool) error {
 	w.metrics.pageFlushes.Inc()

 	p := w.page

@@ -601,13 +601,13 @@ func (t recType) String() string {
 	}
 }

-func (w *WAL) pagesPerSegment() int {
+func (w *WL) pagesPerSegment() int {
 	return w.segmentSize / pageSize
 }

 // Log writes the records into the log.
 // Multiple records can be passed at once to reduce writes and increase throughput.
-func (w *WAL) Log(recs ...[]byte) error {
+func (w *WL) Log(recs ...[]byte) error {
 	w.mtx.Lock()
 	defer w.mtx.Unlock()
 	// Callers could just implement their own list record format but adding

@@ -625,7 +625,7 @@ func (w *WAL) Log(recs ...[]byte) error {
 // - the final record of a batch
 // - the record is bigger than the page size
 // - the current page is full.
-func (w *WAL) log(rec []byte, final bool) error {
+func (w *WL) log(rec []byte, final bool) error {
 	// When the last page flush failed the page will remain full.
 	// When the page is full, need to flush it before trying to add more records to it.
 	if w.page.full() {

@@ -721,7 +721,7 @@ func (w *WAL) log(rec []byte, final bool) error {

 // LastSegmentAndOffset returns the last segment number of the WAL
 // and the offset in that file upto which the segment has been filled.
-func (w *WAL) LastSegmentAndOffset() (seg, offset int, err error) {
+func (w *WL) LastSegmentAndOffset() (seg, offset int, err error) {
 	w.mtx.Lock()
 	defer w.mtx.Unlock()

@@ -736,7 +736,7 @@ func (w *WAL) LastSegmentAndOffset() (seg, offset int, err error) {
 }

 // Truncate drops all segments before i.
-func (w *WAL) Truncate(i int) (err error) {
+func (w *WL) Truncate(i int) (err error) {
 	w.metrics.truncateTotal.Inc()
 	defer func() {
 		if err != nil {

@@ -758,27 +758,27 @@ func (w *WAL) Truncate(i int) (err error) {
 	return nil
 }

-func (w *WAL) fsync(f *Segment) error {
+func (w *WL) fsync(f *Segment) error {
 	start := time.Now()
 	err := f.Sync()
 	w.metrics.fsyncDuration.Observe(time.Since(start).Seconds())
 	return err
 }

-// Sync forces a file sync on the current wal segment. This function is meant
+// Sync forces a file sync on the current write log segment. This function is meant
 // to be used only on tests due to different behaviour on Operating Systems
 // like windows and linux
-func (w *WAL) Sync() error {
+func (w *WL) Sync() error {
 	return w.fsync(w.segment)
 }

 // Close flushes all writes and closes active segment.
-func (w *WAL) Close() (err error) {
+func (w *WL) Close() (err error) {
 	w.mtx.Lock()
 	defer w.mtx.Unlock()

 	if w.closed {
-		return errors.New("wal already closed")
+		return errors.New("wlog already closed")
 	}

 	if w.segment == nil {

@@ -811,8 +811,8 @@ func (w *WAL) Close() (err error) {

 // Segments returns the range [first, n] of currently existing segments.
 // If no segments are found, first and n are -1.
-func Segments(walDir string) (first, last int, err error) {
-	refs, err := listSegments(walDir)
+func Segments(wlDir string) (first, last int, err error) {
+	refs, err := listSegments(wlDir)
 	if err != nil {
 		return 0, 0, err
 	}

@@ -979,8 +979,8 @@ func (r *segmentBufReader) Read(b []byte) (n int, err error) {
 	return n, nil
 }

-// Computing size of the WAL.
+// Size computes the size of the write log.
 // We do this by adding the sizes of all the files under the WAL dir.
-func (w *WAL) Size() (int64, error) {
+func (w *WL) Size() (int64, error) {
 	return fileutil.DirSize(w.Dir())
 }
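For callers the WAL-to-WL rename is mostly mechanical: the constructor names are unchanged and only the returned type differs. A minimal sketch, assuming logger, reg, dir, rec1 and rec2 already exist in scope:

// w has type *wlog.WL after this change; before it was *wal.WAL.
w, err := wlog.NewSize(logger, reg, dir, wlog.DefaultSegmentSize, false /* compress */)
if err != nil {
	return err
}
defer w.Close()

// Each []byte passed to Log is written as one record.
if err := w.Log(rec1, rec2); err != nil {
	return err
}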
2 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go generated vendored

@@ -16,7 +16,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http

 // Version is the current release version of the otelhttp instrumentation.
 func Version() string {
-	return "0.36.3"
+	return "0.36.4"
 	// This string is updated by the pre_release.sh script during release
 }
vendor/go.opentelemetry.io/otel/CHANGELOG.md (37 changed lines, generated, vendored)

@@ -8,6 +8,39 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
 
 ## [Unreleased]
 
+## [1.11.1/0.33.0] 2022-10-19
+
+### Added
+
+- The Prometheus exporter in go.opentelemetry.io/otel/exporters/prometheus registers with a Prometheus registerer on creation.
+  By default, it will register with the default Prometheus registerer.
+  A non-default registerer can be used by passing the WithRegisterer option. (#3239)
+- Added the WithAggregationSelector option to the go.opentelemetry.io/otel/exporters/prometheus package to change the default AggregationSelector used. (#3341)
+- The Prometheus exporter in go.opentelemetry.io/otel/exporters/prometheus converts the Resource associated with metric exports into a target_info metric. (#3285)
+
+### Changed
+
+- The "go.opentelemetry.io/otel/exporters/prometheus".New function is updated to return an error.
+  It will return an error if the exporter fails to register with Prometheus. (#3239)
+
+### Fixed
+
+- The URL-encoded values from the OTEL_RESOURCE_ATTRIBUTES environment variable are decoded. (#2963)
+- The baggage.NewMember function decodes the value parameter instead of directly using it.
+  This fixes the implementation to be compliant with the W3C specification. (#3226)
+- Slice attributes of the attribute package are now comparable based on their value, not instance. (#3108 #3252)
+- The Shutdown and ForceFlush methods of the "go.opentelemetry.io/otel/sdk/trace".TraceProvider no longer return an error when no processor is registered. (#3268)
+- The Prometheus exporter in go.opentelemetry.io/otel/exporters/prometheus cumulatively sums histogram buckets. (#3281)
+- The sum of each histogram data point is now uniquely exported by the go.opentelemetry.io/otel/exporters/otlpmetric exporters. (#3284, #3293)
+- Recorded values for asynchronous counters (Counter and UpDownCounter) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278)
+- UpDownCounters are now correctly output as Prometheus gauges in the go.opentelemetry.io/otel/exporters/prometheus exporter. (#3358)
+- The Prometheus exporter in go.opentelemetry.io/otel/exporters/prometheus no longer describes the metrics it will send to Prometheus on startup.
+  Instead the exporter is defined as an "unchecked" collector for Prometheus.
+  This fixes the "reader is not registered" warning currently emitted on startup. (#3291 #3342)
+- The go.opentelemetry.io/otel/exporters/prometheus exporter now correctly adds _total suffixes to counter metrics. (#3360)
+- The go.opentelemetry.io/otel/exporters/prometheus exporter now adds a unit suffix to metric names.
+  This can be disabled using the WithoutUnits() option added to that package. (#3352)
+
 ## [1.11.0/0.32.3] 2022-10-12
 
 ### Added
@@ -31,6 +64,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
 
 - Flush pending measurements with the PeriodicReader in the go.opentelemetry.io/otel/sdk/metric when ForceFlush or Shutdown are called. (#3220)
 - Update histogram default bounds to match the requirements of the latest specification. (#3222)
+- Encode the HTTP status code in the OpenTracing bridge (go.opentelemetry.io/otel/bridge/opentracing) as an integer. (#3265)
 
 ### Fixed
 
@@ -1993,7 +2027,8 @@ It contains api and sdk for trace and meter.
 - CircleCI build CI manifest files.
 - CODEOWNERS file to track owners of this project.
 
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.11.0...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.11.1...HEAD
+[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1
 [1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0
 [0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2
 [0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1
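The 1.11.1 entries above change how the Prometheus exporter is constructed: New now returns an error and accepts a WithRegisterer option. A minimal sketch of the new wiring under the v0.33.0 API, assuming the usual sdk/metric setup:

```go
package main

import (
	"log"

	prom "github.com/prometheus/client_golang/prometheus"
	"go.opentelemetry.io/otel/exporters/prometheus"
	"go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	reg := prom.NewRegistry()

	// Since v0.33.0, New registers the exporter with a Prometheus registerer
	// on creation (the default registerer unless WithRegisterer is passed)
	// and returns an error if that registration fails.
	exporter, err := prometheus.New(prometheus.WithRegisterer(reg))
	if err != nil {
		log.Fatal(err)
	}

	// The exporter acts as a metric.Reader; plug it into a MeterProvider.
	provider := metric.NewMeterProvider(metric.WithReader(exporter))
	_ = provider
}
```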
vendor/go.opentelemetry.io/otel/attribute/value.go (67 changed lines, generated, vendored)

@@ -17,9 +17,11 @@ package attribute // import "go.opentelemetry.io/otel/attribute"
 import (
 	"encoding/json"
 	"fmt"
+	"reflect"
 	"strconv"
 
 	"go.opentelemetry.io/otel/internal"
+	"go.opentelemetry.io/otel/internal/attribute"
 )
 
 //go:generate stringer -type=Type
@@ -66,12 +68,7 @@ func BoolValue(v bool) Value {
 
 // BoolSliceValue creates a BOOLSLICE Value.
 func BoolSliceValue(v []bool) Value {
-	cp := make([]bool, len(v))
-	copy(cp, v)
-	return Value{
-		vtype: BOOLSLICE,
-		slice: &cp,
-	}
+	return Value{vtype: BOOLSLICE, slice: attribute.SliceValue(v)}
 }
 
 // IntValue creates an INT64 Value.
@@ -81,13 +78,14 @@ func IntValue(v int) Value {
 
 // IntSliceValue creates an INTSLICE Value.
 func IntSliceValue(v []int) Value {
-	cp := make([]int64, 0, len(v))
-	for _, i := range v {
-		cp = append(cp, int64(i))
+	var int64Val int64
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val)))
+	for i, val := range v {
+		cp.Elem().Index(i).SetInt(int64(val))
 	}
 	return Value{
 		vtype: INT64SLICE,
-		slice: &cp,
+		slice: cp.Elem().Interface(),
 	}
 }
 
@@ -101,12 +99,7 @@ func Int64Value(v int64) Value {
 
 // Int64SliceValue creates an INT64SLICE Value.
 func Int64SliceValue(v []int64) Value {
-	cp := make([]int64, len(v))
-	copy(cp, v)
-	return Value{
-		vtype: INT64SLICE,
-		slice: &cp,
-	}
+	return Value{vtype: INT64SLICE, slice: attribute.SliceValue(v)}
 }
 
 // Float64Value creates a FLOAT64 Value.
@@ -119,12 +112,7 @@ func Float64Value(v float64) Value {
 
 // Float64SliceValue creates a FLOAT64SLICE Value.
 func Float64SliceValue(v []float64) Value {
-	cp := make([]float64, len(v))
-	copy(cp, v)
-	return Value{
-		vtype: FLOAT64SLICE,
-		slice: &cp,
-	}
+	return Value{vtype: FLOAT64SLICE, slice: attribute.SliceValue(v)}
 }
 
 // StringValue creates a STRING Value.
@@ -137,12 +125,7 @@ func StringValue(v string) Value {
 
 // StringSliceValue creates a STRINGSLICE Value.
 func StringSliceValue(v []string) Value {
-	cp := make([]string, len(v))
-	copy(cp, v)
-	return Value{
-		vtype: STRINGSLICE,
-		slice: &cp,
-	}
+	return Value{vtype: STRINGSLICE, slice: attribute.SliceValue(v)}
 }
 
 // Type returns a type of the Value.
@@ -159,10 +142,7 @@ func (v Value) AsBool() bool {
 // AsBoolSlice returns the []bool value. Make sure that the Value's type is
 // BOOLSLICE.
 func (v Value) AsBoolSlice() []bool {
-	if s, ok := v.slice.(*[]bool); ok {
-		return *s
-	}
-	return nil
+	return attribute.AsSlice[bool](v.slice)
 }
 
 // AsInt64 returns the int64 value. Make sure that the Value's type is
@@ -174,10 +154,7 @@ func (v Value) AsInt64() int64 {
 // AsInt64Slice returns the []int64 value. Make sure that the Value's type is
 // INT64SLICE.
 func (v Value) AsInt64Slice() []int64 {
-	if s, ok := v.slice.(*[]int64); ok {
-		return *s
-	}
-	return nil
+	return attribute.AsSlice[int64](v.slice)
 }
 
 // AsFloat64 returns the float64 value. Make sure that the Value's
@@ -189,10 +166,7 @@ func (v Value) AsFloat64() float64 {
 // AsFloat64Slice returns the []float64 value. Make sure that the Value's type is
 // FLOAT64SLICE.
 func (v Value) AsFloat64Slice() []float64 {
-	if s, ok := v.slice.(*[]float64); ok {
-		return *s
-	}
-	return nil
+	return attribute.AsSlice[float64](v.slice)
 }
 
 // AsString returns the string value. Make sure that the Value's type
@@ -204,10 +178,7 @@ func (v Value) AsString() string {
 // AsStringSlice returns the []string value. Make sure that the Value's type is
 // STRINGSLICE.
 func (v Value) AsStringSlice() []string {
-	if s, ok := v.slice.(*[]string); ok {
-		return *s
-	}
-	return nil
+	return attribute.AsSlice[string](v.slice)
 }
 
 type unknownValueType struct{}
@@ -239,19 +210,19 @@ func (v Value) AsInterface() interface{} {
 func (v Value) Emit() string {
 	switch v.Type() {
 	case BOOLSLICE:
-		return fmt.Sprint(*(v.slice.(*[]bool)))
+		return fmt.Sprint(v.AsBoolSlice())
 	case BOOL:
 		return strconv.FormatBool(v.AsBool())
 	case INT64SLICE:
-		return fmt.Sprint(*(v.slice.(*[]int64)))
+		return fmt.Sprint(v.AsInt64Slice())
 	case INT64:
 		return strconv.FormatInt(v.AsInt64(), 10)
 	case FLOAT64SLICE:
-		return fmt.Sprint(*(v.slice.(*[]float64)))
+		return fmt.Sprint(v.AsFloat64Slice())
 	case FLOAT64:
 		return fmt.Sprint(v.AsFloat64())
 	case STRINGSLICE:
-		return fmt.Sprint(*(v.slice.(*[]string)))
+		return fmt.Sprint(v.AsStringSlice())
 	case STRING:
 		return v.stringly
 	default:
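This rewrite is the fix for #3108/#3252 listed in the changelog above: slice values are stored as fixed-size arrays (via the internal attribute helpers) rather than pointers to slices, so two Values built from equal slices now compare equal instead of comparing pointer identity. A quick illustration against the public attribute API:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	a := attribute.StringSliceValue([]string{"x", "y"})
	b := attribute.StringSliceValue([]string{"x", "y"})

	// With the old *[]string storage this printed false (distinct pointers);
	// with array-backed storage, Values compare element-wise.
	fmt.Println(a == b) // true

	// Round-tripping back to a slice still yields the original contents.
	fmt.Println(b.AsStringSlice()) // [x y]
}
```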
vendor/go.opentelemetry.io/otel/baggage/baggage.go (16 changed lines, generated, vendored)

@@ -250,8 +250,9 @@ type Member struct {
 	hasData bool
 }
 
-// NewMember returns a new Member from the passed arguments. An error is
-// returned if the created Member would be invalid according to the W3C
+// NewMember returns a new Member from the passed arguments. The key will be
+// used directly while the value will be url decoded after validation. An error
+// is returned if the created Member would be invalid according to the W3C
 // Baggage specification.
 func NewMember(key, value string, props ...Property) (Member, error) {
 	m := Member{
@@ -263,7 +264,11 @@ func NewMember(key, value string, props ...Property) (Member, error) {
 	if err := m.validate(); err != nil {
 		return newInvalidMember(), err
 	}
-
+	decodedValue, err := url.QueryUnescape(value)
+	if err != nil {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+	m.value = decodedValue
 	return m, nil
 }
 
@@ -328,8 +333,9 @@ func parseMember(member string) (Member, error) {
 	return Member{key: key, value: value, properties: props, hasData: true}, nil
 }
 
-// validate ensures m conforms to the W3C Baggage specification, returning an
-// error otherwise.
+// validate ensures m conforms to the W3C Baggage specification.
+// A key is just an ASCII string, but a value must be URL encoded UTF-8,
+// returning an error otherwise.
 func (m Member) validate() error {
 	if !m.hasData {
 		return fmt.Errorf("%w: %q", errInvalidMember, m)
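This change (#3226) makes NewMember symmetric with parsing: the value argument is treated as the percent-encoded form the W3C Baggage spec requires on the wire, and is decoded after validation. A small sketch of the observable behaviour, assuming the v1.11.1 baggage API:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// The value is passed percent-encoded; since #3226 NewMember decodes it.
	m, err := baggage.NewMember("userId", "alice%20smith")
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Value()) // "alice smith"

	// An invalid escape sequence is now rejected instead of stored verbatim.
	_, err = baggage.NewMember("userId", "bad%zz")
	fmt.Println(err != nil) // true
}
```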
vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go (45 added lines, generated, vendored, new file)

@@ -0,0 +1,45 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package attribute provide several helper functions for some commonly used
+logic of processing attributes.
+*/
+package attribute // import "go.opentelemetry.io/otel/internal/attribute"
+
+import (
+	"reflect"
+)
+
+// SliceValue convert a slice into an array with same elements as slice.
+func SliceValue[T bool | int64 | float64 | string](v []T) any {
+	var zero T
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]T), v)
+	return cp.Elem().Interface()
+}
+
+// AsSlice convert an array into a slice into with same elements as array.
+func AsSlice[T bool | int64 | float64 | string](v any) []T {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero T
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]T)
+}
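These two generic helpers are the core of the comparability fix: SliceValue copies a slice into a fixed-size array built with reflect.ArrayOf (arrays are comparable in Go; slices are not), and AsSlice converts back. A standalone reproduction of the trick, independent of the otel module, with the hypothetical helper name toArray:

```go
package main

import (
	"fmt"
	"reflect"
)

// toArray mirrors SliceValue: it copies a []T into a [len(v)]T constructed
// with reflect.ArrayOf and returns it as an interface value. Arrays compare
// element-wise, so the result works with == and as a map key, which a slice
// (or a pointer to one) cannot.
func toArray[T comparable](v []T) any {
	var zero T
	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
	copy(cp.Elem().Slice(0, len(v)).Interface().([]T), v)
	return cp.Elem().Interface()
}

func main() {
	a := toArray([]string{"x", "y"})
	b := toArray([]string{"x", "y"})
	fmt.Println(a == b) // true: same dynamic array type, equal elements
}
```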
vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go (8 changed lines, generated, vendored)

@@ -35,7 +35,8 @@ type InstrumentProvider interface {
 
 // Counter is an instrument that records increasing values.
 type Counter interface {
-	// Observe records the state of the instrument.
+	// Observe records the state of the instrument to be x. The value of x is
+	// assumed to be the exact Counter value to record.
 	//
 	// It is only valid to call this within a callback. If called outside of the
 	// registered callback it should have no effect on the instrument, and an
@@ -47,7 +48,8 @@ type Counter interface {
 
 // UpDownCounter is an instrument that records increasing or decreasing values.
 type UpDownCounter interface {
-	// Observe records the state of the instrument.
+	// Observe records the state of the instrument to be x. The value of x is
+	// assumed to be the exact UpDownCounter value to record.
 	//
 	// It is only valid to call this within a callback. If called outside of the
 	// registered callback it should have no effect on the instrument, and an
@@ -59,7 +61,7 @@ type UpDownCounter interface {
 
 // Gauge is an instrument that records independent readings.
 type Gauge interface {
-	// Observe records the state of the instrument.
+	// Observe records the state of the instrument to be x.
 	//
 	// It is only valid to call this within a callback. If called outside of the
 	// registered callback it should have no effect on the instrument, and an
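The tightened doc comments pair with the SDK fix (#3350) in the changelog: values passed to Observe on asynchronous counters are the exact cumulative sum, not an increment. A sketch of a conforming callback against the v1.11 metric API; the meter wiring and the total() accessor are assumptions:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/instrument"
)

// reportBytesSent registers an asynchronous counter on the given meter.
// Per the clarified contract, Observe must be handed the exact running
// total on every callback invocation, never the delta since the last one.
func reportBytesSent(meter metric.Meter, total func() float64) error {
	counter, err := meter.AsyncFloat64().Counter("bytes.sent",
		instrument.WithDescription("cumulative bytes sent"))
	if err != nil {
		return err
	}
	return meter.RegisterCallback(
		[]instrument.Asynchronous{counter},
		func(ctx context.Context) {
			// Exact cumulative value; since #3350 the SDK no longer
			// interprets this as an incremental addition.
			counter.Observe(ctx, total())
		},
	)
}
```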