vendor: make vendor-update

Aliaksandr Valialkin 2023-03-12 01:49:12 -08:00
parent 3d2cfc4f33
commit 60fcf88e45
No known key found for this signature in database
GPG key ID: A72BEC6CD3D0DED1
237 changed files with 19305 additions and 10300 deletions

go.mod (80 changed lines)

@@ -4,38 +4,38 @@ go 1.19
require (
cloud.google.com/go/storage v1.29.0
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0
github.com/VictoriaMetrics/fastcache v1.12.0
github.com/VictoriaMetrics/fastcache v1.12.1
// Do not use the original github.com/valyala/fasthttp because of issues
// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
github.com/VictoriaMetrics/fasthttp v1.2.0
github.com/VictoriaMetrics/metrics v1.23.1
github.com/VictoriaMetrics/metricsql v0.56.1
github.com/aws/aws-sdk-go-v2 v1.17.4
github.com/aws/aws-sdk-go-v2/config v1.18.13
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.53
github.com/aws/aws-sdk-go-v2/service/s3 v1.30.3
github.com/aws/aws-sdk-go-v2 v1.17.6
github.com/aws/aws-sdk-go-v2/config v1.18.16
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.56
github.com/aws/aws-sdk-go-v2/service/s3 v1.30.6
github.com/cespare/xxhash/v2 v2.2.0
github.com/cheggaaa/pb/v3 v3.1.0
github.com/cheggaaa/pb/v3 v3.1.2
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
github.com/googleapis/gax-go/v2 v2.7.0
github.com/googleapis/gax-go/v2 v2.7.1
github.com/influxdata/influxdb v1.11.0
github.com/klauspost/compress v1.15.15
github.com/klauspost/compress v1.16.0
github.com/prometheus/prometheus v0.42.0
github.com/urfave/cli/v2 v2.24.4
github.com/urfave/cli/v2 v2.25.0
github.com/valyala/fastjson v1.6.4
github.com/valyala/fastrand v1.1.0
github.com/valyala/fasttemplate v1.2.2
github.com/valyala/gozstd v1.18.0
github.com/valyala/histogram v1.2.0
github.com/valyala/quicktemplate v1.7.0
golang.org/x/net v0.7.0
golang.org/x/oauth2 v0.5.0
golang.org/x/sys v0.5.0
google.golang.org/api v0.110.0
golang.org/x/net v0.8.0
golang.org/x/oauth2 v0.6.0
golang.org/x/sys v0.6.0
google.golang.org/api v0.112.0
gopkg.in/yaml.v2 v2.4.0
)
@@ -44,24 +44,24 @@ require (
cloud.google.com/go/compute v1.18.0 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/iam v0.12.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
github.com/aws/aws-sdk-go v1.44.204 // indirect
github.com/aws/aws-sdk-go v1.44.219 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.13.13 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.22 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.28 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.22 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.29 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.20 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.13.16 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.24 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.31 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.22 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.23 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.22 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.22 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.12.2 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.2 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.18.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.25 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.24 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.24 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.12.5 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.5 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.18.6 // indirect
github.com/aws/smithy-go v1.13.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
@@ -74,7 +74,7 @@ require (
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
@@ -91,29 +91,29 @@ require (
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.39.0 // indirect
github.com/prometheus/common v0.42.0 // indirect
github.com/prometheus/common/sigv4 v0.1.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/rivo/uniseg v0.4.3 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/stretchr/testify v1.8.1 // indirect
github.com/stretchr/testify v1.8.2 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.39.0 // indirect
go.opentelemetry.io/otel v1.13.0 // indirect
go.opentelemetry.io/otel/metric v0.36.0 // indirect
go.opentelemetry.io/otel/trace v1.13.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 // indirect
go.opentelemetry.io/otel v1.14.0 // indirect
go.opentelemetry.io/otel/metric v0.37.0 // indirect
go.opentelemetry.io/otel/trace v1.14.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/goleak v1.2.1 // indirect
golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb // indirect
golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/text v0.7.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44 // indirect
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
google.golang.org/grpc v1.53.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
google.golang.org/protobuf v1.29.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

go.sum (176 changed lines)

@@ -29,7 +29,7 @@ cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/iam v0.12.0 h1:DRtTY29b75ciH6Ov1PHb4/iat2CLCvrOm40Q0a6DFpE=
cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY=
cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs=
cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -43,11 +43,11 @@ cloud.google.com/go/storage v1.29.0 h1:6weCgzRvMg7lzuUurI4697AqIRPU1SvzHhynwpW31
cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1 h1:gVXuXcWd1i4C2Ruxe321aU+IKGaStvGB/S90PUPB/W8=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.1/go.mod h1:DffdKW9RFqa5VgmsjUOsS7UE7eiA5iAvYUs63bhKQ0M=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 h1:rTnT/Jrcm+figWlYz4Ixzt0SJVR2cMC8lvZcimipiEY=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0/go.mod h1:ON4tFdPTwRcgWEaVDrN3584Ef+b7GgSJaXxe5fW9t4M=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 h1:+5VZ72z0Qan5Bog5C+ZkgSqUbeVUd9wgtHOrIKuc5b8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 h1:leh5DwKv6Ihwi+h60uHtn6UWAxBbZ0q8DwQVMzf61zw=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4Qufg58y+qElGOt5qv0z1mURkRY=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
@@ -62,8 +62,8 @@ github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 h1:BWe8a+f/t+7
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY=
github.com/VictoriaMetrics/fastcache v1.12.0 h1:vnVi/y9yKDcD9akmc4NqAoqgQhJrOwUF+j9LTgn4QDE=
github.com/VictoriaMetrics/fastcache v1.12.0/go.mod h1:tjiYeEfYXCqacuvYw/7UoDIeJaNxq6132xHICNP77w8=
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
github.com/VictoriaMetrics/fasthttp v1.2.0 h1:nd9Wng4DlNtaI27WlYh5mGXCJOmee/2c2blTJwfyU9I=
github.com/VictoriaMetrics/fasthttp v1.2.0/go.mod h1:zv5YSmasAoSyv8sBVexfArzFDIGGTN4TfCKAtAw7IfE=
github.com/VictoriaMetrics/metrics v1.18.1/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
@@ -71,7 +71,6 @@ github.com/VictoriaMetrics/metrics v1.23.1 h1:/j8DzeJBxSpL2qSIdqnRFLvQQhbJyJbbEi
github.com/VictoriaMetrics/metrics v1.23.1/go.mod h1:rAr/llLpEnAdTehiNlUxKgnjcOuROSzpw0GvjpEbvFc=
github.com/VictoriaMetrics/metricsql v0.56.1 h1:j+W4fA/gtozsZb4PlKDU0Ma2VOgl88xla4FEf29w94g=
github.com/VictoriaMetrics/metricsql v0.56.1/go.mod h1:6pP1ZeLVJHqJrHlF6Ij3gmpQIznSsgktEcZgsAWYel0=
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -87,44 +86,44 @@ github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu
github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.44.204 h1:7/tPUXfNOHB390A63t6fJIwmlwVQAkAwcbzKsU2/6OQ=
github.com/aws/aws-sdk-go v1.44.204/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go-v2 v1.17.4 h1:wyC6p9Yfq6V2y98wfDsj6OnNQa4w2BLGCLIxzNhwOGY=
github.com/aws/aws-sdk-go-v2 v1.17.4/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
github.com/aws/aws-sdk-go v1.44.219 h1:YOFxTUQZvdRzgwb6XqLFRwNHxoUdKBuunITC7IFhvbc=
github.com/aws/aws-sdk-go v1.44.219/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go-v2 v1.17.6 h1:Y773UK7OBqhzi5VDXMi1zVGsoj+CVHs2eaC2bDsLwi0=
github.com/aws/aws-sdk-go-v2 v1.17.6/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno=
github.com/aws/aws-sdk-go-v2/config v1.18.13 h1:v0xlYqbO6/EVlM8tUn2QEOA7btQxcgidEq2JRDBPTho=
github.com/aws/aws-sdk-go-v2/config v1.18.13/go.mod h1:r39wGSZB7wPDW1i54JyQXUpc5KsWjh5z/3S5D9eCqDg=
github.com/aws/aws-sdk-go-v2/credentials v1.13.13 h1:zw1KAc1kl00NYd3ofVmFrb09qnYlSQMeh+fmlQRAihI=
github.com/aws/aws-sdk-go-v2/credentials v1.13.13/go.mod h1:DW9nbIIF9MrIja0cBQrUpeWYQMSlNmP8fevLUyF9W38=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.22 h1:3aMfcTmoXtTZnaT86QlVaYh+BRMbvrrmZwIQ5jWqCZQ=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.22/go.mod h1:YGSIJyQ6D6FjKMQh16hVFSIUD54L4F7zTGePqYMYYJU=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.53 h1:h1MmqGtYgkf49DhG2BSjGukpm8c+BJ9CL+bBbdFGzlk=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.53/go.mod h1:mlWLxwKZNeEwE+3Pko07lSr1NvHZwUtdzmo9AiGn7QU=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.28 h1:r+XwaCLpIvCKjBIYy/HVZujQS9tsz5ohHG3ZIe0wKoE=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.28/go.mod h1:3lwChorpIM/BhImY/hy+Z6jekmN92cXGPI1QJasVPYY=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.22 h1:7AwGYXDdqRQYsluvKFmWoqpcOQJ4bH634SkYf3FNj/A=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.22/go.mod h1:EqK7gVrIGAHyZItrD1D8B0ilgwMD1GiWAmbU4u/JHNk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.29 h1:J4xhFd6zHhdF9jPP0FQJ6WknzBboGMBNjKOv4iTuw4A=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.29/go.mod h1:TwuqRBGzxjQJIwH16/fOZodwXt2Zxa9/cwJC5ke4j7s=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.20 h1:YIvKIfPXQVp0EhXUV644kmQo6cQPPSRmC44A1HSoJeg=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.20/go.mod h1:8W88sW3PjamQpKFUQvHWWKay6ARsNvZnzU7+a4apubw=
github.com/aws/aws-sdk-go-v2/config v1.18.16 h1:4r7gsCu8Ekwl5iJGE/GmspA2UifqySCCkyyyPFeWs3w=
github.com/aws/aws-sdk-go-v2/config v1.18.16/go.mod h1:XjM6lVbq7UgELp9NjXBrb1DQY/ownlWsvDhEQksemJc=
github.com/aws/aws-sdk-go-v2/credentials v1.13.16 h1:GgToSxaENX/1zXIGNFfiVk4hxryYJ5Vt4Mh8XLAL7Lc=
github.com/aws/aws-sdk-go-v2/credentials v1.13.16/go.mod h1:KP7aFJhfwPFgx9aoVYL2nYHjya5WBD98CWaadpgmnpY=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.24 h1:5qyqXASrX2zy5cTnoHHa4N2c3Lc94GH7gjnBP3GwKdU=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.24/go.mod h1:neYVaeKr5eT7BzwULuG2YbLhzWZ22lpjKdCybR7AXrQ=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.56 h1:kFDCPqqVvb9vYcW82L7xYfrBGpuxXQ/8A/zYVayRQK4=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.56/go.mod h1:FoSBuessadgy8Cqp9gQF8U5rzi1XVQhiEJ6su2/kBEE=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30 h1:y+8n9AGDjikyXoMBTRaHHHSaFEB8267ykmvyPodJfys=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30/go.mod h1:LUBAO3zNXQjoONBKn/kR1y0Q4cj/D02Ts0uHYjcCQLM=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24 h1:r+Kv+SEJquhAZXaJ7G4u44cIwXV3f8K+N482NNAzJZA=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24/go.mod h1:gAuCezX/gob6BSMbItsSlMb6WZGV7K2+fWOvk8xBSto=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.31 h1:hf+Vhp5WtTdcSdE+yEcUz8L73sAzN0R+0jQv+Z51/mI=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.31/go.mod h1:5zUjguZfG5qjhG9/wqmuyHRyUftl2B5Cp6NNxNC6kRA=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.22 h1:lTqBRUuy8oLhBsnnVZf14uRbIHPHCrGqg4Plc8gU/1U=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.22/go.mod h1:YsOa3tFriwWNvBPYHXM5ARiU2yqBNWPWeUiq+4i7Na0=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 h1:y2+VQzC6Zh2ojtV2LoC0MNwHWc6qXv/j2vrQtlftkdA=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.23 h1:c5+bNdV8E4fIPteWx4HZSkqI07oY9exbfQ7JH7Yx4PI=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.23/go.mod h1:1jcUfF+FAOEwtIcNiHPaV4TSoZqkUIPzrohmD7fb95c=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.22 h1:LjFQf8hFuMO22HkV5VWGLBvmCLBCLPivUAmpdpnp4Vs=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.22/go.mod h1:xt0Au8yPIwYXf/GYPy/vl4K3CgwhfQMYbrH7DlUUIws=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.22 h1:ISLJ2BKXe4zzyZ7mp5ewKECiw0U7KpLgS3S6OxY9Cm0=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.22/go.mod h1:QFVbqK54XArazLvn2wvWMRBi/jGrWii46qbr5DyPGjc=
github.com/aws/aws-sdk-go-v2/service/s3 v1.30.3 h1:PVieHTwugdlHedlxLpYLQsOZAq736RScuEb/m4zhzc4=
github.com/aws/aws-sdk-go-v2/service/s3 v1.30.3/go.mod h1:XN3YcdmnWYZ3Hrnojvo5p2mc/wfF973nkq3ClXPDMHk=
github.com/aws/aws-sdk-go-v2/service/sso v1.12.2 h1:EN102fWY7hI5u/2FPheTrwwMHkSXfl49RYkeEnJsrCU=
github.com/aws/aws-sdk-go-v2/service/sso v1.12.2/go.mod h1:IgV8l3sj22nQDd5qcAGY0WenwCzCphqdbFOpfktZPrI=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.2 h1:f1lmlce7r13CX1BPyPqt9oh/H+uqOWc9367lDoGGwNQ=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.2/go.mod h1:O1YSOg3aekZibh2SngvCRRG+cRHKKlYgxf/JBF/Kr/k=
github.com/aws/aws-sdk-go-v2/service/sts v1.18.3 h1:s49mSnsBZEXjfGBkRfmK+nPqzT7Lt3+t2SmAKNyHblw=
github.com/aws/aws-sdk-go-v2/service/sts v1.18.3/go.mod h1:b+psTJn33Q4qGoDaM7ZiOVVG8uVjGI6HaZ8WBHdgDgU=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.25 h1:B/hO3jfWRm7hP00UeieNlI5O2xP5WJ27tyJG5lzc7AM=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.25/go.mod h1:54K1zgxK/lai3a4HosE4IKBwZsP/5YAJ6dzJfwsjJ0U=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.24 h1:c5qGfdbCHav6viBwiyDns3OXqhqAbGjfIB4uVu2ayhk=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.24/go.mod h1:HMA4FZG6fyib+NDo5bpIxX1EhYjrAOveZJY2YR0xrNE=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.24 h1:i4RH8DLv/BHY0fCrXYQDr+DGnWzaxB3Ee/esxUaSavk=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.24/go.mod h1:N8X45/o2cngvjCYi2ZnvI0P4mU4ZRJfEYC3maCSsPyw=
github.com/aws/aws-sdk-go-v2/service/s3 v1.30.6 h1:zzTm99krKsFcF4N7pu2z17yCcAZpQYZ7jnJZPIgEMXE=
github.com/aws/aws-sdk-go-v2/service/s3 v1.30.6/go.mod h1:PudwVKUTApfm0nYaPutOXaKdPKTlZYClGBQpVIRdcbs=
github.com/aws/aws-sdk-go-v2/service/sso v1.12.5 h1:bdKIX6SVF3nc3xJFw6Nf0igzS6Ff/louGq8Z6VP/3Hs=
github.com/aws/aws-sdk-go-v2/service/sso v1.12.5/go.mod h1:vuWiaDB30M/QTC+lI3Wj6S/zb7tpUK2MSYgy3Guh2L0=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.5 h1:xLPZMyuZ4GuqRCIec/zWuIhRFPXh2UOJdLXBSi64ZWQ=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.5/go.mod h1:QjxpHmCwAg0ESGtPQnLIVp7SedTOBMYy+Slr3IfMKeI=
github.com/aws/aws-sdk-go-v2/service/sts v1.18.6 h1:rIFn5J3yDoeuKCE9sESXqM5POTAhOP1du3bv/qTL+tE=
github.com/aws/aws-sdk-go-v2/service/sts v1.18.6/go.mod h1:48WJ9l3dwP0GSHWGc5sFGGlCkuA82Mc2xnw+T6Q8aDw=
github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -133,11 +132,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheggaaa/pb/v3 v3.1.0 h1:3uouEsl32RL7gTiQsuaXD4Bzbfl5tGztXGUvXbs4O04=
github.com/cheggaaa/pb/v3 v3.1.0/go.mod h1:YjrevcBqadFDaGQKRdmZxTY42pXEqda48Ea3lt0K/BE=
github.com/cheggaaa/pb/v3 v3.1.2 h1:FIxT3ZjOj9XJl0U4o2XbEhjFfZl7jCVCDOGq1ZAB7wQ=
github.com/cheggaaa/pb/v3 v3.1.2/go.mod h1:SNjnd0yKcW+kw0brSusraeDd5Bf1zBfxAzTL2ss3yQ4=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -165,7 +163,6 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY=
github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w=
github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
@@ -228,8 +225,8 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -271,8 +268,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9
github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ=
github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A=
github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
github.com/gophercloud/gophercloud v1.1.1 h1:MuGyqbSxiuVBqkPZ3+Nhbytk1xZxhmfCB2Rg1cJWFWM=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
@@ -316,8 +313,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw=
github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4=
github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -330,14 +327,11 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/linode/linodego v1.12.0 h1:33mOIrZ+gVva14gyJMKPZ85mQGovAvZCEP1ftgmFBjA=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@@ -384,8 +378,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -396,10 +390,9 @@ github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJf
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/prometheus/prometheus v0.42.0 h1:G769v8covTkOiNckXFIwLx01XE04OE6Fr0JPA0oR2nI=
github.com/prometheus/prometheus v0.42.0/go.mod h1:Pfqb/MLnnR2KK+0vchiaH39jXxvLMBk+3lnIGP4N7Vk=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw=
github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -419,10 +412,11 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/urfave/cli/v2 v2.24.4 h1:0gyJJEBYtCV87zI/x2nZCPyDxD51K6xM8SkwjHFCNEU=
github.com/urfave/cli/v2 v2.24.4/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/urfave/cli/v2 v2.25.0 h1:ykdZKuQey2zq0yin/l7JOm9Mh+pg72ngYMeB0ABn6q8=
github.com/urfave/cli/v2 v2.25.0/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
@@ -454,14 +448,14 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.39.0 h1:vFEBG7SieZJzvnRWQ81jxpuEqe6J8Ex+hgc9CqOTzHc=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.39.0/go.mod h1:9rgTcOKdIhDOC0IcAu8a+R+FChqSUBihKpM1lVNi6T0=
go.opentelemetry.io/otel v1.13.0 h1:1ZAKnNQKwBBxFtww/GwxNUyTf0AxkZzrukO8MeXqe4Y=
go.opentelemetry.io/otel v1.13.0/go.mod h1:FH3RtdZCzRkJYFTCsAKDy9l/XYjMdNv6QrkFFB8DvVg=
go.opentelemetry.io/otel/metric v0.36.0 h1:t0lgGI+L68QWt3QtOIlqM9gXoxqxWLhZ3R/e5oOAY0Q=
go.opentelemetry.io/otel/metric v0.36.0/go.mod h1:wKVw57sd2HdSZAzyfOM9gTqqE8v7CbqWsYL6AyrH9qk=
go.opentelemetry.io/otel/trace v1.13.0 h1:CBgRZ6ntv+Amuj1jDsMhZtlAPT6gbyIRdaIzFhfBSdY=
go.opentelemetry.io/otel/trace v1.13.0/go.mod h1:muCvmmO9KKpvuXSf3KKAXXB2ygNYHQ+ZfI5X08d3tds=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 h1:lE9EJyw3/JhrjWH/hEy9FptnalDQgj7vpbgC2KCCCxE=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0/go.mod h1:pcQ3MM3SWvrA71U4GDqv9UFDJ3HQsW7y5ZO3tDTlUdI=
go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM=
go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU=
go.opentelemetry.io/otel/metric v0.37.0 h1:pHDQuLQOZwYD+Km0eb657A25NaRzy0a+eLyKfDXedEs=
go.opentelemetry.io/otel/metric v0.37.0/go.mod h1:DmdaHfGt54iV6UKxsV9slj2bBRJcKC1B1uvDLIioc1s=
go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M=
go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
@@ -485,8 +479,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb h1:PaBZQdo+iSDyHT053FjUCgZQ/9uqVwPOcl7KSWhKn6w=
golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0 h1:LGJsf5LRplCck6jUCH3dBL2dmycNruWNF5xugkSlfXw=
golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -508,7 +502,7 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -544,16 +538,16 @@ golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.5.0 h1:HuArIo48skDwlrvM3sEdHXElYslAMsf3KwRkkW4MC4s=
golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I=
golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw=
golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -584,7 +578,6 @@ golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -606,18 +599,17 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY=
golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -626,8 +618,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -676,7 +668,7 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4=
golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -699,8 +691,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.110.0 h1:l+rh0KYUooe9JGbGVx71tbFo4SMbMTXK3I3ia2QSEeU=
google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI=
google.golang.org/api v0.112.0 h1:iDmzvZ4C086R3+en4nSyIf07HlQKMOX1Xx2dmia/+KQ=
google.golang.org/api v0.112.0/go.mod h1:737UfWHNsOq4F3REUTmb+GN9pugkgNLCayLTfoIKpPc=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -738,8 +730,8 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44 h1:EfLuoKW5WfkgVdDy7dTK8qSbH37AX5mj/MFh+bGPz14=
google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA=
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA=
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -767,8 +759,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.29.0 h1:44S3JjaKmLEE4YIkjzexaP+NzZsudE3Zin5Njn/pYX0=
google.golang.org/protobuf v1.29.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=


@@ -1,5 +1,23 @@
# Release History
## 1.4.0 (2023-03-02)
> This release doesn't include features added in v1.4.0-beta.1. They will return in v1.5.0-beta.1.
### Features Added
* Add `Clone()` method for `arm/policy.ClientOptions`.
### Bugs Fixed
* ARM's RP registration policy will no longer swallow unrecognized errors.
* Fixed an issue in `runtime.NewPollerFromResumeToken()` when resuming a `Poller` with a custom `PollingHandler`.
* Fixed wrong policy copy in `arm/runtime.NewPipeline()`.
## 1.4.0-beta.1 (2023-02-02)
### Features Added
* Added support for ARM cross-tenant authentication. Set the `AuxiliaryTenants` field of `arm.ClientOptions` to enable.
* Added `Claims` and `TenantID` fields to `policy.TokenRequestOptions`.
* ARM bearer token policy handles CAE challenges.
## 1.3.1 (2023-02-02)
### Other Changes
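The `Clone()` addition in 1.4.0 above lends itself to a short illustration. A minimal sketch, assuming `Clone()` returns an independent deep copy as `*armpolicy.ClientOptions`; the signature and the `DisableRPRegistration` tweak are assumptions for illustration, not taken from this diff:

```go
package example

import (
	armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy"
)

// perClientOptions derives per-client options from a shared base without
// mutating it, relying on the new Clone() deep copy.
func perClientOptions(base *armpolicy.ClientOptions) *armpolicy.ClientOptions {
	opts := base.Clone()              // assumed to return an independent copy
	opts.DisableRPRegistration = true // hypothetical per-client tweak
	return opts
}
```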


@@ -169,3 +169,14 @@ func (req *Request) Clone(ctx context.Context) *Request {
r2.req = req.req.Clone(ctx)
return &r2
}
// not exported but dependent on Request
// PolicyFunc is a type that implements the Policy interface.
// Use this type when implementing a stateless policy as a first-class function.
type PolicyFunc func(*Request) (*http.Response, error)
// Do implements the Policy interface on policyFunc.
func (pf PolicyFunc) Do(req *Request) (*http.Response, error) {
return pf(req)
}
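`PolicyFunc` lives in the internal `exported` package, but the same adapter pattern works in user code against the public `policy.Policy` interface. A minimal sketch; the `x-example-tag` header is made up for illustration:

```go
package example

import (
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)

// policyFunc adapts a plain function to the policy.Policy interface,
// mirroring the PolicyFunc type shown above.
type policyFunc func(*policy.Request) (*http.Response, error)

// Do satisfies policy.Policy by invoking the function itself.
func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) {
	return pf(req)
}

// tagRequests stamps every outgoing request, then forwards it down the pipeline.
var tagRequests policy.Policy = policyFunc(func(req *policy.Request) (*http.Response, error) {
	req.Raw().Header.Set("x-example-tag", "demo") // illustrative header
	return req.Next()
})
```

A policy built this way can be attached through `policy.ClientOptions.PerCallPolicies` without declaring a dedicated struct.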


@@ -21,6 +21,7 @@ const (
HeaderOperationLocation = "Operation-Location"
HeaderRetryAfter = "Retry-After"
HeaderUserAgent = "User-Agent"
HeaderXMSClientRequestID = "x-ms-client-request-id"
)
const BearerTokenPrefix = "Bearer "
@@ -30,5 +31,5 @@ const (
Module = "azcore"
// Version is the semantic version (see http://semver.org) of this module.
Version = "v1.3.1"
Version = "v1.4.0"
)


@@ -7,8 +7,6 @@
package runtime
import (
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)
@@ -46,7 +44,7 @@ func NewPipeline(module, version string, plOpts PipelineOptions, options *policy
}
// we put the includeResponsePolicy at the very beginning so that the raw response
// is populated with the final response (some policies might mutate the response)
policies := []policy.Policy{policyFunc(includeResponsePolicy)}
policies := []policy.Policy{exported.PolicyFunc(includeResponsePolicy)}
if cp.APIVersion != "" {
policies = append(policies, newAPIVersionPolicy(cp.APIVersion, &plOpts.APIVersion))
}
@@ -59,19 +57,10 @@ func NewPipeline(module, version string, plOpts PipelineOptions, options *policy
policies = append(policies, plOpts.PerRetry...)
policies = append(policies, cp.PerRetryPolicies...)
policies = append(policies, NewLogPolicy(&cp.Logging))
policies = append(policies, policyFunc(httpHeaderPolicy), policyFunc(bodyDownloadPolicy))
policies = append(policies, exported.PolicyFunc(httpHeaderPolicy), exported.PolicyFunc(bodyDownloadPolicy))
transport := cp.Transport
if transport == nil {
transport = defaultHTTPClient
}
return exported.NewPipeline(transport, policies...)
}
// policyFunc is a type that implements the Policy interface.
// Use this type when implementing a stateless policy as a first-class function.
type policyFunc func(*policy.Request) (*http.Response, error)
// Do implements the Policy interface on policyFunc.
func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) {
return pf(req)
}
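Since per-call policies run once per operation while per-retry policies run on every attempt, the assembly order above is significant. A hedged sketch of wiring a caller-supplied policy into the per-call slot; the module name and version are placeholders:

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// newExamplePipeline places p among the per-call policies; per the assembly
// above, it ends up after the built-in includeResponsePolicy.
func newExamplePipeline(p policy.Policy) runtime.Pipeline {
	return runtime.NewPipeline("example-module", "v0.1.0",
		runtime.PipelineOptions{},
		&policy.ClientOptions{
			PerCallPolicies: []policy.Policy{p},
		})
}
```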


@@ -11,6 +11,7 @@ import (
"fmt"
"io"
"net/http"
"net/url"
"sort"
"strings"
"time"
@@ -66,12 +67,7 @@ func NewLogPolicy(o *policy.LogOptions) policy.Policy {
allowedHeaders[strings.ToLower(ah)] = struct{}{}
}
// now do the same thing for query params
allowedQP := map[string]struct{}{
"api-version": {},
}
for _, qp := range o.AllowedQueryParams {
allowedQP[strings.ToLower(qp)] = struct{}{}
}
allowedQP := getAllowedQueryParams(o.AllowedQueryParams)
return &logPolicy{
includeBody: o.IncludeBody,
allowedHeaders: allowedHeaders,
@@ -79,6 +75,18 @@ func NewLogPolicy(o *policy.LogOptions) policy.Policy {
}
}
// getAllowedQueryParams merges the default set of allowed query parameters
// with a custom set (usually comes from client options).
func getAllowedQueryParams(customAllowedQP []string) map[string]struct{} {
allowedQP := map[string]struct{}{
"api-version": {},
}
for _, qp := range customAllowedQP {
allowedQP[strings.ToLower(qp)] = struct{}{}
}
return allowedQP
}
// logPolicyOpValues is the struct containing the per-operation values
type logPolicyOpValues struct {
try int32
@@ -140,20 +148,24 @@ func (p *logPolicy) Do(req *policy.Request) (*http.Response, error) {
const redactedValue = "REDACTED"
// writeRequestWithResponse appends a formatted HTTP request into a Buffer. If request and/or err are
// not nil, then these are also written into the Buffer.
func (p *logPolicy) writeRequestWithResponse(b *bytes.Buffer, req *policy.Request, resp *http.Response, err error) {
// getSanitizedURL returns a sanitized string for the provided url.URL
func getSanitizedURL(u url.URL, allowedQueryParams map[string]struct{}) string {
// redact applicable query params
cpURL := *req.Raw().URL
qp := cpURL.Query()
qp := u.Query()
for k := range qp {
if _, ok := p.allowedQP[strings.ToLower(k)]; !ok {
if _, ok := allowedQueryParams[strings.ToLower(k)]; !ok {
qp.Set(k, redactedValue)
}
}
cpURL.RawQuery = qp.Encode()
u.RawQuery = qp.Encode()
return u.String()
}
// writeRequestWithResponse appends a formatted HTTP request into a Buffer. If request and/or err are
// not nil, then these are also written into the Buffer.
func (p *logPolicy) writeRequestWithResponse(b *bytes.Buffer, req *policy.Request, resp *http.Response, err error) {
// Write the request into the buffer.
fmt.Fprint(b, " "+req.Raw().Method+" "+cpURL.String()+"\n")
fmt.Fprint(b, " "+req.Raw().Method+" "+getSanitizedURL(*req.Raw().URL, p.allowedQP)+"\n")
p.writeHeader(b, req.Raw().Header)
if resp != nil {
fmt.Fprintln(b, " --------------------------------------------------------------------------------")
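The redaction logic factored into `getSanitizedURL` above is easy to exercise on its own. A self-contained sketch of the same technique; the sample URL and allow-list are made up:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

const redactedValue = "REDACTED"

// sanitizeURL replaces the value of every query parameter that is not in the
// allow-list (matched case-insensitively) and returns the resulting URL string.
func sanitizeURL(u url.URL, allowed map[string]struct{}) string {
	qp := u.Query()
	for k := range qp {
		if _, ok := allowed[strings.ToLower(k)]; !ok {
			qp.Set(k, redactedValue)
		}
	}
	u.RawQuery = qp.Encode()
	return u.String()
}

func main() {
	u, _ := url.Parse("https://example.com/list?api-version=2023-01-01&sig=secret")
	allowed := map[string]struct{}{"api-version": {}}
	fmt.Println(sanitizeURL(*u, allowed))
	// Output: https://example.com/list?api-version=2023-01-01&sig=REDACTED
}
```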


@@ -9,6 +9,7 @@ package runtime
import (
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
)
@@ -21,13 +22,12 @@ func NewRequestIDPolicy() policy.Policy {
}
func (r *requestIDPolicy) Do(req *policy.Request) (*http.Response, error) {
const requestIdHeader = "x-ms-client-request-id"
if req.Raw().Header.Get(requestIdHeader) == "" {
if req.Raw().Header.Get(shared.HeaderXMSClientRequestID) == "" {
id, err := uuid.New()
if err != nil {
return nil, err
}
req.Raw().Header.Set(requestIdHeader, id.String())
req.Raw().Header.Set(shared.HeaderXMSClientRequestID, id.String())
}
return req.Next()


@@ -146,7 +146,9 @@ func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options
opr := options.Handler
// now rehydrate the poller based on the encoded poller type
if async.CanResume(asJSON) {
if opr != nil {
log.Writef(log.EventLRO, "Resuming custom poller %T.", opr)
} else if async.CanResume(asJSON) {
opr, _ = async.New[T](pl, nil, "")
} else if body.CanResume(asJSON) {
opr, _ = body.New[T](pl, nil)
@@ -154,8 +156,6 @@ func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options
opr, _ = loc.New[T](pl, nil)
} else if op.CanResume(asJSON) {
opr, _ = op.New[T](pl, nil, "")
} else if opr != nil {
log.Writef(log.EventLRO, "Resuming custom poller %T.", opr)
} else {
return nil, fmt.Errorf("unhandled poller token %s", string(raw))
}
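The reordering above makes a caller-supplied handler win over the built-in resume detection. A hedged sketch of resuming with a custom handler; `widgetResult`, the options type name, and the `PollUntilDone` call are assumptions based on the azcore poller API, not shown in this diff:

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// widgetResult is a hypothetical long-running-operation result type.
type widgetResult struct {
	ID string
}

// resumeWidgetPoller rehydrates a poller from a saved resume token with a
// custom handler, which is now consulted before the built-in poller types.
func resumeWidgetPoller(ctx context.Context, token string, pl runtime.Pipeline,
	h runtime.PollingHandler[widgetResult]) (widgetResult, error) {
	poller, err := runtime.NewPollerFromResumeToken[widgetResult](token, pl,
		&runtime.NewPollerFromResumeTokenOptions[widgetResult]{Handler: h})
	if err != nil {
		var zero widgetResult
		return zero, err
	}
	return poller.PollUntilDone(ctx, nil)
}
```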

File diff suppressed because it is too large.


@@ -1,4 +1,4 @@
## Code of Conduct
This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
opensource-codeofconduct@amazon.com with any additional questions or comments.


@@ -14,29 +14,28 @@ Jump To:
* [Feature Requests](#feature-requests)
* [Code Contributions](#code-contributions)
## How to contribute
*Before you send us a pull request, please be sure that:*
1. You're working from the latest source on the master branch.
2. You check existing open, and recently closed, pull requests to be sure
1. You're working from the latest source on the `main` branch.
2. You check existing open, and recently closed, pull requests to be sure
that someone else hasn't already addressed the problem.
3. You create an issue before working on a contribution that will take a
3. You create an issue before working on a contribution that will take a
significant amount of your time.
*Creating a Pull Request*
1. Fork the repository.
2. In your fork, make your change in a branch that's based on this repo's master branch.
2. In your fork, make your change in a branch that's based on this repo's `main` branch.
3. Commit the change to your fork, using a clear and descriptive commit message.
4. Create a pull request, answering any questions in the pull request form.
For contributions that will take a significant amount of time, open a new
issue to pitch your idea before you get started. Explain the problem and
describe the content you want to see added to the documentation. Let us know
if you'll write it yourself or if you'd like us to help. We'll discuss your
proposal with you and let you know whether we're likely to accept it.
For contributions that will take a significant amount of time, open a new
issue to pitch your idea before you get started. Explain the problem and
describe the content you want to see added to the documentation. Let us know
if you'll write it yourself or if you'd like us to help. We'll discuss your
proposal with you and let you know whether we're likely to accept it.
## Bug Reports
@@ -74,9 +73,9 @@ guidelines prior to filing a bug report.
Open an [issue][issues] with the following:
* A short, descriptive title. Ideally, other community members should be able
* A short, descriptive title. Ideally, other community members should be able
to get a good idea of the feature just from reading the title.
* A detailed description of the the proposed feature.
* A detailed description of the the proposed feature.
* Why it should be added to the SDK.
* If possible, example code to illustrate how it should work.
* Use Markdown to make the request easier to read;
@@ -97,7 +96,7 @@ Please be aware of the following notes prior to opening a pull request:
3. Wherever possible, pull requests should contain tests as appropriate.
Bugfixes should contain tests that exercise the corrected behavior (i.e., the
test should fail without the bugfix and pass with it), and new features
test should fail without the bugfix and pass with it), and new features
should be accompanied by tests exercising the feature.
4. Pull requests that contain failing tests will not be merged until the test
@@ -112,7 +111,7 @@ Please be aware of the following notes prior to opening a pull request:
### Testing
To run the tests locally, running the `make unit` command will `go get` the
To run the tests locally, running the `make unit` command will `go get` the
SDK's testing dependencies, and run vet, link and unit tests for the SDK.
```
@@ -129,7 +128,7 @@ go test -tags codegen ./private/...
See the `Makefile` for additional testing tags that can be used in testing.
To test on multiple platform the SDK includes several DockerFiles under the
To test on multiple platform the SDK includes several DockerFiles under the
`awstesting/sandbox` folder, and associated make recipes to to execute
unit testing within environments configured for specific Go versions.
@@ -170,9 +169,9 @@ This will result in a patch version change.
* `SDK Bugs` - For minor changes that resolve an issue. This will result in a
patch version change.
[issues]: https://github.com/aws/aws-sdk-go/issues
[pr]: https://github.com/aws/aws-sdk-go/pulls
[issues]: https://github.com/aws/aws-sdk-go-v2/issues
[pr]: https://github.com/aws/aws-sdk-go-v2/pulls
[license]: http://aws.amazon.com/apache2.0/
[cla]: http://en.wikipedia.org/wiki/Contributor_License_Agreement
[releasenotes]: https://github.com/aws/aws-sdk-go/releases
[releasenotes]: https://github.com/aws/aws-sdk-go-v2/releases


@@ -12,4 +12,4 @@ Past Discussions
---
The issues listed here are for documentation purposes, and is used to capture issues and their associated discussions.
[Code of Conduct]: https://github.com/aws/aws-sdk-go-v2/blob/master/CODE_OF_CONDUCT.md
[Code of Conduct]: https://github.com/aws/aws-sdk-go-v2/blob/main/CODE_OF_CONDUCT.md


@@ -246,7 +246,6 @@ unit-race-modules-%:
"go test ${BUILD_TAGS} ${RUN_NONE} ./..." \
"go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./..."
unit-modules-%:
@# unit command that uses the pattern to define the root path that the
@# module testing will start from. Strips off the "unit-modules-" and
@@ -408,7 +407,6 @@ bench-modules-%:
&& go run . -p $(subst _,/,$(subst bench-modules-,,$@)) ${EACHMODULE_FLAGS} \
"go test -timeout=10m -bench . --benchmem ${BUILD_TAGS} ${RUN_NONE} ./..."
#####################
# Release Process #
#####################
@ -499,14 +497,22 @@ list-deps-%:
###################
.PHONY: sandbox-tests sandbox-build-% sandbox-run-% sandbox-test-% update-aws-golang-tip
sandbox-tests: sandbox-test-go1.15 sandbox-test-go1.16 sandbox-test-go1.17 sandbox-test-gotip
sandbox-tests: sandbox-test-go1.15 sandbox-test-go1.16 sandbox-test-go1.17 sandbox-test-go1.18 sandbox-test-go1.19 sandbox-test-go1.20 sandbox-test-gotip
sandbox-build-%:
@# sandbox-build-go1.17
@# sandbox-build-gotip
docker build \
-f ./internal/awstesting/sandbox/Dockerfile.test.$(subst sandbox-build-,,$@) \
-t "aws-sdk-go-$(subst sandbox-build-,,$@)" .
@if [ $@ == sandbox-build-gotip ]; then\
docker build \
-f ./internal/awstesting/sandbox/Dockerfile.test.gotip \
-t "aws-sdk-go-$(subst sandbox-build-,,$@)" . ;\
else\
docker build \
--build-arg GO_VERSION=$(subst sandbox-build-go,,$@) \
-f ./internal/awstesting/sandbox/Dockerfile.test.goversion \
-t "aws-sdk-go-$(subst sandbox-build-,,$@)" . ;\
fi
sandbox-run-%: sandbox-build-%
@# sandbox-run-go1.17
@# sandbox-run-gotip

View file

@ -1,3 +1,3 @@
AWS SDK for Go
Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Copyright 2014-2015 Stripe, Inc.

View file

@ -1,7 +1,6 @@
# AWS SDK for Go v2
[![Go Build status](https://github.com/aws/aws-sdk-go-v2/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/aws-sdk-go-v2/actions/workflows/go.yml)[![Codegen Build status](https://github.com/aws/aws-sdk-go-v2/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/aws-sdk-go-v2/actions/workflows/codegen.yml) [![SDK Documentation](https://img.shields.io/badge/SDK-Documentation-blue)](https://aws.github.io/aws-sdk-go-v2/docs/) [![Migration Guide](https://img.shields.io/badge/Migration-Guide-blue)](https://aws.github.io/aws-sdk-go-v2/docs/migrating/) [![API Reference](https://img.shields.io/badge/api-reference-blue.svg)](https://pkg.go.dev/mod/github.com/aws/aws-sdk-go-v2) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt)
[![Go Build status](https://github.com/aws/aws-sdk-go-v2/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/aws-sdk-go-v2/actions/workflows/go.yml)[![Codegen Build status](https://github.com/aws/aws-sdk-go-v2/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/aws-sdk-go-v2/actions/workflows/codegen.yml) [![SDK Documentation](https://img.shields.io/badge/SDK-Documentation-blue)](https://aws.github.io/aws-sdk-go-v2/docs/) [![Migration Guide](https://img.shields.io/badge/Migration-Guide-blue)](https://aws.github.io/aws-sdk-go-v2/docs/migrating/) [![API Reference](https://img.shields.io/badge/api-reference-blue.svg)](https://pkg.go.dev/mod/github.com/aws/aws-sdk-go-v2) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/aws/aws-sdk-go-v2/blob/main/LICENSE.txt)
`aws-sdk-go-v2` is the v2 AWS SDK for the Go programming language.
@ -118,7 +117,7 @@ Keeping the list of open issues lean will help us respond in a timely manner.
## Feedback and contributing
The v2 SDK will use GitHub [Issues] to track feature requests and issues with the SDK. In addition, we'll use GitHub [Projects] to track large tasks spanning multiple pull requests, such as refactoring the SDK's internal request lifecycle. You can provide feedback to us in several ways.
**GitHub issues**. To provide feedback or report bugs, file GitHub [Issues] on the SDK. This is the preferred mechanism to give feedback so that other users can engage in the conversation, +1 issues, etc. Issues you open will be evaluated, and included in our roadmap for the GA launch.
@ -139,8 +138,8 @@ API operation require parameters.
[Service Documentation](https://aws.amazon.com/documentation/) - Use this
documentation to learn how to interface with AWS services. These guides are
great for getting started with a service, or when looking for more
information about a service. While this document is not required for coding,
services may supply helpful samples to look out for.
[Forum](https://forums.aws.amazon.com/forum.jspa?forumID=293) - Ask questions, get help, and give feedback
@ -151,7 +150,7 @@ services may supply helpful samples to look out for.
[Dep]: https://github.com/golang/dep
[Issues]: https://github.com/aws/aws-sdk-go-v2/issues
[Projects]: https://github.com/aws/aws-sdk-go-v2/projects
[CHANGELOG]: https://github.com/aws/aws-sdk-go-v2/blob/master/CHANGELOG.md
[CHANGELOG]: https://github.com/aws/aws-sdk-go-v2/blob/main/CHANGELOG.md
[Amazon DynamoDB]: https://aws.amazon.com/dynamodb/
[design]: https://github.com/aws/aws-sdk-go-v2/blob/master/DESIGN.md
[design]: https://github.com/aws/aws-sdk-go-v2/blob/main/DESIGN.md
[license]: http://aws.amazon.com/apache2.0/

View file

@ -3,4 +3,4 @@
package aws
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.17.4"
const goModuleVersion = "1.17.6"

View file

@ -21,26 +21,18 @@ func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorCompone
if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
}
return ErrorComponents{
Code: errResponse.Code,
Message: errResponse.Message,
RequestID: errResponse.RequestID,
}, nil
return ErrorComponents(errResponse), nil
}
var errResponse wrappedErrorResponse
if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
}
return ErrorComponents{
Code: errResponse.Code,
Message: errResponse.Message,
RequestID: errResponse.RequestID,
}, nil
return ErrorComponents(errResponse), nil
}
// noWrappedErrorResponse represents the error response body with
// no internal <Error></Error wrapping
// no internal Error wrapping
type noWrappedErrorResponse struct {
Code string `xml:"Code"`
Message string `xml:"Message"`
@ -48,7 +40,7 @@ type noWrappedErrorResponse struct {
}
// wrappedErrorResponse represents the error response body
// wrapped within <Error>...</Error>
// wrapped within Error
type wrappedErrorResponse struct {
Code string `xml:"Error>Code"`
Message string `xml:"Error>Message"`
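
The two rewritten return statements above collapse a field-by-field copy into a single type conversion. A minimal sketch of why that compiles (the types below are illustrative stand-ins, not the SDK's actual definitions): since Go 1.8, struct types that differ only in their field tags are convertible.

```Go
package main

import "fmt"

// errorComponents stands in for the package's ErrorComponents type.
type errorComponents struct {
	Code      string
	Message   string
	RequestID string
}

// wrappedResponse stands in for wrappedErrorResponse; it differs from
// errorComponents only in its struct tags.
type wrappedResponse struct {
	Code      string `xml:"Error>Code"`
	Message   string `xml:"Error>Message"`
	RequestID string `xml:"RequestId"`
}

func main() {
	resp := wrappedResponse{Code: "NoSuchKey", Message: "key missing", RequestID: "req-1"}
	// Field tags are ignored for convertibility (Go 1.8+), so a single
	// conversion copies every field.
	fmt.Printf("%+v\n", errorComponents(resp))
}
```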

View file

@ -30,10 +30,6 @@ func NewTokenRateLimit(tokens uint) *TokenRateLimit {
}
}
func isTimeoutError(error) bool {
return false
}
type canceledError struct {
Err error
}

View file

@ -11,7 +11,6 @@ import (
awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/internal/sdk"
"github.com/aws/smithy-go/logging"
"github.com/aws/smithy-go/middleware"
smithymiddle "github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/transport/http"
)
@ -292,7 +291,7 @@ type retryMetadataKey struct{}
// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
// to clear all stack values.
func getRetryMetadata(ctx context.Context) (metadata retryMetadata, ok bool) {
metadata, ok = middleware.GetStackValue(ctx, retryMetadataKey{}).(retryMetadata)
metadata, ok = smithymiddle.GetStackValue(ctx, retryMetadataKey{}).(retryMetadata)
return metadata, ok
}
@ -301,7 +300,7 @@ func getRetryMetadata(ctx context.Context) (metadata retryMetadata, ok bool) {
// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
// to clear all stack values.
func setRetryMetadata(ctx context.Context, metadata retryMetadata) context.Context {
return middleware.WithStackValue(ctx, retryMetadataKey{}, metadata)
return smithymiddle.WithStackValue(ctx, retryMetadataKey{}, metadata)
}
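
The retryMetadataKey{} seen above is the standard Go pattern for scoping stack (or context) values: an unexported empty struct type cannot collide with keys from other packages. A minimal sketch using the standard library's context, since smithy-go's Get/WithStackValue follow the same shape but operate on middleware stack values:

```Go
package main

import (
	"context"
	"fmt"
)

// retryMetadataKey is unexported, so no other package can construct it.
type retryMetadataKey struct{}

type retryMetadata struct{ attempts int }

func main() {
	ctx := context.WithValue(context.Background(), retryMetadataKey{}, retryMetadata{attempts: 3})
	// The comma-ok type assertion mirrors getRetryMetadata above.
	md, ok := ctx.Value(retryMetadataKey{}).(retryMetadata)
	fmt.Println(md.attempts, ok) // 3 true
}
```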
// AddRetryMiddlewaresOptions is the set of options that can be passed to

View file

@ -1,3 +1,15 @@
# v1.18.16 (2023-03-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.15 (2023-02-22)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.14 (2023-02-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.13 (2023-02-15)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package config
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.18.13"
const goModuleVersion = "1.18.16"

View file

@ -1,3 +1,15 @@
# v1.13.16 (2023-03-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.15 (2023-02-22)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.14 (2023-02-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.13 (2023-02-15)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package credentials
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.13.13"
const goModuleVersion = "1.13.16"

View file

@ -1,3 +1,11 @@
# v1.12.24 (2023-03-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.23 (2023-02-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.22 (2023-02-03)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package imds
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.12.22"
const goModuleVersion = "1.12.24"

View file

@ -1,3 +1,15 @@
# v1.11.56 (2023-03-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.55 (2023-02-22)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.54 (2023-02-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.53 (2023-02-15)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package manager
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.11.53"
const goModuleVersion = "1.11.56"

View file

@ -70,7 +70,7 @@ func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTer
value = value.FieldByNameFunc(func(name string) bool {
if c == name {
return true
} else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
} else if !caseSensitive && strings.EqualFold(name, c) {
return true
}
return false
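
The replacement above is behavior-preserving for ASCII field names and avoids two allocations: strings.EqualFold compares under Unicode simple case-folding without materializing lowercased copies. A small self-contained sketch:

```Go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Same answer, but EqualFold allocates nothing...
	fmt.Println(strings.EqualFold("RequestId", "REQUESTID")) // true
	// ...while the old spelling builds two temporary strings.
	fmt.Println(strings.ToLower("RequestId") == strings.ToLower("REQUESTID")) // true
}
```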

View file

@ -1,3 +1,11 @@
# v1.1.30 (2023-03-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.29 (2023-02-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.28 (2023-02-03)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package configsources
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.1.28"
const goModuleVersion = "1.1.30"

View file

@ -1,3 +1,11 @@
# v2.4.24 (2023-03-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v2.4.23 (2023-02-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v2.4.22 (2023-02-03)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package endpoints
// goModuleVersion is the tagged release for this module
const goModuleVersion = "2.4.22"
const goModuleVersion = "2.4.24"

View file

@ -1,3 +1,11 @@
# v1.3.31 (2023-03-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.30 (2023-02-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.29 (2023-02-03)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package ini
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.3.29"
const goModuleVersion = "1.3.31"

View file

@ -1,3 +1,11 @@
# v1.0.22 (2023-03-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.0.21 (2023-02-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.0.20 (2023-02-14)
* No change notes available for this release.

View file

@ -3,4 +3,4 @@
package v4a
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.0.20"
const goModuleVersion = "1.0.22"

View file

@ -1,3 +1,11 @@
# v1.1.25 (2023-03-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.24 (2023-02-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.23 (2023-02-03)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package checksum
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.1.23"
const goModuleVersion = "1.1.25"

View file

@ -1,3 +1,11 @@
# v1.9.24 (2023-03-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.9.23 (2023-02-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.9.22 (2023-02-03)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package presignedurl
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.9.22"
const goModuleVersion = "1.9.24"

View file

@ -1,3 +1,11 @@
# v1.13.24 (2023-03-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.23 (2023-02-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.22 (2023-02-03)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package s3shared
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.13.22"
const goModuleVersion = "1.13.24"

View file

@ -1,3 +1,15 @@
# v1.30.6 (2023-03-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.30.5 (2023-02-22)
* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes.
# v1.30.4 (2023-02-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.30.3 (2023-02-14)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package s3
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.30.3"
const goModuleVersion = "1.30.6"

View file

@ -46,7 +46,6 @@ an request is serialized, and removes the serialized bucket name from request pa
Middleware layering:
Initialize : HTTP Request -> ARN Lookup -> Input-Validation -> Serialize step
Serialize : HTTP Request -> Process ARN -> operation serializer -> Update-Endpoint customization -> Remove-Bucket -> next middleware

View file

@ -27,7 +27,7 @@ func (e *BucketAlreadyExists) ErrorMessage() string {
return *e.Message
}
func (e *BucketAlreadyExists) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "BucketAlreadyExists"
}
return *e.ErrorCodeOverride
@ -57,7 +57,7 @@ func (e *BucketAlreadyOwnedByYou) ErrorMessage() string {
return *e.Message
}
func (e *BucketAlreadyOwnedByYou) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "BucketAlreadyOwnedByYou"
}
return *e.ErrorCodeOverride
@ -86,7 +86,7 @@ func (e *InvalidObjectState) ErrorMessage() string {
return *e.Message
}
func (e *InvalidObjectState) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "InvalidObjectState"
}
return *e.ErrorCodeOverride
@ -112,7 +112,7 @@ func (e *NoSuchBucket) ErrorMessage() string {
return *e.Message
}
func (e *NoSuchBucket) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "NoSuchBucket"
}
return *e.ErrorCodeOverride
@ -138,7 +138,7 @@ func (e *NoSuchKey) ErrorMessage() string {
return *e.Message
}
func (e *NoSuchKey) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "NoSuchKey"
}
return *e.ErrorCodeOverride
@ -164,7 +164,7 @@ func (e *NoSuchUpload) ErrorMessage() string {
return *e.Message
}
func (e *NoSuchUpload) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "NoSuchUpload"
}
return *e.ErrorCodeOverride
@ -190,7 +190,7 @@ func (e *NotFound) ErrorMessage() string {
return *e.Message
}
func (e *NotFound) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "NotFound"
}
return *e.ErrorCodeOverride
@ -216,7 +216,7 @@ func (e *ObjectAlreadyInActiveTierError) ErrorMessage() string {
return *e.Message
}
func (e *ObjectAlreadyInActiveTierError) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "ObjectAlreadyInActiveTierError"
}
return *e.ErrorCodeOverride
@ -243,7 +243,7 @@ func (e *ObjectNotInActiveTierError) ErrorMessage() string {
return *e.Message
}
func (e *ObjectNotInActiveTierError) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "ObjectNotInActiveTierError"
}
return *e.ErrorCodeOverride

View file

@ -1,3 +1,15 @@
# v1.12.5 (2023-03-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.4 (2023-02-22)
* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes.
# v1.12.3 (2023-02-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.2 (2023-02-15)
* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.

View file

@ -3,4 +3,4 @@
package sso
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.12.2"
const goModuleVersion = "1.12.5"

View file

@ -27,7 +27,7 @@ func (e *InvalidRequestException) ErrorMessage() string {
return *e.Message
}
func (e *InvalidRequestException) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "InvalidRequestException"
}
return *e.ErrorCodeOverride
@ -53,7 +53,7 @@ func (e *ResourceNotFoundException) ErrorMessage() string {
return *e.Message
}
func (e *ResourceNotFoundException) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "ResourceNotFoundException"
}
return *e.ErrorCodeOverride
@ -80,7 +80,7 @@ func (e *TooManyRequestsException) ErrorMessage() string {
return *e.Message
}
func (e *TooManyRequestsException) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "TooManyRequestsException"
}
return *e.ErrorCodeOverride
@ -107,7 +107,7 @@ func (e *UnauthorizedException) ErrorMessage() string {
return *e.Message
}
func (e *UnauthorizedException) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "UnauthorizedException"
}
return *e.ErrorCodeOverride

View file

@ -1,3 +1,15 @@
# v1.14.5 (2023-03-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.14.4 (2023-02-22)
* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes.
# v1.14.3 (2023-02-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.14.2 (2023-02-15)
* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.

View file

@ -3,4 +3,4 @@
package ssooidc
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.14.2"
const goModuleVersion = "1.14.5"

View file

@ -29,7 +29,7 @@ func (e *AccessDeniedException) ErrorMessage() string {
return *e.Message
}
func (e *AccessDeniedException) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "AccessDeniedException"
}
return *e.ErrorCodeOverride
@ -59,7 +59,7 @@ func (e *AuthorizationPendingException) ErrorMessage() string {
return *e.Message
}
func (e *AuthorizationPendingException) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "AuthorizationPendingException"
}
return *e.ErrorCodeOverride
@ -89,7 +89,7 @@ func (e *ExpiredTokenException) ErrorMessage() string {
return *e.Message
}
func (e *ExpiredTokenException) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "ExpiredTokenException"
}
return *e.ErrorCodeOverride
@ -119,7 +119,7 @@ func (e *InternalServerException) ErrorMessage() string {
return *e.Message
}
func (e *InternalServerException) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "InternalServerException"
}
return *e.ErrorCodeOverride
@ -150,7 +150,7 @@ func (e *InvalidClientException) ErrorMessage() string {
return *e.Message
}
func (e *InvalidClientException) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "InvalidClientException"
}
return *e.ErrorCodeOverride
@ -180,7 +180,7 @@ func (e *InvalidClientMetadataException) ErrorMessage() string {
return *e.Message
}
func (e *InvalidClientMetadataException) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "InvalidClientMetadataException"
}
return *e.ErrorCodeOverride
@ -210,7 +210,7 @@ func (e *InvalidGrantException) ErrorMessage() string {
return *e.Message
}
func (e *InvalidGrantException) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "InvalidGrantException"
}
return *e.ErrorCodeOverride
@ -240,7 +240,7 @@ func (e *InvalidRequestException) ErrorMessage() string {
return *e.Message
}
func (e *InvalidRequestException) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "InvalidRequestException"
}
return *e.ErrorCodeOverride
@ -269,7 +269,7 @@ func (e *InvalidScopeException) ErrorMessage() string {
return *e.Message
}
func (e *InvalidScopeException) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "InvalidScopeException"
}
return *e.ErrorCodeOverride
@ -299,7 +299,7 @@ func (e *SlowDownException) ErrorMessage() string {
return *e.Message
}
func (e *SlowDownException) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "SlowDownException"
}
return *e.ErrorCodeOverride
@ -329,7 +329,7 @@ func (e *UnauthorizedClientException) ErrorMessage() string {
return *e.Message
}
func (e *UnauthorizedClientException) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "UnauthorizedClientException"
}
return *e.ErrorCodeOverride
@ -358,7 +358,7 @@ func (e *UnsupportedGrantTypeException) ErrorMessage() string {
return *e.Message
}
func (e *UnsupportedGrantTypeException) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "UnsupportedGrantTypeException"
}
return *e.ErrorCodeOverride

View file

@ -1,3 +1,15 @@
# v1.18.6 (2023-03-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.5 (2023-02-22)
* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes.
# v1.18.4 (2023-02-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.3 (2023-02-03)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package sts
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.18.3"
const goModuleVersion = "1.18.6"

View file

@ -27,7 +27,7 @@ func (e *ExpiredTokenException) ErrorMessage() string {
return *e.Message
}
func (e *ExpiredTokenException) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "ExpiredTokenException"
}
return *e.ErrorCodeOverride
@ -57,7 +57,7 @@ func (e *IDPCommunicationErrorException) ErrorMessage() string {
return *e.Message
}
func (e *IDPCommunicationErrorException) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "IDPCommunicationError"
}
return *e.ErrorCodeOverride
@ -86,7 +86,7 @@ func (e *IDPRejectedClaimException) ErrorMessage() string {
return *e.Message
}
func (e *IDPRejectedClaimException) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "IDPRejectedClaim"
}
return *e.ErrorCodeOverride
@ -114,7 +114,7 @@ func (e *InvalidAuthorizationMessageException) ErrorMessage() string {
return *e.Message
}
func (e *InvalidAuthorizationMessageException) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "InvalidAuthorizationMessageException"
}
return *e.ErrorCodeOverride
@ -144,7 +144,7 @@ func (e *InvalidIdentityTokenException) ErrorMessage() string {
return *e.Message
}
func (e *InvalidIdentityTokenException) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "InvalidIdentityToken"
}
return *e.ErrorCodeOverride
@ -171,7 +171,7 @@ func (e *MalformedPolicyDocumentException) ErrorMessage() string {
return *e.Message
}
func (e *MalformedPolicyDocumentException) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "MalformedPolicyDocument"
}
return *e.ErrorCodeOverride
@ -208,7 +208,7 @@ func (e *PackedPolicyTooLargeException) ErrorMessage() string {
return *e.Message
}
func (e *PackedPolicyTooLargeException) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "PackedPolicyTooLarge"
}
return *e.ErrorCodeOverride
@ -239,7 +239,7 @@ func (e *RegionDisabledException) ErrorMessage() string {
return *e.Message
}
func (e *RegionDisabledException) ErrorCode() string {
if e.ErrorCodeOverride == nil {
if e == nil || e.ErrorCodeOverride == nil {
return "RegionDisabledException"
}
return *e.ErrorCodeOverride

View file

@ -2961,6 +2961,15 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@ -5236,12 +5245,18 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-south-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@ -5287,6 +5302,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "me-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@ -7796,12 +7814,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-2",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
Region: "eu-south-2",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@ -13294,6 +13318,156 @@ var awsPartition = partition{
}: endpoint{},
},
},
"internetmonitor": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
DNSSuffix: "api.aws",
},
defaultKey{
Variant: fipsVariant,
}: endpoint{
Hostname: "{service}-fips.{region}.{dnsSuffix}",
DNSSuffix: "api.aws",
},
},
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
}: endpoint{
Hostname: "internetmonitor.af-south-1.api.aws",
},
endpointKey{
Region: "ap-east-1",
}: endpoint{
Hostname: "internetmonitor.ap-east-1.api.aws",
},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{
Hostname: "internetmonitor.ap-northeast-1.api.aws",
},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{
Hostname: "internetmonitor.ap-northeast-2.api.aws",
},
endpointKey{
Region: "ap-northeast-3",
}: endpoint{
Hostname: "internetmonitor.ap-northeast-3.api.aws",
},
endpointKey{
Region: "ap-south-1",
}: endpoint{
Hostname: "internetmonitor.ap-south-1.api.aws",
},
endpointKey{
Region: "ap-south-2",
}: endpoint{
Hostname: "internetmonitor.ap-south-2.api.aws",
},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{
Hostname: "internetmonitor.ap-southeast-1.api.aws",
},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{
Hostname: "internetmonitor.ap-southeast-2.api.aws",
},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{
Hostname: "internetmonitor.ap-southeast-3.api.aws",
},
endpointKey{
Region: "ap-southeast-4",
}: endpoint{
Hostname: "internetmonitor.ap-southeast-4.api.aws",
},
endpointKey{
Region: "ca-central-1",
}: endpoint{
Hostname: "internetmonitor.ca-central-1.api.aws",
},
endpointKey{
Region: "eu-central-1",
}: endpoint{
Hostname: "internetmonitor.eu-central-1.api.aws",
},
endpointKey{
Region: "eu-central-2",
}: endpoint{
Hostname: "internetmonitor.eu-central-2.api.aws",
},
endpointKey{
Region: "eu-north-1",
}: endpoint{
Hostname: "internetmonitor.eu-north-1.api.aws",
},
endpointKey{
Region: "eu-south-1",
}: endpoint{
Hostname: "internetmonitor.eu-south-1.api.aws",
},
endpointKey{
Region: "eu-south-2",
}: endpoint{
Hostname: "internetmonitor.eu-south-2.api.aws",
},
endpointKey{
Region: "eu-west-1",
}: endpoint{
Hostname: "internetmonitor.eu-west-1.api.aws",
},
endpointKey{
Region: "eu-west-2",
}: endpoint{
Hostname: "internetmonitor.eu-west-2.api.aws",
},
endpointKey{
Region: "eu-west-3",
}: endpoint{
Hostname: "internetmonitor.eu-west-3.api.aws",
},
endpointKey{
Region: "me-central-1",
}: endpoint{
Hostname: "internetmonitor.me-central-1.api.aws",
},
endpointKey{
Region: "me-south-1",
}: endpoint{
Hostname: "internetmonitor.me-south-1.api.aws",
},
endpointKey{
Region: "sa-east-1",
}: endpoint{
Hostname: "internetmonitor.sa-east-1.api.aws",
},
endpointKey{
Region: "us-east-1",
}: endpoint{
Hostname: "internetmonitor.us-east-1.api.aws",
},
endpointKey{
Region: "us-east-2",
}: endpoint{
Hostname: "internetmonitor.us-east-2.api.aws",
},
endpointKey{
Region: "us-west-1",
}: endpoint{
Hostname: "internetmonitor.us-west-1.api.aws",
},
endpointKey{
Region: "us-west-2",
}: endpoint{
Hostname: "internetmonitor.us-west-2.api.aws",
},
},
},
"iot": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@ -14190,6 +14364,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@ -14673,6 +14850,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@ -15270,6 +15450,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@ -18229,6 +18412,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "me-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@ -19740,9 +19926,6 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "api",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@ -20667,12 +20850,18 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@ -20685,12 +20874,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-2",
}: endpoint{},
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-2",
}: endpoint{},
endpointKey{
Region: "us-west-1",
}: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
@ -29785,6 +29980,31 @@ var awscnPartition = partition{
},
},
},
"internetmonitor": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
DNSSuffix: "api.amazonwebservices.com.cn",
},
defaultKey{
Variant: fipsVariant,
}: endpoint{
Hostname: "{service}-fips.{region}.{dnsSuffix}",
DNSSuffix: "api.amazonwebservices.com.cn",
},
},
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
}: endpoint{
Hostname: "internetmonitor.cn-north-1.api.amazonwebservices.com.cn",
},
endpointKey{
Region: "cn-northwest-1",
}: endpoint{
Hostname: "internetmonitor.cn-northwest-1.api.amazonwebservices.com.cn",
},
},
},
"iot": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@ -30159,6 +30379,16 @@ var awscnPartition = partition{
}: endpoint{},
},
},
"rolesanywhere": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
},
},
"route53": service{
PartitionEndpoint: "aws-cn-global",
IsRegionalized: boxedFalse,
@ -31528,6 +31758,21 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "clouddirectory.us-gov-west-1.amazonaws.com",
},
endpointKey{
Region: "us-gov-west-1-fips",
}: endpoint{
Hostname: "clouddirectory.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
Deprecated: boxedTrue,
},
},
},
"cloudformation": service{
@ -32751,6 +32996,16 @@ var awsusgovPartition = partition{
},
},
},
"emr-containers": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
},
},
"es": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -33426,6 +33681,31 @@ var awsusgovPartition = partition{
}: endpoint{},
},
},
"internetmonitor": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
DNSSuffix: "api.aws",
},
defaultKey{
Variant: fipsVariant,
}: endpoint{
Hostname: "{service}-fips.{region}.{dnsSuffix}",
DNSSuffix: "api.aws",
},
},
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{
Hostname: "internetmonitor.us-gov-east-1.api.aws",
},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{
Hostname: "internetmonitor.us-gov-west-1.api.aws",
},
},
},
"iot": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@ -33949,12 +34229,22 @@ var awsusgovPartition = partition{
"mediaconvert": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-gov-west-1",
Region: "fips-us-gov-west-1",
}: endpoint{
Hostname: "mediaconvert.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "mediaconvert.us-gov-west-1.amazonaws.com",
},
},
},
@ -34319,12 +34609,22 @@ var awsusgovPartition = partition{
"participant.connect": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-gov-west-1",
Region: "fips-us-gov-west-1",
}: endpoint{
Hostname: "participant.connect.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "participant.connect.us-gov-west-1.amazonaws.com",
},
},
},
@ -35174,21 +35474,45 @@ var awsusgovPartition = partition{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{
Protocols: []string{"https"},
},
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com",
Protocols: []string{"https"},
},
endpointKey{
Region: "us-gov-east-1-fips",
}: endpoint{
Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{
Protocols: []string{"https"},
},
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com",
Protocols: []string{"https"},
},
endpointKey{
Region: "us-gov-west-1-fips",
}: endpoint{
Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
Deprecated: boxedTrue,
},
},
},
@ -36990,6 +37314,9 @@ var awsisoPartition = partition{
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
endpointKey{
Region: "us-iso-west-1",
}: endpoint{},
},
},
"snowball": service{

View file

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.44.204"
const SDKVersion = "1.44.219"

View file

@ -386,8 +386,14 @@ func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error
}
func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool {
if fd.Cardinality() == protoreflect.Repeated {
return false
}
if md := fd.Message(); md != nil {
return md.FullName() == "google.protobuf.Value" && fd.Cardinality() != protoreflect.Repeated
return md.FullName() == "google.protobuf.Value"
}
if ed := fd.Enum(); ed != nil {
return ed.FullName() == "google.protobuf.NullValue"
}
return false
}

View file

@ -1,3 +1,3 @@
{
"v2": "2.7.0"
"v2": "2.7.1"
}

View file

@ -1,5 +1,12 @@
# Changelog
## [2.7.1](https://github.com/googleapis/gax-go/compare/v2.7.0...v2.7.1) (2023-03-06)
### Bug Fixes
* **v2/apierror:** return Unknown GRPCStatus when err source is HTTP ([#260](https://github.com/googleapis/gax-go/issues/260)) ([043b734](https://github.com/googleapis/gax-go/commit/043b73437a240a91229207fb3ee52a9935a36f23)), refs [#254](https://github.com/googleapis/gax-go/issues/254)
## [2.7.0](https://github.com/googleapis/gax-go/compare/v2.6.0...v2.7.0) (2022-11-02)

View file

@ -39,6 +39,7 @@ import (
jsonerror "github.com/googleapis/gax-go/v2/apierror/internal/proto"
"google.golang.org/api/googleapi"
"google.golang.org/genproto/googleapis/rpc/errdetails"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
@ -197,12 +198,12 @@ func (a *APIError) Unwrap() error {
// Error returns a readable representation of the APIError.
func (a *APIError) Error() string {
var msg string
if a.status != nil {
msg = a.err.Error()
} else if a.httpErr != nil {
if a.httpErr != nil {
// Truncate the googleapi.Error message because it dumps the Details in
// an ugly way.
msg = fmt.Sprintf("googleapi: Error %d: %s", a.httpErr.Code, a.httpErr.Message)
} else if a.status != nil {
msg = a.err.Error()
}
return strings.TrimSpace(fmt.Sprintf("%s\n%s", msg, a.details))
}
@ -236,6 +237,9 @@ func (a *APIError) Metadata() map[string]string {
// setDetailsFromError parses a Status error or a googleapi.Error
// and sets status and details or httpErr and details, respectively.
// It returns false if neither Status nor googleapi.Error can be parsed.
// When err is a googleapi.Error, the status of the returned error will
// be set to an Unknown error, rather than nil, since a nil code is
// interpreted as OK in the gRPC status package.
func (a *APIError) setDetailsFromError(err error) bool {
st, isStatus := status.FromError(err)
var herr *googleapi.Error
@ -248,6 +252,7 @@ func (a *APIError) setDetailsFromError(err error) bool {
case isHTTPErr:
a.httpErr = herr
a.details = parseHTTPDetails(herr)
a.status = status.New(codes.Unknown, herr.Message)
default:
return false
}
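
A minimal sketch of the gRPC behavior the new doc comment describes: a nil *status.Status reports codes.OK, which is why the HTTP branch now stores an explicit Unknown status rather than leaving the field nil.

```Go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	var st *status.Status
	fmt.Println(st.Code()) // OK — a nil status reads as success

	st = status.New(codes.Unknown, "mapped from an HTTP error")
	fmt.Println(st.Code()) // Unknown
}
```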

View file

@ -30,4 +30,4 @@
package internal
// Version is the current tagged release of the library.
const Version = "2.7.0"
const Version = "2.7.1"

View file

@ -3,7 +3,7 @@
before:
hooks:
- ./gen.sh
- go install mvdan.cc/garble@v0.7.2
- go install mvdan.cc/garble@v0.9.3
builds:
-

View file

@ -16,6 +16,12 @@ This package provides various compression algorithms.
# changelog
* Jan 21st, 2023 (v1.15.15)
* deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
* zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
* zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745
* gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740
* Jan 3rd, 2023 (v1.15.14)
* flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718

View file

@ -61,7 +61,7 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
b, err := fse.Decompress(in[:iSize], s.fse)
s.fse.Out = nil
if err != nil {
return s, nil, err
return s, nil, fmt.Errorf("fse decompress returned: %w", err)
}
if len(b) > 255 {
return s, nil, errors.New("corrupt input: output table too large")
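
The switch to fmt.Errorf with %w above adds context without hiding the cause. A minimal sketch (errCorrupt is an illustrative stand-in for the error returned by fse.Decompress): wrapping keeps the original error matchable with errors.Is and errors.As.

```Go
package main

import (
	"errors"
	"fmt"
)

var errCorrupt = errors.New("corrupt input")

func decompress() error {
	// %w records errCorrupt as the wrapped cause.
	return fmt.Errorf("fse decompress returned: %w", errCorrupt)
}

func main() {
	err := decompress()
	fmt.Println(err)                        // fse decompress returned: corrupt input
	fmt.Println(errors.Is(err, errCorrupt)) // true — the cause survives wrapping
}
```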

View file

@ -103,6 +103,28 @@ func hash(u, shift uint32) uint32 {
return (u * 0x1e35a7bd) >> shift
}
// EncodeBlockInto exposes encodeBlock but checks dst size.
func EncodeBlockInto(dst, src []byte) (d int) {
if MaxEncodedLen(len(src)) > len(dst) {
return 0
}
// encodeBlock breaks on too big blocks, so split.
for len(src) > 0 {
p := src
src = nil
if len(p) > maxBlockSize {
p, src = p[:maxBlockSize], p[maxBlockSize:]
}
if len(p) < minNonLiteralBlockSize {
d += emitLiteral(dst[d:], p)
} else {
d += encodeBlock(dst[d:], p)
}
}
return d
}
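
A hedged usage sketch for the helper added above: size the destination with MaxEncodedLen and treat a zero return as "buffer too small". The import path below is an assumption — verify where EncodeBlockInto is actually exported in the version you vendor before relying on it.

```Go
package main

import (
	"fmt"

	"github.com/klauspost/compress/s2" // assumed home of EncodeBlockInto
)

func main() {
	src := []byte("a block of data, a block of data, a block of data")
	// MaxEncodedLen bounds the worst-case encoded size for len(src) input.
	dst := make([]byte, s2.MaxEncodedLen(len(src)))
	n := s2.EncodeBlockInto(dst, src)
	if n == 0 {
		panic("dst too small for worst-case output")
	}
	fmt.Printf("%d -> %d bytes\n", len(src), n)
}
```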
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.

View file

@ -20,11 +20,12 @@ This is important, so you don't have to worry about spending CPU cycles on alrea
* Concurrent stream compression
* Faster decompression, even for Snappy compatible content
* Concurrent Snappy/S2 stream decompression
* Ability to quickly skip forward in compressed stream
* Skip forward in compressed stream
* Random seeking with indexes
* Compatible with reading Snappy compressed content
* Smaller block size overhead on incompressible blocks
* Block concatenation
* Block Dictionary support
* Uncompressed stream mode
* Automatic stream size padding
* Snappy compatible block compression
@ -594,6 +595,123 @@ Best... 10737418240 -> 4210602774 [39.21%]; 42.96s, 254.4MB/s
Decompression speed should be around the same as using the 'better' compression mode.
## Dictionaries
*Note: S2 dictionary compression is currently at an early implementation stage, with no assembly for
either encoding or decoding. Performance improvements can be expected in the future.*
Adding dictionaries allows providing a custom dictionary that will serve as a lookup in the beginning of blocks.
The same dictionary *must* be used for both encoding and decoding.
S2 does not keep track of whether the same dictionary is used,
and using the wrong dictionary will most often not result in an error when decompressing.
Blocks encoded *without* dictionaries can be decompressed seamlessly *with* a dictionary.
This means it is possible to switch from an encoding without dictionaries to an encoding with dictionaries
and treat the blocks similarly.
Similar to [zStandard dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression),
the same usage scenario applies to S2 dictionaries.
> Training works if there is some correlation in a family of small data samples. The more data-specific a dictionary is, the more efficient it is (there is no universal dictionary). Hence, deploying one dictionary per type of data will provide the greatest benefits. Dictionary gains are mostly effective in the first few KB. Then, the compression algorithm will gradually use previously decoded content to better compress the rest of the file.
S2 further limits the dictionary to only be enabled on the first 64KB of a block.
This will remove any negative (speed) impacts of the dictionaries on bigger blocks.
### Compression
Using the [github_users_sample_set](https://github.com/facebook/zstd/releases/download/v1.1.3/github_users_sample_set.tar.zst)
and a 64KB dictionary trained with zStandard the following sizes can be achieved.
| | Default | Better | Best |
|--------------------|------------------|------------------|-----------------------|
| Without Dictionary | 3362023 (44.92%) | 3083163 (41.19%) | 3057944 (40.86%) |
| With Dictionary | 921524 (12.31%) | 873154 (11.67%) | 785503 bytes (10.49%) |
So for highly repetitive content, this case provides an almost 3x reduction in size.
For less uniform data we will use the Go source code tree.
Compressing the first 64KB of all `.go` files in `go/src` (Go 1.19.5, 8912 files, 51253563 bytes input):
| | Default | Better | Best |
|--------------------|-------------------|-------------------|-------------------|
| Without Dictionary | 22955767 (44.79%) | 20189613 (39.39%) | 19482828 (38.01%) |
| With Dictionary | 19654568 (38.35%) | 16289357 (31.78%) | 15184589 (29.63%) |
| Saving/file | 362 bytes | 428 bytes | 472 bytes |
### Creating Dictionaries
There are no tools to create dictionaries in S2.
However, there are multiple ways to create a useful dictionary:
#### Using a Sample File
If your input is very uniform, you can just use a sample file as the dictionary.
For example in the `github_users_sample_set` above, the average compression only goes up from
10.49% to 11.48% by using the first file as dictionary compared to using a dedicated dictionary.
```Go
// Read a sample
sample, err := os.ReadFile("sample.json")
// Create a dictionary.
dict := s2.MakeDict(sample, nil)
// b := dict.Bytes() will provide a dictionary that can be saved
// and reloaded with s2.NewDict(b).
// To encode:
encoded := dict.Encode(nil, file)
// To decode:
decoded, err := dict.Decode(nil, file)
```
#### Using Zstandard
Zstandard dictionaries can easily be converted to S2 dictionaries.
This can be helpful to generate dictionaries for files that don't have a fixed structure.
Example, with training set files placed in `./training-set`:
`λ zstd -r --train-fastcover training-set/* --maxdict=65536 -o name.dict`
This will create a dictionary of 64KB, that can be converted to a dictionary like this:
```Go
// Decode the Zstandard dictionary.
insp, err := zstd.InspectDictionary(zdict)
if err != nil {
panic(err)
}
// We are only interested in the contents.
// Assume that files start with "// Copyright (c) 2023".
// Search for the longest match for that.
// This may save a few bytes.
dict := s2.MakeDict(insp.Content(), []byte("// Copyright (c) 2023"))
// b := dict.Bytes() will provide a dictionary that can be saved
// and reloaded with s2.NewDict(b).
// We can now encode using this dictionary
encodedWithDict := dict.Encode(nil, payload)
// To decode content:
decoded, err := dict.Decode(nil, encodedWithDict)
```
It is recommended to save the dictionary returned by `b := dict.Bytes()`, since that will contain only the S2 dictionary.
This dictionary can later be loaded using `s2.NewDict(b)`. The dictionary then no longer requires `zstd` to be initialized.
Also note how `s2.MakeDict` allows you to search for a common starting sequence of your files.
This can be omitted, at the expense of a few bytes.
# Snappy Compatibility
S2 now offers full compatibility with Snappy.
@ -929,6 +1047,72 @@ The first copy of a block cannot be a repeat offset and the offset is reset on e
Default streaming block size is 1MB.
# Dictionary Encoding
Adding dictionaries allows providing a custom dictionary that will serve as a lookup in the beginning of blocks.
A dictionary provides an initial repeat value that can be used to point to a common header.
Other than that the dictionary contains values that can be used as back-references.
Often used data should be placed at the *end* of the dictionary since offsets < 2048 bytes will be smaller.
## Format
Dictionary *content* must be at least 16 bytes and less than or equal to 64KiB (65536 bytes).
Encoding: `[repeat value (uvarint)][dictionary content...]`
Before the dictionary content, an unsigned base-128 (uvarint) encoded value specifying the initial repeat offset.
This value is an offset into the dictionary content and not a back-reference offset,
so setting this to 0 will make the repeat value point to the first value of the dictionary.
The value must be less than the dictionary length minus 8.
## Encoding
From the decoder point of view the dictionary content is seen as preceding the encoded content.
`[dictionary content][decoded output]`
Backreferences to the dictionary are encoded as ordinary backreferences that have an offset before the start of the decoded block.
Matches copying from the dictionary are **not** allowed to cross from the dictionary into the decoded data.
However, if a copy ends at the end of the dictionary the next repeat will point to the start of the decoded buffer, which is allowed.
The first match can be a repeat value, which will use the repeat offset stored in the dictionary.
When 64KB (65536 bytes) has been en/decoded, it is no longer allowed to reference the dictionary,
neither by copy nor by repeat operations.
If the boundary is crossed while copying from the dictionary, the operation should complete,
but the next instruction is not allowed to reference the dictionary.
Valid blocks encoded *without* a dictionary can be decoded with any dictionary.
There are no checks on whether the supplied dictionary is the correct one for a block.
Because of this there is no overhead by using a dictionary.
## Example
This is the dictionary content. Elements are separated by `[]`.
Dictionary: `[0x0a][Yesterday 25 bananas were added to Benjamins brown bag]`.
Initial repeat offset is set at 10, which is the letter `2`.
Encoded `[LIT "10"][REPEAT len=10][LIT "hich"][MATCH off=50 len=6][MATCH off=31 len=6][MATCH off=61 len=10]`
Decoded: `[10][ bananas w][hich][ were ][brown ][were added]`
Output: `10 bananas which were brown were added`
## Streams
For streams each block can use the dictionary.
The dictionary cannot currently be provided on the stream.
# LICENSE
This code is based on the [Snappy-Go](https://github.com/golang/snappy) implementation.

View file

@ -13,6 +13,7 @@ import (
"io/ioutil"
"math"
"runtime"
"strconv"
"sync"
)
@ -880,15 +881,20 @@ func (r *Reader) Skip(n int64) error {
// See Reader.ReadSeeker
type ReadSeeker struct {
*Reader
readAtMu sync.Mutex
}
// ReadSeeker will return an io.ReadSeeker compatible version of the reader.
// ReadSeeker will return an io.ReadSeeker and io.ReaderAt
// compatible version of the reader.
// If 'random' is specified the returned io.Seeker can be used for
// random seeking, otherwise only forward seeking is supported.
// Enabling random seeking requires the original input to support
// the io.Seeker interface.
// A custom index can be specified which will be used if supplied.
// When using a custom index, it will not be read from the input stream.
// The ReadAt position will affect regular reads and the current position of Seek.
// So using Read after ReadAt will continue from where the ReadAt stopped.
// No functions should be used concurrently.
// The returned ReadSeeker contains a shallow reference to the existing Reader,
// meaning changes performed to one is reflected in the other.
func (r *Reader) ReadSeeker(random bool, index []byte) (*ReadSeeker, error) {
@ -958,42 +964,55 @@ func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) {
// Reset on EOF
r.err = nil
}
if offset == 0 && whence == io.SeekCurrent {
return r.blockStart + int64(r.i), nil
}
if !r.readHeader {
// Make sure we read the header.
_, r.err = r.Read([]byte{})
}
rs, ok := r.r.(io.ReadSeeker)
if r.index == nil || !ok {
if whence == io.SeekCurrent && offset >= 0 {
err := r.Skip(offset)
return r.blockStart + int64(r.i), err
}
if whence == io.SeekStart && offset >= r.blockStart+int64(r.i) {
err := r.Skip(offset - r.blockStart - int64(r.i))
return r.blockStart + int64(r.i), err
}
return 0, ErrUnsupported
}
// Calculate absolute offset.
absOffset := offset
switch whence {
case io.SeekStart:
case io.SeekCurrent:
offset += r.blockStart + int64(r.i)
absOffset = r.blockStart + int64(r.i) + offset
case io.SeekEnd:
if offset > 0 {
return 0, errors.New("seek after end of file")
if r.index == nil {
return 0, ErrUnsupported
}
offset = r.index.TotalUncompressed + offset
absOffset = r.index.TotalUncompressed + offset
default:
r.err = ErrUnsupported
return 0, r.err
}
if offset < 0 {
if absOffset < 0 {
return 0, errors.New("seek before start of file")
}
c, u, err := r.index.Find(offset)
if !r.readHeader {
// Make sure we read the header.
_, r.err = r.Read([]byte{})
if r.err != nil {
return 0, r.err
}
}
// If we are inside current block no need to seek.
// This includes no offset changes.
if absOffset >= r.blockStart && absOffset < r.blockStart+int64(r.j) {
r.i = int(absOffset - r.blockStart)
return r.blockStart + int64(r.i), nil
}
rs, ok := r.r.(io.ReadSeeker)
if r.index == nil || !ok {
currOffset := r.blockStart + int64(r.i)
if absOffset >= currOffset {
err := r.Skip(absOffset - currOffset)
return r.blockStart + int64(r.i), err
}
return 0, ErrUnsupported
}
// We can seek and we have an index.
c, u, err := r.index.Find(absOffset)
if err != nil {
return r.blockStart + int64(r.i), err
}
@ -1004,12 +1023,57 @@ func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) {
return 0, err
}
r.i = r.j // Remove rest of current block.
if u < offset {
r.i = r.j // Remove rest of current block.
r.blockStart = u - int64(r.j) // Adjust current block start for accounting.
if u < absOffset {
// Forward inside block
return offset, r.Skip(offset - u)
return absOffset, r.Skip(absOffset - u)
}
return offset, nil
if u > absOffset {
return 0, fmt.Errorf("s2 seek: (internal error) u (%d) > absOffset (%d)", u, absOffset)
}
return absOffset, nil
}
// ReadAt reads len(p) bytes into p starting at offset off in the
// underlying input source. It returns the number of bytes
// read (0 <= n <= len(p)) and any error encountered.
//
// When ReadAt returns n < len(p), it returns a non-nil error
// explaining why more bytes were not returned. In this respect,
// ReadAt is stricter than Read.
//
// Even if ReadAt returns n < len(p), it may use all of p as scratch
// space during the call. If some data is available but not len(p) bytes,
// ReadAt blocks until either all the data is available or an error occurs.
// In this respect ReadAt is different from Read.
//
// If the n = len(p) bytes returned by ReadAt are at the end of the
// input source, ReadAt may return either err == EOF or err == nil.
//
// If ReadAt is reading from an input source with a seek offset,
// ReadAt should not affect nor be affected by the underlying
// seek offset.
//
// Clients of ReadAt can execute parallel ReadAt calls on the
// same input source. This is however not recommended.
func (r *ReadSeeker) ReadAt(p []byte, offset int64) (int, error) {
r.readAtMu.Lock()
defer r.readAtMu.Unlock()
_, err := r.Seek(offset, io.SeekStart)
if err != nil {
return 0, err
}
n := 0
for n < len(p) {
n2, err := r.Read(p[n:])
if err != nil {
// This will include io.EOF
return n + n2, err
}
n += n2
}
return n, nil
}
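
A hedged usage sketch of the new ReadAt support: write a stream with an index, wrap the reader in a ReadSeeker with random seeking enabled, and serve positioned reads. WriterAddIndex is assumed from the s2 options API; the rest follows the signatures shown above.

```Go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	// Compress with an index appended at Close so random access works.
	var buf bytes.Buffer
	w := s2.NewWriter(&buf, s2.WriterAddIndex()) // assumed option name
	if _, err := w.Write(bytes.Repeat([]byte("0123456789"), 1000)); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	r := s2.NewReader(bytes.NewReader(buf.Bytes()))
	rs, err := r.ReadSeeker(true, nil) // index is located in the stream
	if err != nil {
		panic(err)
	}
	p := make([]byte, 10)
	if _, err := rs.ReadAt(p, 1234); err != nil {
		panic(err)
	}
	fmt.Printf("bytes at offset 1234: %s\n", p)
}
```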
// ReadByte satisfies the io.ByteReader interface.
@ -1048,3 +1112,370 @@ func (r *Reader) SkippableCB(id uint8, fn func(r io.Reader) error) error {
r.skippableCB[id] = fn
return nil
}
// s2DecodeDict writes the decoding of src to dst. It assumes that the varint-encoded
// length of the decompressed bytes has already been read, and that len(dst)
// equals that length.
//
// It returns 0 on success or a decodeErrCodeXxx error code on failure.
func s2DecodeDict(dst, src []byte, dict *Dict) int {
if dict == nil {
return s2Decode(dst, src)
}
const debug = false
const debugErrs = debug
if debug {
fmt.Println("Starting decode, dst len:", len(dst))
}
var d, s, length int
offset := len(dict.dict) - dict.repeat
// As long as we can read at least 5 bytes...
for s < len(src)-5 {
// Removing bounds checks is SLOWER, when if doing
// in := src[s:s+5]
// Checked on Go 1.18
switch src[s] & 0x03 {
case tagLiteral:
x := uint32(src[s] >> 2)
switch {
case x < 60:
s++
case x == 60:
s += 2
x = uint32(src[s-1])
case x == 61:
in := src[s : s+3]
x = uint32(in[1]) | uint32(in[2])<<8
s += 3
case x == 62:
in := src[s : s+4]
// Load as 32 bit and shift down.
x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
x >>= 8
s += 4
case x == 63:
in := src[s : s+5]
x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24
s += 5
}
length = int(x) + 1
if debug {
fmt.Println("literals, length:", length, "d-after:", d+length)
}
if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
if debugErrs {
fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s)
}
return decodeErrCodeCorrupt
}
copy(dst[d:], src[s:s+length])
d += length
s += length
continue
case tagCopy1:
s += 2
toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
length = int(src[s-2]) >> 2 & 0x7
if toffset == 0 {
if debug {
fmt.Print("(repeat) ")
}
// keep last offset
switch length {
case 5:
length = int(src[s]) + 4
s += 1
case 6:
in := src[s : s+2]
length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8)
s += 2
case 7:
in := src[s : s+3]
length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16)
s += 3
default: // 0-> 4
}
} else {
offset = toffset
}
length += 4
case tagCopy2:
in := src[s : s+3]
offset = int(uint32(in[1]) | uint32(in[2])<<8)
length = 1 + int(in[0])>>2
s += 3
case tagCopy4:
in := src[s : s+5]
offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24)
length = 1 + int(in[0])>>2
s += 5
}
if offset <= 0 || length > len(dst)-d {
if debugErrs {
fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d)
}
return decodeErrCodeCorrupt
}
// copy from dict
if d < offset {
if d > MaxDictSrcOffset {
if debugErrs {
fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length)
}
return decodeErrCodeCorrupt
}
startOff := len(dict.dict) - offset + d
if startOff < 0 || startOff+length > len(dict.dict) {
if debugErrs {
fmt.Printf("offset (%d) + length (%d) bigger than dict (%d)\n", offset, length, len(dict.dict))
}
return decodeErrCodeCorrupt
}
if debug {
fmt.Println("dict copy, length:", length, "offset:", offset, "d-after:", d+length, "dict start offset:", startOff)
}
copy(dst[d:d+length], dict.dict[startOff:])
d += length
continue
}
if debug {
fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
}
// Copy from an earlier sub-slice of dst to a later sub-slice.
// If no overlap, use the built-in copy:
if offset > length {
copy(dst[d:d+length], dst[d-offset:])
d += length
continue
}
// Unlike the built-in copy function, this byte-by-byte copy always runs
// forwards, even if the slices overlap. Conceptually, this is:
//
// d += forwardCopy(dst[d:d+length], dst[d-offset:])
//
// We align the slices into a and b and show the compiler they are the same size.
// This allows the loop to run without bounds checks.
a := dst[d : d+length]
b := dst[d-offset:]
b = b[:len(a)]
for i := range a {
a[i] = b[i]
}
d += length
}
// Remaining with extra checks...
for s < len(src) {
switch src[s] & 0x03 {
case tagLiteral:
x := uint32(src[s] >> 2)
switch {
case x < 60:
s++
case x == 60:
s += 2
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
if debugErrs {
fmt.Println("src went oob")
}
return decodeErrCodeCorrupt
}
x = uint32(src[s-1])
case x == 61:
s += 3
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
if debugErrs {
fmt.Println("src went oob")
}
return decodeErrCodeCorrupt
}
x = uint32(src[s-2]) | uint32(src[s-1])<<8
case x == 62:
s += 4
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
if debugErrs {
fmt.Println("src went oob")
}
return decodeErrCodeCorrupt
}
x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
case x == 63:
s += 5
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
if debugErrs {
fmt.Println("src went oob")
}
return decodeErrCodeCorrupt
}
x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
}
length = int(x) + 1
if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
if debugErrs {
fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s)
}
return decodeErrCodeCorrupt
}
if debug {
fmt.Println("literals, length:", length, "d-after:", d+length)
}
copy(dst[d:], src[s:s+length])
d += length
s += length
continue
case tagCopy1:
s += 2
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
if debugErrs {
fmt.Println("src went oob")
}
return decodeErrCodeCorrupt
}
length = int(src[s-2]) >> 2 & 0x7
toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
if toffset == 0 {
if debug {
fmt.Print("(repeat) ")
}
// keep last offset
switch length {
case 5:
s += 1
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
if debugErrs {
fmt.Println("src went oob")
}
return decodeErrCodeCorrupt
}
length = int(uint32(src[s-1])) + 4
case 6:
s += 2
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
if debugErrs {
fmt.Println("src went oob")
}
return decodeErrCodeCorrupt
}
length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8)
case 7:
s += 3
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
if debugErrs {
fmt.Println("src went oob")
}
return decodeErrCodeCorrupt
}
length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16)
default: // 0-> 4
}
} else {
offset = toffset
}
length += 4
case tagCopy2:
s += 3
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
if debugErrs {
fmt.Println("src went oob")
}
return decodeErrCodeCorrupt
}
length = 1 + int(src[s-3])>>2
offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
case tagCopy4:
s += 5
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
if debugErrs {
fmt.Println("src went oob")
}
return decodeErrCodeCorrupt
}
length = 1 + int(src[s-5])>>2
offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
}
if offset <= 0 || length > len(dst)-d {
if debugErrs {
fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d)
}
return decodeErrCodeCorrupt
}
// copy from dict
if d < offset {
if d > MaxDictSrcOffset {
if debugErrs {
fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length)
}
return decodeErrCodeCorrupt
}
rOff := len(dict.dict) - (offset - d)
if debug {
fmt.Println("starting dict entry from dict offset", len(dict.dict)-rOff)
}
if rOff+length > len(dict.dict) {
if debugErrs {
fmt.Println("err: END offset", rOff+length, "bigger than dict", len(dict.dict), "dict offset:", rOff, "length:", length)
}
return decodeErrCodeCorrupt
}
if rOff < 0 {
if debugErrs {
fmt.Println("err: START offset", rOff, "less than 0", len(dict.dict), "dict offset:", rOff, "length:", length)
}
return decodeErrCodeCorrupt
}
copy(dst[d:d+length], dict.dict[rOff:])
d += length
continue
}
if debug {
fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
}
// Copy from an earlier sub-slice of dst to a later sub-slice.
// If no overlap, use the built-in copy:
if offset > length {
copy(dst[d:d+length], dst[d-offset:])
d += length
continue
}
// Unlike the built-in copy function, this byte-by-byte copy always runs
// forwards, even if the slices overlap. Conceptually, this is:
//
// d += forwardCopy(dst[d:d+length], dst[d-offset:])
//
// We align the slices into a and b and show the compiler they are the same size.
// This allows the loop to run without bounds checks.
a := dst[d : d+length]
b := dst[d-offset:]
b = b[:len(a)]
for i := range a {
a[i] = b[i]
}
d += length
}
if d != len(dst) {
if debugErrs {
fmt.Println("wanted length", len(dst), "got", d)
}
return decodeErrCodeCorrupt
}
return 0
}
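Editorial illustration of the dictionary copy above: when a match offset reaches back past the start of dst (d < offset), the copy begins in the tail of the dictionary at len(dict)-offset+d, the same startOff expression s2DecodeDict uses. A hypothetical helper, not vendored code:
// dictCopy resolves a match that starts before the decoded output, for the
// case where the whole match fits inside the dictionary.
func dictCopy(dst []byte, d int, dict []byte, offset, length int) (int, bool) {
	startOff := len(dict) - offset + d
	if startOff < 0 || startOff+length > len(dict) {
		return d, false // would run outside the dictionary: corrupt input
	}
	copy(dst[d:d+length], dict[startOff:])
	return d + length, true
}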


@ -57,6 +57,9 @@ func s2Decode(dst, src []byte) int {
}
length = int(x) + 1
if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
if debug {
fmt.Println("corrupt: lit size", length)
}
return decodeErrCodeCorrupt
}
if debug {
@ -109,6 +112,10 @@ func s2Decode(dst, src []byte) int {
}
if offset <= 0 || d < offset || length > len(dst)-d {
if debug {
fmt.Println("corrupt: match, length", length, "offset:", offset, "dst avail:", len(dst)-d, "dst pos:", d)
}
return decodeErrCodeCorrupt
}
@ -175,6 +182,9 @@ func s2Decode(dst, src []byte) int {
}
length = int(x) + 1
if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
if debug {
fmt.Println("corrupt: lit size", length)
}
return decodeErrCodeCorrupt
}
if debug {
@ -241,6 +251,9 @@ func s2Decode(dst, src []byte) int {
}
if offset <= 0 || d < offset || length > len(dst)-d {
if debug {
fmt.Println("corrupt: match, length", length, "offset:", offset, "dst avail:", len(dst)-d, "dst pos:", d)
}
return decodeErrCodeCorrupt
}

331
vendor/github.com/klauspost/compress/s2/dict.go generated vendored Normal file

@ -0,0 +1,331 @@
// Copyright (c) 2022+ Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package s2
import (
"bytes"
"encoding/binary"
"sync"
)
const (
// MinDictSize is the minimum dictionary size when repeat has been read.
MinDictSize = 16
// MaxDictSize is the maximum dictionary size when repeat has been read.
MaxDictSize = 65536
// MaxDictSrcOffset is the maximum offset where a dictionary entry can start.
MaxDictSrcOffset = 65535
)
// Dict contains a dictionary that can be used for encoding and decoding s2
type Dict struct {
dict []byte
repeat int // Repeat as index of dict
fast, better, best sync.Once
fastTable *[1 << 14]uint16
betterTableShort *[1 << 14]uint16
betterTableLong *[1 << 17]uint16
bestTableShort *[1 << 16]uint32
bestTableLong *[1 << 19]uint32
}
// NewDict will read a dictionary.
// It will return nil if the dictionary is invalid.
func NewDict(dict []byte) *Dict {
if len(dict) == 0 {
return nil
}
var d Dict
// Repeat is the first value of the dict
r, n := binary.Uvarint(dict)
if n <= 0 {
return nil
}
dict = dict[n:]
d.dict = dict
if cap(d.dict) < len(d.dict)+16 {
d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...)
}
if len(dict) < MinDictSize || len(dict) > MaxDictSize {
return nil
}
d.repeat = int(r)
if d.repeat > len(dict) {
return nil
}
return &d
}
// Bytes will return a serialized version of the dictionary.
// The output can be sent to NewDict.
func (d *Dict) Bytes() []byte {
dst := make([]byte, binary.MaxVarintLen16+len(d.dict))
return append(dst[:binary.PutUvarint(dst, uint64(d.repeat))], d.dict...)
}
// MakeDict will create a dictionary.
// 'data' must be at least MinDictSize.
// If data is longer than MaxDictSize only the last MaxDictSize bytes will be used.
// If searchStart is set, the initial repeat value will be set to the last
// match of this content.
// If no matches are found, it will attempt to find shorter matches.
// This content should match the typical start of a block.
// If at least 4 bytes cannot be matched, repeat is set to the start of the block.
func MakeDict(data []byte, searchStart []byte) *Dict {
if len(data) == 0 {
return nil
}
if len(data) > MaxDictSize {
data = data[len(data)-MaxDictSize:]
}
var d Dict
dict := data
d.dict = dict
if cap(d.dict) < len(d.dict)+16 {
d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...)
}
if len(dict) < MinDictSize {
return nil
}
// Find the longest match possible, last entry if multiple.
for s := len(searchStart); s > 4; s-- {
if idx := bytes.LastIndex(data, searchStart[:s]); idx >= 0 && idx <= len(data)-8 {
d.repeat = idx
break
}
}
return &d
}
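A short editorial sketch of the dictionary lifecycle defined above: build a dictionary from a sample corpus with MakeDict, serialize it with Bytes, and restore it with NewDict. Corpus contents and the searchStart value are illustrative.
package main

import (
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	sample := []byte("user=alice;role=admin;user=bob;role=user;")
	dict := s2.MakeDict(append([]byte{}, sample...), []byte("user="))
	if dict == nil {
		panic("dictionary rejected (too small?)")
	}

	// Bytes/NewDict round-trip the serialized form (repeat varint + data).
	if restored := s2.NewDict(dict.Bytes()); restored == nil {
		panic("round-trip failed")
	}
	fmt.Println("serialized dict size:", len(dict.Bytes()))
}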
// Encode returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
//
// The blocks will require the same amount of memory to decode as encoding,
// and do not allow for concurrent decoding.
// Also note that blocks do not contain CRC information, so corruption may be undetected.
//
// If you need to encode larger amounts of data, consider using
// the streaming interface which gives all of these features.
func (d *Dict) Encode(dst, src []byte) []byte {
if n := MaxEncodedLen(len(src)); n < 0 {
panic(ErrTooLarge)
} else if cap(dst) < n {
dst = make([]byte, n)
} else {
dst = dst[:n]
}
// The block starts with the varint-encoded length of the decompressed bytes.
dstP := binary.PutUvarint(dst, uint64(len(src)))
if len(src) == 0 {
return dst[:dstP]
}
if len(src) < minNonLiteralBlockSize {
dstP += emitLiteral(dst[dstP:], src)
return dst[:dstP]
}
n := encodeBlockDictGo(dst[dstP:], src, d)
if n > 0 {
dstP += n
return dst[:dstP]
}
// Not compressible
dstP += emitLiteral(dst[dstP:], src)
return dst[:dstP]
}
// EncodeBetter returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
//
// EncodeBetter compresses better than Encode but typically with a
// 10-40% speed decrease on both compression and decompression.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
//
// The blocks will require the same amount of memory to decode as encoding,
// and do not allow for concurrent decoding.
// Also note that blocks do not contain CRC information, so corruption may be undetected.
//
// If you need to encode larger amounts of data, consider using
// the streaming interface which gives all of these features.
func (d *Dict) EncodeBetter(dst, src []byte) []byte {
if n := MaxEncodedLen(len(src)); n < 0 {
panic(ErrTooLarge)
} else if len(dst) < n {
dst = make([]byte, n)
}
// The block starts with the varint-encoded length of the decompressed bytes.
dstP := binary.PutUvarint(dst, uint64(len(src)))
if len(src) == 0 {
return dst[:dstP]
}
if len(src) < minNonLiteralBlockSize {
dstP += emitLiteral(dst[dstP:], src)
return dst[:dstP]
}
n := encodeBlockBetterDict(dst[dstP:], src, d)
if n > 0 {
dstP += n
return dst[:dstP]
}
// Not compressible
dstP += emitLiteral(dst[dstP:], src)
return dst[:dstP]
}
// EncodeBest returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
//
// EncodeBest compresses as well as reasonably possible but with a
// big speed decrease.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
//
// The blocks will require the same amount of memory to decode as encoding,
// and do not allow for concurrent decoding.
// Also note that blocks do not contain CRC information, so corruption may be undetected.
//
// If you need to encode larger amounts of data, consider using
// the streaming interface which gives all of these features.
func (d *Dict) EncodeBest(dst, src []byte) []byte {
if n := MaxEncodedLen(len(src)); n < 0 {
panic(ErrTooLarge)
} else if len(dst) < n {
dst = make([]byte, n)
}
// The block starts with the varint-encoded length of the decompressed bytes.
dstP := binary.PutUvarint(dst, uint64(len(src)))
if len(src) == 0 {
return dst[:dstP]
}
if len(src) < minNonLiteralBlockSize {
dstP += emitLiteral(dst[dstP:], src)
return dst[:dstP]
}
n := encodeBlockBest(dst[dstP:], src, d)
if n > 0 {
dstP += n
return dst[:dstP]
}
// Not compressible
dstP += emitLiteral(dst[dstP:], src)
return dst[:dstP]
}
// Decode returns the decoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire decoded block.
// Otherwise, a newly allocated slice will be returned.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
func (d *Dict) Decode(dst, src []byte) ([]byte, error) {
dLen, s, err := decodedLen(src)
if err != nil {
return nil, err
}
if dLen <= cap(dst) {
dst = dst[:dLen]
} else {
dst = make([]byte, dLen)
}
if s2DecodeDict(dst, src[s:], d) != 0 {
return nil, ErrCorrupt
}
return dst, nil
}
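An editorial round-trip sketch for the block methods above; encoder and decoder must share the same dictionary. Sample data is illustrative.
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	dict := s2.MakeDict([]byte("0123456789abcdefghijklmnopqrstuvwxyz"), nil)
	src := bytes.Repeat([]byte("abcdefghijklmnop"), 10)

	enc := dict.Encode(nil, src)
	dec, err := dict.Decode(nil, enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(src, dec), len(src), "->", len(enc))
}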
func (d *Dict) initFast() {
d.fast.Do(func() {
const (
tableBits = 14
maxTableSize = 1 << tableBits
)
var table [maxTableSize]uint16
// We stop so any entry of length 8 can always be read.
for i := 0; i < len(d.dict)-8-2; i += 3 {
x0 := load64(d.dict, i)
h0 := hash6(x0, tableBits)
h1 := hash6(x0>>8, tableBits)
h2 := hash6(x0>>16, tableBits)
table[h0] = uint16(i)
table[h1] = uint16(i + 1)
table[h2] = uint16(i + 2)
}
d.fastTable = &table
})
}
func (d *Dict) initBetter() {
d.better.Do(func() {
const (
// Long hash matches.
lTableBits = 17
maxLTableSize = 1 << lTableBits
// Short hash matches.
sTableBits = 14
maxSTableSize = 1 << sTableBits
)
var lTable [maxLTableSize]uint16
var sTable [maxSTableSize]uint16
// We stop so any entry of length 8 can always be read.
for i := 0; i < len(d.dict)-8; i++ {
cv := load64(d.dict, i)
lTable[hash7(cv, lTableBits)] = uint16(i)
sTable[hash4(cv, sTableBits)] = uint16(i)
}
d.betterTableShort = &sTable
d.betterTableLong = &lTable
})
}
func (d *Dict) initBest() {
d.best.Do(func() {
const (
// Long hash matches.
lTableBits = 19
maxLTableSize = 1 << lTableBits
// Short hash matches.
sTableBits = 16
maxSTableSize = 1 << sTableBits
)
var lTable [maxLTableSize]uint32
var sTable [maxSTableSize]uint32
// We stop so any entry of length 8 can always be read.
for i := 0; i < len(d.dict)-8; i++ {
cv := load64(d.dict, i)
hashL := hash8(cv, lTableBits)
hashS := hash4(cv, sTableBits)
candidateL := lTable[hashL]
candidateS := sTable[hashS]
lTable[hashL] = uint32(i) | candidateL<<16
sTable[hashS] = uint32(i) | candidateS<<16
}
d.bestTableShort = &sTable
d.bestTableLong = &lTable
})
}
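Editorial note on the table layout built by initBest above: each uint32 slot packs two 16-bit candidate offsets, the newest in the low half and the previously newest shifted into the high half, matching the candidateL&0xffff and candidateL>>16 reads in encodeBlockBest. A sketch of the packing, not vendored code:
// packSlot pushes a new offset into a slot, demoting the old newest entry,
// i.e. the same update as lTable[hashL] = uint32(i) | candidateL<<16.
func packSlot(slot uint32, newOff uint16) uint32 {
	return uint32(newOff) | slot<<16
}

// unpackSlot returns the current and previous candidate offsets.
func unpackSlot(slot uint32) (cur, prev uint16) {
	return uint16(slot), uint16(slot >> 16)
}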


@ -58,6 +58,32 @@ func Encode(dst, src []byte) []byte {
return dst[:d]
}
// EstimateBlockSize will perform a very fast compression
// without outputting the result and return the compressed output size.
// The function returns -1 if no improvement could be achieved.
// Using actual compression will most often produce better compression than the estimate.
func EstimateBlockSize(src []byte) (d int) {
if len(src) < 6 || int64(len(src)) > 0xffffffff {
return -1
}
if len(src) <= 1024 {
d = calcBlockSizeSmall(src)
} else {
d = calcBlockSize(src)
}
if d == 0 {
return -1
}
// Size of the varint encoded block size.
d += (bits.Len64(uint64(len(src))) + 7) / 7
if d >= len(src) {
return -1
}
return d
}
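A usage sketch for EstimateBlockSize (editorial; sample data illustrative): compare the fast estimate against the size of an actual Encode.
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	src := bytes.Repeat([]byte("hello gopher "), 100)
	if est := s2.EstimateBlockSize(src); est < 0 {
		fmt.Println("estimated as incompressible")
	} else {
		fmt.Printf("estimate: %d, actual: %d, input: %d\n",
			est, len(s2.Encode(nil, src)), len(src))
	}
}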
// EncodeBetter returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
@ -132,7 +158,7 @@ func EncodeBest(dst, src []byte) []byte {
d += emitLiteral(dst[d:], src)
return dst[:d]
}
n := encodeBlockBest(dst[d:], src, nil)
if n > 0 {
d += n
return dst[:d]
@ -404,10 +430,11 @@ type Writer struct {
buffers sync.Pool
pad int
writer io.Writer
randSrc io.Reader
writerWg sync.WaitGroup
index Index
customEnc func(dst, src []byte) int
// wroteStreamHeader is whether we have written the stream header.
wroteStreamHeader bool
@ -773,6 +800,9 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) {
}
func (w *Writer) encodeBlock(obuf, uncompressed []byte) int {
if w.customEnc != nil {
return w.customEnc(obuf, uncompressed)
}
if w.snappy {
switch w.level {
case levelFast:
@ -790,7 +820,7 @@ func (w *Writer) encodeBlock(obuf, uncompressed []byte) int {
case levelBetter:
return encodeBlockBetter(obuf, uncompressed)
case levelBest:
return encodeBlockBest(obuf, uncompressed, nil)
}
return 0
}
@ -1339,3 +1369,15 @@ func WriterFlushOnWrite() WriterOption {
return nil
}
}
// WriterCustomEncoder allows overriding the encoder for blocks on the stream.
// The function must compress 'src' into 'dst' and return the number of bytes used in dst.
// The block size (initial varint) should not be added by the encoder.
// Returning 0 indicates the block could not be compressed.
// The function should expect to be called concurrently.
func WriterCustomEncoder(fn func(dst, src []byte) int) WriterOption {
return func(w *Writer) error {
w.customEnc = fn
return nil
}
}
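An editorial sketch of WriterCustomEncoder: the callback fully replaces the built-in block encoders, and returning 0 stores each block uncompressed, so this example only instruments the blocks the writer produces. The atomic counter reflects that the callback may run concurrently.
package main

import (
	"bytes"
	"fmt"
	"sync/atomic"

	"github.com/klauspost/compress/s2"
)

func main() {
	var blocks atomic.Int64
	var buf bytes.Buffer
	w := s2.NewWriter(&buf, s2.WriterCustomEncoder(func(dst, src []byte) int {
		blocks.Add(1)
		return 0 // report "not compressible"; block is stored as-is
	}))
	if _, err := w.Write(bytes.Repeat([]byte("x"), 1<<20)); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}
	fmt.Println("blocks offered to the custom encoder:", blocks.Load())
}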


@ -8,6 +8,7 @@ package s2
import (
"bytes"
"encoding/binary"
"fmt"
"math/bits"
)
@ -455,3 +456,594 @@ emitRemainder:
}
return d
}
// encodeBlockGo encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//
// len(dst) >= MaxEncodedLen(len(src)) &&
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockDictGo(dst, src []byte, dict *Dict) (d int) {
// Initialize the hash table.
const (
tableBits = 14
maxTableSize = 1 << tableBits
maxAhead = 8 // maximum bytes ahead without checking sLimit
debug = false
)
dict.initFast()
var table [maxTableSize]uint32
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := len(src) - inputMargin
if sLimit > MaxDictSrcOffset-maxAhead {
sLimit = MaxDictSrcOffset - maxAhead
}
// Bail if we can't compress to at least this.
dstLimit := len(src) - len(src)>>5 - 5
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := 0
// The encoded form can start with a dict entry (copy or repeat).
s := 0
// Convert dict repeat to offset
repeat := len(dict.dict) - dict.repeat
cv := load64(src, 0)
// While in dict
searchDict:
for {
// Next src position to check
nextS := s + (s-nextEmit)>>6 + 4
hash0 := hash6(cv, tableBits)
hash1 := hash6(cv>>8, tableBits)
if nextS > sLimit {
if debug {
fmt.Println("slimit reached", s, nextS)
}
break searchDict
}
candidateDict := int(dict.fastTable[hash0])
candidateDict2 := int(dict.fastTable[hash1])
candidate2 := int(table[hash1])
candidate := int(table[hash0])
table[hash0] = uint32(s)
table[hash1] = uint32(s + 1)
hash2 := hash6(cv>>16, tableBits)
// Check repeat at offset checkRep.
const checkRep = 1
if repeat > s {
candidate := len(dict.dict) - repeat + s
if repeat-s >= 4 && uint32(cv) == load32(dict.dict, candidate) {
// Extend back
base := s
for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; {
i--
base--
}
d += emitLiteral(dst[d:], src[nextEmit:base])
if debug && nextEmit != base {
fmt.Println("emitted ", base-nextEmit, "literals")
}
s += 4
candidate += 4
for candidate < len(dict.dict)-8 && s <= len(src)-8 {
if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidate += 8
}
d += emitRepeat(dst[d:], repeat, s-base)
if debug {
fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s)
}
nextEmit = s
if s >= sLimit {
break searchDict
}
cv = load64(src, s)
continue
}
} else if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
base := s + checkRep
// Extend back
for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
i--
base--
}
d += emitLiteral(dst[d:], src[nextEmit:base])
if debug && nextEmit != base {
fmt.Println("emitted ", base-nextEmit, "literals")
}
// Extend forward
candidate := s - repeat + 4 + checkRep
s += 4 + checkRep
for s <= sLimit {
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidate += 8
}
if debug {
// Validate match.
if s <= candidate {
panic("s <= candidate")
}
a := src[base:s]
b := src[base-repeat : base-repeat+(s-base)]
if !bytes.Equal(a, b) {
panic("mismatch")
}
}
if nextEmit > 0 {
// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
d += emitRepeat(dst[d:], repeat, s-base)
} else {
// First match, cannot be repeat.
d += emitCopy(dst[d:], repeat, s-base)
}
nextEmit = s
if s >= sLimit {
break searchDict
}
if debug {
fmt.Println("emitted reg repeat", s-base, "s:", s)
}
cv = load64(src, s)
continue searchDict
}
if s == 0 {
cv = load64(src, nextS)
s = nextS
continue searchDict
}
// Start with table. These matches will always be closer.
if uint32(cv) == load32(src, candidate) {
goto emitMatch
}
candidate = int(table[hash2])
if uint32(cv>>8) == load32(src, candidate2) {
table[hash2] = uint32(s + 2)
candidate = candidate2
s++
goto emitMatch
}
// Check dict. Dicts have longer offsets, so we want longer matches.
if cv == load64(dict.dict, candidateDict) {
table[hash2] = uint32(s + 2)
goto emitDict
}
candidateDict = int(dict.fastTable[hash2])
// Check if upper 7 bytes match
if candidateDict2 >= 1 {
if cv^load64(dict.dict, candidateDict2-1) < (1 << 8) {
table[hash2] = uint32(s + 2)
candidateDict = candidateDict2
s++
goto emitDict
}
}
table[hash2] = uint32(s + 2)
if uint32(cv>>16) == load32(src, candidate) {
s += 2
goto emitMatch
}
if candidateDict >= 2 {
// Check if upper 6 bytes match
if cv^load64(dict.dict, candidateDict-2) < (1 << 16) {
s += 2
goto emitDict
}
}
cv = load64(src, nextS)
s = nextS
continue searchDict
emitDict:
{
if debug {
if load32(dict.dict, candidateDict) != load32(src, s) {
panic("dict emit mismatch")
}
}
// Extend backwards.
// The top bytes will be rechecked to get the full match.
for candidateDict > 0 && s > nextEmit && dict.dict[candidateDict-1] == src[s-1] {
candidateDict--
s--
}
// Bail if we exceed the maximum size.
if d+(s-nextEmit) > dstLimit {
return 0
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
d += emitLiteral(dst[d:], src[nextEmit:s])
if debug && nextEmit != s {
fmt.Println("emitted ", s-nextEmit, "literals")
}
{
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
base := s
repeat = s + (len(dict.dict)) - candidateDict
// Extend the 4-byte match as long as possible.
s += 4
candidateDict += 4
for s <= len(src)-8 && len(dict.dict)-candidateDict >= 8 {
if diff := load64(src, s) ^ load64(dict.dict, candidateDict); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidateDict += 8
}
// Matches longer than 64 are split.
if s <= sLimit || s-base < 8 {
d += emitCopy(dst[d:], repeat, s-base)
} else {
// Split to ensure we don't start a copy within next block
d += emitCopy(dst[d:], repeat, 4)
d += emitRepeat(dst[d:], repeat, s-base-4)
}
if false {
// Validate match.
if s <= candidate {
panic("s <= candidate")
}
a := src[base:s]
b := dict.dict[base-repeat : base-repeat+(s-base)]
if !bytes.Equal(a, b) {
panic("mismatch")
}
}
if debug {
fmt.Println("emitted dict copy, length", s-base, "offset:", repeat, "s:", s)
}
nextEmit = s
if s >= sLimit {
break searchDict
}
if d > dstLimit {
// Do we have space for more, if not bail.
return 0
}
// Index and continue loop to try new candidate.
x := load64(src, s-2)
m2Hash := hash6(x, tableBits)
currHash := hash6(x>>8, tableBits)
candidate = int(table[currHash])
table[m2Hash] = uint32(s - 2)
table[currHash] = uint32(s - 1)
cv = load64(src, s)
}
continue
}
emitMatch:
// Extend backwards.
// The top bytes will be rechecked to get the full match.
for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
candidate--
s--
}
// Bail if we exceed the maximum size.
if d+(s-nextEmit) > dstLimit {
return 0
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
d += emitLiteral(dst[d:], src[nextEmit:s])
if debug && nextEmit != s {
fmt.Println("emitted ", s-nextEmit, "literals")
}
// Call emitCopy, and then see if another emitCopy could be our next
// move. Repeat until we find no match for the input immediately after
// what was consumed by the last emitCopy call.
//
// If we exit this loop normally then we need to call emitLiteral next,
// though we don't yet know how big the literal will be. We handle that
// by proceeding to the next iteration of the main loop. We also can
// exit this loop via goto if we get close to exhausting the input.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
base := s
repeat = base - candidate
// Extend the 4-byte match as long as possible.
s += 4
candidate += 4
for s <= len(src)-8 {
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidate += 8
}
d += emitCopy(dst[d:], repeat, s-base)
if debug {
// Validate match.
if s <= candidate {
panic("s <= candidate")
}
a := src[base:s]
b := src[base-repeat : base-repeat+(s-base)]
if !bytes.Equal(a, b) {
panic("mismatch")
}
}
if debug {
fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s)
}
nextEmit = s
if s >= sLimit {
break searchDict
}
if d > dstLimit {
// Do we have space for more, if not bail.
return 0
}
// Check for an immediate match, otherwise start search at s+1
x := load64(src, s-2)
m2Hash := hash6(x, tableBits)
currHash := hash6(x>>16, tableBits)
candidate = int(table[currHash])
table[m2Hash] = uint32(s - 2)
table[currHash] = uint32(s)
if debug && s == candidate {
panic("s == candidate")
}
if uint32(x>>16) != load32(src, candidate) {
cv = load64(src, s+1)
s++
break
}
}
}
// Search without dict:
if repeat > s {
repeat = 0
}
// No more dict
sLimit = len(src) - inputMargin
if s >= sLimit {
goto emitRemainder
}
if debug {
fmt.Println("non-dict matching at", s, "repeat:", repeat)
}
cv = load64(src, s)
if debug {
fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s)
}
for {
candidate := 0
for {
// Next src position to check
nextS := s + (s-nextEmit)>>6 + 4
if nextS > sLimit {
goto emitRemainder
}
hash0 := hash6(cv, tableBits)
hash1 := hash6(cv>>8, tableBits)
candidate = int(table[hash0])
candidate2 := int(table[hash1])
table[hash0] = uint32(s)
table[hash1] = uint32(s + 1)
hash2 := hash6(cv>>16, tableBits)
// Check repeat at offset checkRep.
const checkRep = 1
if repeat > 0 && uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
base := s + checkRep
// Extend back
for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
i--
base--
}
d += emitLiteral(dst[d:], src[nextEmit:base])
if debug && nextEmit != base {
fmt.Println("emitted ", base-nextEmit, "literals")
}
// Extend forward
candidate := s - repeat + 4 + checkRep
s += 4 + checkRep
for s <= sLimit {
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidate += 8
}
if debug {
// Validate match.
if s <= candidate {
panic("s <= candidate")
}
a := src[base:s]
b := src[base-repeat : base-repeat+(s-base)]
if !bytes.Equal(a, b) {
panic("mismatch")
}
}
if nextEmit > 0 {
// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
d += emitRepeat(dst[d:], repeat, s-base)
} else {
// First match, cannot be repeat.
d += emitCopy(dst[d:], repeat, s-base)
}
if debug {
fmt.Println("emitted src repeat length", s-base, "offset:", repeat, "s:", s)
}
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
cv = load64(src, s)
continue
}
if uint32(cv) == load32(src, candidate) {
break
}
candidate = int(table[hash2])
if uint32(cv>>8) == load32(src, candidate2) {
table[hash2] = uint32(s + 2)
candidate = candidate2
s++
break
}
table[hash2] = uint32(s + 2)
if uint32(cv>>16) == load32(src, candidate) {
s += 2
break
}
cv = load64(src, nextS)
s = nextS
}
// Extend backwards.
// The top bytes will be rechecked to get the full match.
for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
candidate--
s--
}
// Bail if we exceed the maximum size.
if d+(s-nextEmit) > dstLimit {
return 0
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
d += emitLiteral(dst[d:], src[nextEmit:s])
if debug && nextEmit != s {
fmt.Println("emitted ", s-nextEmit, "literals")
}
// Call emitCopy, and then see if another emitCopy could be our next
// move. Repeat until we find no match for the input immediately after
// what was consumed by the last emitCopy call.
//
// If we exit this loop normally then we need to call emitLiteral next,
// though we don't yet know how big the literal will be. We handle that
// by proceeding to the next iteration of the main loop. We also can
// exit this loop via goto if we get close to exhausting the input.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
base := s
repeat = base - candidate
// Extend the 4-byte match as long as possible.
s += 4
candidate += 4
for s <= len(src)-8 {
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidate += 8
}
d += emitCopy(dst[d:], repeat, s-base)
if debug {
// Validate match.
if s <= candidate {
panic("s <= candidate")
}
a := src[base:s]
b := src[base-repeat : base-repeat+(s-base)]
if !bytes.Equal(a, b) {
panic("mismatch")
}
}
if debug {
fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s)
}
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
if d > dstLimit {
// Do we have space for more, if not bail.
return 0
}
// Check for an immediate match, otherwise start search at s+1
x := load64(src, s-2)
m2Hash := hash6(x, tableBits)
currHash := hash6(x>>16, tableBits)
candidate = int(table[currHash])
table[m2Hash] = uint32(s - 2)
table[currHash] = uint32(s)
if debug && s == candidate {
panic("s == candidate")
}
if uint32(x>>16) != load32(src, candidate) {
cv = load64(src, s+1)
s++
break
}
}
}
emitRemainder:
if nextEmit < len(src) {
// Bail if we exceed the maximum size.
if d+len(src)-nextEmit > dstLimit {
return 0
}
d += emitLiteral(dst[d:], src[nextEmit:])
if debug && nextEmit != s {
fmt.Println("emitted ", len(src)-nextEmit, "literals")
}
}
return d
}


@ -3,6 +3,8 @@
package s2
const hasAmd64Asm = true
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.


@ -7,6 +7,7 @@ package s2
import (
"fmt"
"math"
"math/bits"
)
@ -18,7 +19,7 @@ import (
//
// len(dst) >= MaxEncodedLen(len(src)) &&
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBest(dst, src []byte, dict *Dict) (d int) {
// Initialize the hash tables.
const (
// Long hash matches.
@ -30,6 +31,8 @@ func encodeBlockBest(dst, src []byte) (d int) {
maxSTableSize = 1 << sTableBits
inputMargin = 8 + 2
debug = false
)
// sLimit is when to stop looking for offset/length copies. The inputMargin
@ -39,6 +42,10 @@ func encodeBlockBest(dst, src []byte) (d int) {
if len(src) < minNonLiteralBlockSize {
return 0
}
sLimitDict := len(src) - inputMargin
if sLimitDict > MaxDictSrcOffset-inputMargin {
sLimitDict = MaxDictSrcOffset - inputMargin
}
var lTable [maxLTableSize]uint64
var sTable [maxSTableSize]uint64
@ -52,10 +59,15 @@ func encodeBlockBest(dst, src []byte) (d int) {
// The encoded form must start with a literal, as there are no previous
// bytes to copy, so we start looking for hash matches at s == 1.
s := 1
repeat := 1
if dict != nil {
dict.initBest()
s = 0
repeat = len(dict.dict) - dict.repeat
}
cv := load64(src, s)
// We search for a repeat at -1, but don't output repeats when nextEmit == 0
const lowbitMask = 0xffffffff
getCur := func(x uint64) int {
return int(x & lowbitMask)
@ -67,11 +79,11 @@ func encodeBlockBest(dst, src []byte) (d int) {
for {
type match struct {
			offset    int
			s         int
			length    int
			score     int
			rep, dict bool
}
var best match
for {
@ -85,6 +97,12 @@ func encodeBlockBest(dst, src []byte) (d int) {
if nextS > sLimit {
goto emitRemainder
}
if dict != nil && s >= MaxDictSrcOffset {
dict = nil
if repeat > s {
repeat = math.MinInt32
}
}
hashL := hash8(cv, lTableBits)
hashS := hash4(cv, sTableBits)
candidateL := lTable[hashL]
@ -114,7 +132,15 @@ func encodeBlockBest(dst, src []byte) (d int) {
}
m := match{offset: offset, s: s, length: 4 + offset, rep: rep}
s += 4
for s < len(src) {
if len(src)-s < 8 {
if src[s] == src[m.length] {
m.length++
s++
continue
}
break
}
if diff := load64(src, s) ^ load64(src, m.length); diff != 0 {
m.length += bits.TrailingZeros64(diff) >> 3
break
@ -130,6 +156,62 @@ func encodeBlockBest(dst, src []byte) (d int) {
}
return m
}
matchDict := func(candidate, s int, first uint32, rep bool) match {
// Calculate offset as if in continuous array with s
offset := -len(dict.dict) + candidate
if best.length != 0 && best.s-best.offset == s-offset && !rep {
// Don't retest if we have the same offset.
return match{offset: offset, s: s}
}
if load32(dict.dict, candidate) != first {
return match{offset: offset, s: s}
}
m := match{offset: offset, s: s, length: 4 + candidate, rep: rep, dict: true}
s += 4
if !rep {
for s < sLimitDict && m.length < len(dict.dict) {
if len(src)-s < 8 || len(dict.dict)-m.length < 8 {
if src[s] == dict.dict[m.length] {
m.length++
s++
continue
}
break
}
if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 {
m.length += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
m.length += 8
}
} else {
for s < len(src) && m.length < len(dict.dict) {
if len(src)-s < 8 || len(dict.dict)-m.length < 8 {
if src[s] == dict.dict[m.length] {
m.length++
s++
continue
}
break
}
if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 {
m.length += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
m.length += 8
}
}
m.length -= candidate
m.score = score(m)
if m.score <= -m.s {
// Eliminate if no savings, we might find a better one.
m.length = 0
}
return m
}
bestOf := func(a, b match) match {
if b.length == 0 {
@ -146,35 +228,82 @@ func encodeBlockBest(dst, src []byte) (d int) {
return b
}
if s > 0 {
best = bestOf(matchAt(getCur(candidateL), s, uint32(cv), false), matchAt(getPrev(candidateL), s, uint32(cv), false))
best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv), false))
best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv), false))
}
if dict != nil {
candidateL := dict.bestTableLong[hashL]
candidateS := dict.bestTableShort[hashS]
best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false))
best = bestOf(best, matchDict(int(candidateL>>16), s, uint32(cv), false))
best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false))
best = bestOf(best, matchDict(int(candidateS>>16), s, uint32(cv), false))
}
{
if (dict == nil || repeat <= s) && repeat > 0 {
best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true))
} else if s-repeat < -4 && dict != nil {
candidate := len(dict.dict) - (repeat - s)
best = bestOf(best, matchDict(candidate, s, uint32(cv), true))
candidate++
best = bestOf(best, matchDict(candidate, s+1, uint32(cv>>8), true))
}
if best.length > 0 {
hashS := hash4(cv>>8, sTableBits)
// s+1
nextShort := sTable[hashS]
s := s + 1
cv := load64(src, s)
hashL := hash8(cv, lTableBits)
nextLong := lTable[hashL]
best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false))
best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false))
best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false))
best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false))
// Dict at + 1
if dict != nil {
candidateL := dict.bestTableLong[hashL]
candidateS := dict.bestTableShort[hashS]
best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false))
best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false))
}
// s+2
if true {
hashS := hash4(cv>>8, sTableBits)
nextShort = sTable[hashS]
s++
cv = load64(src, s)
hashL := hash8(cv, lTableBits)
nextLong = lTable[hashL]
if (dict == nil || repeat <= s) && repeat > 0 {
// Repeat at + 2
best = bestOf(best, matchAt(s-repeat, s, uint32(cv), true))
} else if repeat-s > 4 && dict != nil {
candidate := len(dict.dict) - (repeat - s)
best = bestOf(best, matchDict(candidate, s, uint32(cv), true))
}
best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false))
best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false))
best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false))
best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false))
// Dict at +2
// Very small gain
if dict != nil {
candidateL := dict.bestTableLong[hashL]
candidateS := dict.bestTableShort[hashS]
best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false))
best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false))
}
}
// Search for a match at best match end, see if that is better.
// Allow some bytes at the beginning to mismatch.
@ -227,7 +356,7 @@ func encodeBlockBest(dst, src []byte) (d int) {
// Extend backwards, not needed for repeats...
s = best.s
if !best.rep && !best.dict {
for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] {
best.offset--
best.length++
@ -244,7 +373,6 @@ func encodeBlockBest(dst, src []byte) (d int) {
base := s
offset := s - best.offset
s += best.length
if offset > 65535 && s-base <= 5 && !best.rep {
@ -256,16 +384,28 @@ func encodeBlockBest(dst, src []byte) (d int) {
cv = load64(src, s)
continue
}
if debug && nextEmit != base {
fmt.Println("EMIT", base-nextEmit, "literals. base-after:", base)
}
d += emitLiteral(dst[d:], src[nextEmit:base])
if best.rep {
if nextEmit > 0 || best.dict {
if debug {
fmt.Println("REPEAT, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best)
}
// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
d += emitRepeat(dst[d:], offset, best.length)
} else {
// First match without dict cannot be a repeat.
if debug {
fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best)
}
d += emitCopy(dst[d:], offset, best.length)
}
} else {
if debug {
fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best)
}
d += emitCopy(dst[d:], offset, best.length)
}
repeat = offset
@ -296,6 +436,9 @@ emitRemainder:
if d+len(src)-nextEmit > dstLimit {
return 0
}
if debug && nextEmit != s {
fmt.Println("emitted ", len(src)-nextEmit, "literals")
}
d += emitLiteral(dst[d:], src[nextEmit:])
}
return d
@ -642,7 +785,6 @@ func emitRepeatSize(offset, length int) int {
left := 0
if length > maxRepeat {
left = length - maxRepeat + 4
}
if left > 0 {
return 5 + emitRepeatSize(offset, left)


@ -6,6 +6,8 @@
package s2
import (
"bytes"
"fmt"
"math/bits"
)
@ -476,3 +478,623 @@ emitRemainder:
}
return d
}
// encodeBlockBetterDict encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//
// len(dst) >= MaxEncodedLen(len(src)) &&
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBetterDict(dst, src []byte, dict *Dict) (d int) {
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
// Initialize the hash tables.
const (
// Long hash matches.
lTableBits = 17
maxLTableSize = 1 << lTableBits
// Short hash matches.
sTableBits = 14
maxSTableSize = 1 << sTableBits
maxAhead = 8 // maximum bytes ahead without checking sLimit
debug = false
)
sLimit := len(src) - inputMargin
if sLimit > MaxDictSrcOffset-maxAhead {
sLimit = MaxDictSrcOffset - maxAhead
}
if len(src) < minNonLiteralBlockSize {
return 0
}
dict.initBetter()
var lTable [maxLTableSize]uint32
var sTable [maxSTableSize]uint32
// Bail if we can't compress to at least this.
dstLimit := len(src) - len(src)>>5 - 6
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := 0
	// With a dictionary, the encoded form can start with a dict entry
	// (copy or repeat), so we look for matches starting at s == 0.
s := 0
cv := load64(src, s)
	// Convert dict repeat to offset
repeat := len(dict.dict) - dict.repeat
// While in dict
searchDict:
for {
candidateL := 0
nextS := 0
for {
// Next src position to check
nextS = s + (s-nextEmit)>>7 + 1
if nextS > sLimit {
break searchDict
}
hashL := hash7(cv, lTableBits)
hashS := hash4(cv, sTableBits)
candidateL = int(lTable[hashL])
candidateS := int(sTable[hashS])
dictL := int(dict.betterTableLong[hashL])
dictS := int(dict.betterTableShort[hashS])
lTable[hashL] = uint32(s)
sTable[hashS] = uint32(s)
valLong := load64(src, candidateL)
valShort := load64(src, candidateS)
// If long matches at least 8 bytes, use that.
if s != 0 {
if cv == valLong {
goto emitMatch
}
if cv == valShort {
candidateL = candidateS
goto emitMatch
}
}
// Check dict repeat.
if repeat >= s+4 {
candidate := len(dict.dict) - repeat + s
if candidate > 0 && uint32(cv) == load32(dict.dict, candidate) {
// Extend back
base := s
for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; {
i--
base--
}
d += emitLiteral(dst[d:], src[nextEmit:base])
if debug && nextEmit != base {
fmt.Println("emitted ", base-nextEmit, "literals")
}
s += 4
candidate += 4
for candidate < len(dict.dict)-8 && s <= len(src)-8 {
if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidate += 8
}
d += emitRepeat(dst[d:], repeat, s-base)
if debug {
fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s)
}
nextEmit = s
if s >= sLimit {
break searchDict
}
cv = load64(src, s)
// Index in-between
index0 := base + 1
index1 := s - 2
cv = load64(src, s)
for index0 < index1 {
cv0 := load64(src, index0)
cv1 := load64(src, index1)
lTable[hash7(cv0, lTableBits)] = uint32(index0)
sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
lTable[hash7(cv1, lTableBits)] = uint32(index1)
sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
index0 += 2
index1 -= 2
}
continue
}
}
// Don't try to find match at s==0
if s == 0 {
cv = load64(src, nextS)
s = nextS
continue
}
// Long likely matches 7, so take that.
if uint32(cv) == uint32(valLong) {
goto emitMatch
}
// Long dict...
if uint32(cv) == load32(dict.dict, dictL) {
candidateL = dictL
goto emitDict
}
// Check our short candidate
if uint32(cv) == uint32(valShort) {
// Try a long candidate at s+1
hashL = hash7(cv>>8, lTableBits)
candidateL = int(lTable[hashL])
lTable[hashL] = uint32(s + 1)
if uint32(cv>>8) == load32(src, candidateL) {
s++
goto emitMatch
}
// Use our short candidate.
candidateL = candidateS
goto emitMatch
}
if uint32(cv) == load32(dict.dict, dictS) {
// Try a long candidate at s+1
hashL = hash7(cv>>8, lTableBits)
candidateL = int(lTable[hashL])
lTable[hashL] = uint32(s + 1)
if uint32(cv>>8) == load32(src, candidateL) {
s++
goto emitMatch
}
candidateL = dictS
goto emitDict
}
cv = load64(src, nextS)
s = nextS
}
emitDict:
{
if debug {
if load32(dict.dict, candidateL) != load32(src, s) {
panic("dict emit mismatch")
}
}
// Extend backwards.
// The top bytes will be rechecked to get the full match.
for candidateL > 0 && s > nextEmit && dict.dict[candidateL-1] == src[s-1] {
candidateL--
s--
}
// Bail if we exceed the maximum size.
if d+(s-nextEmit) > dstLimit {
return 0
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
d += emitLiteral(dst[d:], src[nextEmit:s])
if debug && nextEmit != s {
fmt.Println("emitted ", s-nextEmit, "literals")
}
{
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
base := s
offset := s + (len(dict.dict)) - candidateL
// Extend the 4-byte match as long as possible.
s += 4
candidateL += 4
for s <= len(src)-8 && len(dict.dict)-candidateL >= 8 {
if diff := load64(src, s) ^ load64(dict.dict, candidateL); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidateL += 8
}
if repeat == offset {
if debug {
fmt.Println("emitted dict repeat, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL)
}
d += emitRepeat(dst[d:], offset, s-base)
} else {
if debug {
fmt.Println("emitted dict copy, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL)
}
// Matches longer than 64 are split.
if s <= sLimit || s-base < 8 {
d += emitCopy(dst[d:], offset, s-base)
} else {
// Split to ensure we don't start a copy within next block.
d += emitCopy(dst[d:], offset, 4)
d += emitRepeat(dst[d:], offset, s-base-4)
}
repeat = offset
}
if false {
// Validate match.
if s <= candidateL {
panic("s <= candidate")
}
a := src[base:s]
b := dict.dict[base-repeat : base-repeat+(s-base)]
if !bytes.Equal(a, b) {
panic("mismatch")
}
}
nextEmit = s
if s >= sLimit {
break searchDict
}
if d > dstLimit {
// Do we have space for more, if not bail.
return 0
}
// Index short & long
index0 := base + 1
index1 := s - 2
cv0 := load64(src, index0)
cv1 := load64(src, index1)
lTable[hash7(cv0, lTableBits)] = uint32(index0)
sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
lTable[hash7(cv1, lTableBits)] = uint32(index1)
sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
index0 += 1
index1 -= 1
cv = load64(src, s)
// index every second long in between.
for index0 < index1 {
lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
index0 += 2
index1 -= 2
}
}
continue
}
emitMatch:
// Extend backwards
for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
candidateL--
s--
}
// Bail if we exceed the maximum size.
if d+(s-nextEmit) > dstLimit {
return 0
}
base := s
offset := base - candidateL
// Extend the 4-byte match as long as possible.
s += 4
candidateL += 4
for s < len(src) {
if len(src)-s < 8 {
if src[s] == src[candidateL] {
s++
candidateL++
continue
}
break
}
if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidateL += 8
}
if offset > 65535 && s-base <= 5 && repeat != offset {
// Bail if the match is equal or worse to the encoding.
s = nextS + 1
if s >= sLimit {
goto emitRemainder
}
cv = load64(src, s)
continue
}
d += emitLiteral(dst[d:], src[nextEmit:base])
if debug && nextEmit != s {
fmt.Println("emitted ", s-nextEmit, "literals")
}
if repeat == offset {
if debug {
fmt.Println("emitted match repeat, length", s-base, "offset:", offset, "s:", s)
}
d += emitRepeat(dst[d:], offset, s-base)
} else {
if debug {
fmt.Println("emitted match copy, length", s-base, "offset:", offset, "s:", s)
}
d += emitCopy(dst[d:], offset, s-base)
repeat = offset
}
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
if d > dstLimit {
// Do we have space for more, if not bail.
return 0
}
// Index short & long
index0 := base + 1
index1 := s - 2
cv0 := load64(src, index0)
cv1 := load64(src, index1)
lTable[hash7(cv0, lTableBits)] = uint32(index0)
sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
lTable[hash7(cv1, lTableBits)] = uint32(index1)
sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
index0 += 1
index1 -= 1
cv = load64(src, s)
// index every second long in between.
for index0 < index1 {
lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
index0 += 2
index1 -= 2
}
}
// Search without dict:
if repeat > s {
repeat = 0
}
// No more dict
sLimit = len(src) - inputMargin
if s >= sLimit {
goto emitRemainder
}
cv = load64(src, s)
if debug {
fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s)
}
for {
candidateL := 0
nextS := 0
for {
// Next src position to check
nextS = s + (s-nextEmit)>>7 + 1
if nextS > sLimit {
goto emitRemainder
}
hashL := hash7(cv, lTableBits)
hashS := hash4(cv, sTableBits)
candidateL = int(lTable[hashL])
candidateS := int(sTable[hashS])
lTable[hashL] = uint32(s)
sTable[hashS] = uint32(s)
valLong := load64(src, candidateL)
valShort := load64(src, candidateS)
// If long matches at least 8 bytes, use that.
if cv == valLong {
break
}
if cv == valShort {
candidateL = candidateS
break
}
// Check repeat at offset checkRep.
const checkRep = 1
// Minimum length of a repeat. Tested with various values.
// While 4-5 offers improvements in some, 6 reduces
// regressions significantly.
const wantRepeatBytes = 6
const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep)
if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask {
base := s + checkRep
// Extend back
for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
i--
base--
}
d += emitLiteral(dst[d:], src[nextEmit:base])
// Extend forward
candidate := s - repeat + wantRepeatBytes + checkRep
s += wantRepeatBytes + checkRep
for s < len(src) {
if len(src)-s < 8 {
if src[s] == src[candidate] {
s++
candidate++
continue
}
break
}
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidate += 8
}
// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
d += emitRepeat(dst[d:], repeat, s-base)
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
// Index in-between
index0 := base + 1
index1 := s - 2
cv = load64(src, s)
for index0 < index1 {
cv0 := load64(src, index0)
cv1 := load64(src, index1)
lTable[hash7(cv0, lTableBits)] = uint32(index0)
sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
lTable[hash7(cv1, lTableBits)] = uint32(index1)
sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
index0 += 2
index1 -= 2
}
cv = load64(src, s)
continue
}
// Long likely matches 7, so take that.
if uint32(cv) == uint32(valLong) {
break
}
// Check our short candidate
if uint32(cv) == uint32(valShort) {
// Try a long candidate at s+1
hashL = hash7(cv>>8, lTableBits)
candidateL = int(lTable[hashL])
lTable[hashL] = uint32(s + 1)
if uint32(cv>>8) == load32(src, candidateL) {
s++
break
}
// Use our short candidate.
candidateL = candidateS
break
}
cv = load64(src, nextS)
s = nextS
}
// Extend backwards
for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
candidateL--
s--
}
// Bail if we exceed the maximum size.
if d+(s-nextEmit) > dstLimit {
return 0
}
base := s
offset := base - candidateL
// Extend the 4-byte match as long as possible.
s += 4
candidateL += 4
for s < len(src) {
if len(src)-s < 8 {
if src[s] == src[candidateL] {
s++
candidateL++
continue
}
break
}
if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidateL += 8
}
if offset > 65535 && s-base <= 5 && repeat != offset {
// Bail if the match is equal or worse to the encoding.
s = nextS + 1
if s >= sLimit {
goto emitRemainder
}
cv = load64(src, s)
continue
}
d += emitLiteral(dst[d:], src[nextEmit:base])
if repeat == offset {
d += emitRepeat(dst[d:], offset, s-base)
} else {
d += emitCopy(dst[d:], offset, s-base)
repeat = offset
}
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
if d > dstLimit {
// Do we have space for more, if not bail.
return 0
}
// Index short & long
index0 := base + 1
index1 := s - 2
cv0 := load64(src, index0)
cv1 := load64(src, index1)
lTable[hash7(cv0, lTableBits)] = uint32(index0)
sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
lTable[hash7(cv1, lTableBits)] = uint32(index1)
sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
index0 += 1
index1 -= 1
cv = load64(src, s)
// index every second long in between.
for index0 < index1 {
lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
index0 += 2
index1 -= 2
}
}
emitRemainder:
if nextEmit < len(src) {
// Bail if we exceed the maximum size.
if d+len(src)-nextEmit > dstLimit {
return 0
}
d += emitLiteral(dst[d:], src[nextEmit:])
}
return d
}


@ -4,9 +4,12 @@
package s2
import (
"bytes"
"math/bits"
)
const hasAmd64Asm = false
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
@ -312,3 +315,405 @@ func matchLen(a []byte, b []byte) int {
}
return len(a) + checked
}
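// calcBlockSize returns the number of bytes src would occupy when encoded as
// a single S2 block, without writing any output. It mirrors the encoder's
// control flow but only accumulates sizes via emitLiteralSize and
// emitCopyNoRepeatSize.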
func calcBlockSize(src []byte) (d int) {
// Initialize the hash table.
const (
tableBits = 13
maxTableSize = 1 << tableBits
)
var table [maxTableSize]uint32
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := len(src) - inputMargin
// Bail if we can't compress to at least this.
dstLimit := len(src) - len(src)>>5 - 5
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := 0
// The encoded form must start with a literal, as there are no previous
// bytes to copy, so we start looking for hash matches at s == 1.
s := 1
cv := load64(src, s)
// We search for a repeat at -1, but don't output repeats when nextEmit == 0
repeat := 1
for {
candidate := 0
for {
// Next src position to check
nextS := s + (s-nextEmit)>>6 + 4
if nextS > sLimit {
goto emitRemainder
}
hash0 := hash6(cv, tableBits)
hash1 := hash6(cv>>8, tableBits)
candidate = int(table[hash0])
candidate2 := int(table[hash1])
table[hash0] = uint32(s)
table[hash1] = uint32(s + 1)
hash2 := hash6(cv>>16, tableBits)
// Check repeat at offset checkRep.
const checkRep = 1
if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
base := s + checkRep
// Extend back
for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
i--
base--
}
d += emitLiteralSize(src[nextEmit:base])
// Extend forward
candidate := s - repeat + 4 + checkRep
s += 4 + checkRep
for s <= sLimit {
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidate += 8
}
d += emitCopyNoRepeatSize(repeat, s-base)
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
cv = load64(src, s)
continue
}
if uint32(cv) == load32(src, candidate) {
break
}
candidate = int(table[hash2])
if uint32(cv>>8) == load32(src, candidate2) {
table[hash2] = uint32(s + 2)
candidate = candidate2
s++
break
}
table[hash2] = uint32(s + 2)
if uint32(cv>>16) == load32(src, candidate) {
s += 2
break
}
cv = load64(src, nextS)
s = nextS
}
// Extend backwards
for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
candidate--
s--
}
// Bail if we exceed the maximum size.
if d+(s-nextEmit) > dstLimit {
return 0
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
d += emitLiteralSize(src[nextEmit:s])
// Call emitCopy, and then see if another emitCopy could be our next
// move. Repeat until we find no match for the input immediately after
// what was consumed by the last emitCopy call.
//
// If we exit this loop normally then we need to call emitLiteral next,
// though we don't yet know how big the literal will be. We handle that
// by proceeding to the next iteration of the main loop. We also can
// exit this loop via goto if we get close to exhausting the input.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
base := s
repeat = base - candidate
// Extend the 4-byte match as long as possible.
s += 4
candidate += 4
for s <= len(src)-8 {
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidate += 8
}
d += emitCopyNoRepeatSize(repeat, s-base)
if false {
// Validate match.
a := src[base:s]
b := src[base-repeat : base-repeat+(s-base)]
if !bytes.Equal(a, b) {
panic("mismatch")
}
}
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
if d > dstLimit {
// Bail if we do not have space for more.
return 0
}
// Check for an immediate match, otherwise start search at s+1
x := load64(src, s-2)
m2Hash := hash6(x, tableBits)
currHash := hash6(x>>16, tableBits)
candidate = int(table[currHash])
table[m2Hash] = uint32(s - 2)
table[currHash] = uint32(s)
if uint32(x>>16) != load32(src, candidate) {
cv = load64(src, s+1)
s++
break
}
}
}
emitRemainder:
if nextEmit < len(src) {
// Bail if we exceed the maximum size.
if d+len(src)-nextEmit > dstLimit {
return 0
}
d += emitLiteralSize(src[nextEmit:])
}
return d
}
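// calcBlockSizeSmall is calcBlockSize with a smaller (9-bit) hash table,
// matching the assembly variant that is limited to inputs of at most 1024 bytes.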
func calcBlockSizeSmall(src []byte) (d int) {
// Initialize the hash table.
const (
tableBits = 9
maxTableSize = 1 << tableBits
)
var table [maxTableSize]uint32
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := len(src) - inputMargin
// Bail if we can't compress to at least this.
dstLimit := len(src) - len(src)>>5 - 5
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := 0
// The encoded form must start with a literal, as there are no previous
// bytes to copy, so we start looking for hash matches at s == 1.
s := 1
cv := load64(src, s)
// We search for a repeat at -1, but don't output repeats when nextEmit == 0
repeat := 1
for {
candidate := 0
for {
// Next src position to check
nextS := s + (s-nextEmit)>>6 + 4
if nextS > sLimit {
goto emitRemainder
}
hash0 := hash6(cv, tableBits)
hash1 := hash6(cv>>8, tableBits)
candidate = int(table[hash0])
candidate2 := int(table[hash1])
table[hash0] = uint32(s)
table[hash1] = uint32(s + 1)
hash2 := hash6(cv>>16, tableBits)
// Check repeat at offset checkRep.
const checkRep = 1
if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
base := s + checkRep
// Extend back
for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
i--
base--
}
d += emitLiteralSize(src[nextEmit:base])
// Extend forward
candidate := s - repeat + 4 + checkRep
s += 4 + checkRep
for s <= sLimit {
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidate += 8
}
d += emitCopyNoRepeatSize(repeat, s-base)
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
cv = load64(src, s)
continue
}
if uint32(cv) == load32(src, candidate) {
break
}
candidate = int(table[hash2])
if uint32(cv>>8) == load32(src, candidate2) {
table[hash2] = uint32(s + 2)
candidate = candidate2
s++
break
}
table[hash2] = uint32(s + 2)
if uint32(cv>>16) == load32(src, candidate) {
s += 2
break
}
cv = load64(src, nextS)
s = nextS
}
// Extend backwards
for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
candidate--
s--
}
// Bail if we exceed the maximum size.
if d+(s-nextEmit) > dstLimit {
return 0
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
d += emitLiteralSize(src[nextEmit:s])
// Call emitCopy, and then see if another emitCopy could be our next
// move. Repeat until we find no match for the input immediately after
// what was consumed by the last emitCopy call.
//
// If we exit this loop normally then we need to call emitLiteral next,
// though we don't yet know how big the literal will be. We handle that
// by proceeding to the next iteration of the main loop. We also can
// exit this loop via goto if we get close to exhausting the input.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
base := s
repeat = base - candidate
// Extend the 4-byte match as long as possible.
s += 4
candidate += 4
for s <= len(src)-8 {
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidate += 8
}
d += emitCopyNoRepeatSize(repeat, s-base)
if false {
// Validate match.
a := src[base:s]
b := src[base-repeat : base-repeat+(s-base)]
if !bytes.Equal(a, b) {
panic("mismatch")
}
}
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
if d > dstLimit {
// Bail if we do not have space for more.
return 0
}
// Check for an immediate match, otherwise start search at s+1
x := load64(src, s-2)
m2Hash := hash6(x, tableBits)
currHash := hash6(x>>16, tableBits)
candidate = int(table[currHash])
table[m2Hash] = uint32(s - 2)
table[currHash] = uint32(s)
if uint32(x>>16) != load32(src, candidate) {
cv = load64(src, s+1)
s++
break
}
}
}
emitRemainder:
if nextEmit < len(src) {
// Bail if we exceed the maximum size.
if d+len(src)-nextEmit > dstLimit {
return 0
}
d += emitLiteralSize(src[nextEmit:])
}
return d
}
// emitLiteralSize returns the number of bytes the encoded literal chunk for
// lit would occupy, without writing anything.
//
// It assumes that:
//
// 0 <= len(lit) && len(lit) <= math.MaxUint32
func emitLiteralSize(lit []byte) int {
if len(lit) == 0 {
return 0
}
switch {
case len(lit) <= 60:
return len(lit) + 1
case len(lit) <= 1<<8:
return len(lit) + 2
case len(lit) <= 1<<16:
return len(lit) + 3
case len(lit) <= 1<<24:
return len(lit) + 4
default:
return len(lit) + 5
}
}
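As a quick sanity check of the size arithmetic above (hypothetical values, not part of the library):
// emitLiteralSize(make([]byte, 50))  == 51  // 1 tag byte
// emitLiteralSize(make([]byte, 100)) == 102 // tag + 1 length byte
// emitLiteralSize(make([]byte, 300)) == 303 // tag + 2 length bytes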
func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
panic("cvtLZ4BlockAsm should be unreachable")
}
func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
panic("cvtLZ4BlockSnappyAsm should be unreachable")
}


@ -146,6 +146,20 @@ func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int
//go:noescape
func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int
// calcBlockSize returns the encoded size of a non-empty src.
// Maximum input 4294967295 bytes.
//
//go:noescape
func calcBlockSize(src []byte) int
// calcBlockSizeSmall returns the encoded size of a non-empty src.
// Maximum input 1024 bytes.
//
//go:noescape
func calcBlockSizeSmall(src []byte) int
// emitLiteral writes a literal chunk and returns the number of bytes written.
//
// It assumes that:
@ -192,3 +206,13 @@ func emitCopyNoRepeat(dst []byte, offset int, length int) int
//
//go:noescape
func matchLen(a []byte, b []byte) int
// cvtLZ4BlockAsm converts an LZ4 block to S2
//
//go:noescape
func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
// cvtLZ4BlockSnappyAsm converts an LZ4 block to Snappy
//
//go:noescape
func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)

File diff suppressed because it is too large

vendor/github.com/klauspost/compress/s2/lz4convert.go (generated, vendored, new file)

@ -0,0 +1,585 @@
// Copyright (c) 2022 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package s2
import (
"encoding/binary"
"errors"
"fmt"
)
// LZ4Converter provides conversion from LZ4 blocks as defined here:
// https://github.com/lz4/lz4/blob/dev/doc/lz4_Block_format.md
type LZ4Converter struct {
}
// ErrDstTooSmall is returned when provided destination is too small.
var ErrDstTooSmall = errors.New("s2: destination too small")
// ConvertBlock will convert an LZ4 block and append it as an S2
// block without block length to dst.
// The uncompressed size is returned as well.
// dst must have capacity to contain the entire compressed block.
func (l *LZ4Converter) ConvertBlock(dst, src []byte) ([]byte, int, error) {
if len(src) == 0 {
return dst, 0, nil
}
const debug = false
const inline = true
const lz4MinMatch = 4
s, d := 0, len(dst)
dst = dst[:cap(dst)]
if !debug && hasAmd64Asm {
res, sz := cvtLZ4BlockAsm(dst[d:], src)
if res < 0 {
const (
errCorrupt = -1
errDstTooSmall = -2
)
switch res {
case errCorrupt:
return nil, 0, ErrCorrupt
case errDstTooSmall:
return nil, 0, ErrDstTooSmall
default:
return nil, 0, fmt.Errorf("unexpected result: %d", res)
}
}
if d+sz > len(dst) {
return nil, 0, ErrDstTooSmall
}
return dst[:d+sz], res, nil
}
dLimit := len(dst) - 10
var lastOffset uint16
var uncompressed int
if debug {
fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
}
for {
if s >= len(src) {
return dst[:d], 0, ErrCorrupt
}
// Read literal info
token := src[s]
ll := int(token >> 4)
ml := int(lz4MinMatch + (token & 0xf))
// If upper nibble is 15, literal length is extended
if token >= 0xf0 {
for {
s++
if s >= len(src) {
if debug {
fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
}
return dst[:d], 0, ErrCorrupt
}
val := src[s]
ll += int(val)
if val != 255 {
break
}
}
}
// Skip past token
if s+ll >= len(src) {
if debug {
fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
}
return nil, 0, ErrCorrupt
}
s++
if ll > 0 {
if d+ll > dLimit {
return nil, 0, ErrDstTooSmall
}
if debug {
fmt.Printf("emit %d literals\n", ll)
}
d += emitLiteralGo(dst[d:], src[s:s+ll])
s += ll
uncompressed += ll
}
// Check if we are done...
if s == len(src) && ml == lz4MinMatch {
break
}
// 2 byte offset
if s >= len(src)-2 {
if debug {
fmt.Printf("s (%d) >= len(src)-2 (%d)\n", s, len(src)-2)
}
return nil, 0, ErrCorrupt
}
offset := binary.LittleEndian.Uint16(src[s:])
s += 2
if offset == 0 {
if debug {
fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
}
return nil, 0, ErrCorrupt
}
if int(offset) > uncompressed {
if debug {
fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
}
return nil, 0, ErrCorrupt
}
if ml == lz4MinMatch+15 {
for {
if s >= len(src) {
if debug {
fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
}
return nil, 0, ErrCorrupt
}
val := src[s]
s++
ml += int(val)
if val != 255 {
if s >= len(src) {
if debug {
fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
}
return nil, 0, ErrCorrupt
}
break
}
}
}
if offset == lastOffset {
if debug {
fmt.Printf("emit repeat, length: %d, offset: %d\n", ml, offset)
}
if !inline {
d += emitRepeat16(dst[d:], offset, ml)
} else {
length := ml
dst := dst[d:]
for len(dst) > 5 {
// Repeat offset, make length cheaper
length -= 4
if length <= 4 {
dst[0] = uint8(length)<<2 | tagCopy1
dst[1] = 0
d += 2
break
}
if length < 8 && offset < 2048 {
// Encode WITH offset
dst[1] = uint8(offset)
dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1
d += 2
break
}
if length < (1<<8)+4 {
length -= 4
dst[2] = uint8(length)
dst[1] = 0
dst[0] = 5<<2 | tagCopy1
d += 3
break
}
if length < (1<<16)+(1<<8) {
length -= 1 << 8
dst[3] = uint8(length >> 8)
dst[2] = uint8(length >> 0)
dst[1] = 0
dst[0] = 6<<2 | tagCopy1
d += 4
break
}
const maxRepeat = (1 << 24) - 1
length -= 1 << 16
left := 0
if length > maxRepeat {
left = length - maxRepeat + 4
length = maxRepeat - 4
}
dst[4] = uint8(length >> 16)
dst[3] = uint8(length >> 8)
dst[2] = uint8(length >> 0)
dst[1] = 0
dst[0] = 7<<2 | tagCopy1
if left > 0 {
d += 5 + emitRepeat16(dst[5:], offset, left)
break
}
d += 5
break
}
}
} else {
if debug {
fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
}
if !inline {
d += emitCopy16(dst[d:], offset, ml)
} else {
length := ml
dst := dst[d:]
for len(dst) > 5 {
// Offset no more than 2 bytes.
if length > 64 {
off := 3
if offset < 2048 {
// emit 8 bytes as tagCopy1, rest as repeats.
dst[1] = uint8(offset)
dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
length -= 8
off = 2
} else {
// Emit a length 60 copy, encoded as 3 bytes.
// Emit remaining as repeat value (minimum 4 bytes).
dst[2] = uint8(offset >> 8)
dst[1] = uint8(offset)
dst[0] = 59<<2 | tagCopy2
length -= 60
}
// Emit remaining as repeats, at least 4 bytes remain.
d += off + emitRepeat16(dst[off:], offset, length)
break
}
if length >= 12 || offset >= 2048 {
// Emit the remaining copy, encoded as 3 bytes.
dst[2] = uint8(offset >> 8)
dst[1] = uint8(offset)
dst[0] = uint8(length-1)<<2 | tagCopy2
d += 3
break
}
// Emit the remaining copy, encoded as 2 bytes.
dst[1] = uint8(offset)
dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
d += 2
break
}
}
lastOffset = offset
}
uncompressed += ml
if d > dLimit {
return nil, 0, ErrDstTooSmall
}
}
return dst[:d], uncompressed, nil
}
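A minimal usage sketch for ConvertBlock (convertLZ4, lz4Block, and maxUncompressed are hypothetical names; the caller must pre-size dst):
package example

import (
	"fmt"

	"github.com/klauspost/compress/s2"
)

// convertLZ4 converts a raw LZ4 block (no frame header) to an S2 block.
// maxUncompressed is an assumed upper bound on the decompressed size.
func convertLZ4(lz4Block []byte, maxUncompressed int) ([]byte, error) {
	var conv s2.LZ4Converter
	// ConvertBlock appends to dst and needs enough capacity up front.
	dst := make([]byte, 0, s2.MaxEncodedLen(maxUncompressed))
	out, n, err := conv.ConvertBlock(dst, lz4Block)
	if err != nil {
		return nil, err
	}
	fmt.Printf("converted %d uncompressed bytes into %d S2 bytes\n", n, len(out))
	return out, nil
}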
// ConvertBlockSnappy will convert an LZ4 block and append it
// as a Snappy block without block length to dst.
// The uncompressed size is returned as well.
// dst must have capacity to contain the entire compressed block.
func (l *LZ4Converter) ConvertBlockSnappy(dst, src []byte) ([]byte, int, error) {
if len(src) == 0 {
return dst, 0, nil
}
const debug = false
const lz4MinMatch = 4
s, d := 0, len(dst)
dst = dst[:cap(dst)]
// Use assembly when possible
if !debug && hasAmd64Asm {
res, sz := cvtLZ4BlockSnappyAsm(dst[d:], src)
if res < 0 {
const (
errCorrupt = -1
errDstTooSmall = -2
)
switch res {
case errCorrupt:
return nil, 0, ErrCorrupt
case errDstTooSmall:
return nil, 0, ErrDstTooSmall
default:
return nil, 0, fmt.Errorf("unexpected result: %d", res)
}
}
if d+sz > len(dst) {
return nil, 0, ErrDstTooSmall
}
return dst[:d+sz], res, nil
}
dLimit := len(dst) - 10
var uncompressed int
if debug {
fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
}
for {
if s >= len(src) {
return nil, 0, ErrCorrupt
}
// Read literal info
token := src[s]
ll := int(token >> 4)
ml := int(lz4MinMatch + (token & 0xf))
// If upper nibble is 15, literal length is extended
if token >= 0xf0 {
for {
s++
if s >= len(src) {
if debug {
fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
}
return nil, 0, ErrCorrupt
}
val := src[s]
ll += int(val)
if val != 255 {
break
}
}
}
// Skip past token
if s+ll >= len(src) {
if debug {
fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
}
return nil, 0, ErrCorrupt
}
s++
if ll > 0 {
if d+ll > dLimit {
return nil, 0, ErrDstTooSmall
}
if debug {
fmt.Printf("emit %d literals\n", ll)
}
d += emitLiteralGo(dst[d:], src[s:s+ll])
s += ll
uncompressed += ll
}
// Check if we are done...
if s == len(src) && ml == lz4MinMatch {
break
}
// 2 byte offset
if s >= len(src)-2 {
if debug {
fmt.Printf("s (%d) >= len(src)-2 (%d)\n", s, len(src)-2)
}
return nil, 0, ErrCorrupt
}
offset := binary.LittleEndian.Uint16(src[s:])
s += 2
if offset == 0 {
if debug {
fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
}
return nil, 0, ErrCorrupt
}
if int(offset) > uncompressed {
if debug {
fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
}
return nil, 0, ErrCorrupt
}
if ml == lz4MinMatch+15 {
for {
if s >= len(src) {
if debug {
fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
}
return nil, 0, ErrCorrupt
}
val := src[s]
s++
ml += int(val)
if val != 255 {
if s >= len(src) {
if debug {
fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
}
return nil, 0, ErrCorrupt
}
break
}
}
}
if debug {
fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
}
length := ml
// d += emitCopyNoRepeat(dst[d:], int(offset), ml)
for length > 0 {
if d >= dLimit {
return nil, 0, ErrDstTooSmall
}
// Offset no more than 2 bytes.
if length > 64 {
// Emit a length 64 copy, encoded as 3 bytes.
dst[d+2] = uint8(offset >> 8)
dst[d+1] = uint8(offset)
dst[d+0] = 63<<2 | tagCopy2
length -= 64
d += 3
continue
}
if length >= 12 || offset >= 2048 || length < 4 {
// Emit the remaining copy, encoded as 3 bytes.
dst[d+2] = uint8(offset >> 8)
dst[d+1] = uint8(offset)
dst[d+0] = uint8(length-1)<<2 | tagCopy2
d += 3
break
}
// Emit the remaining copy, encoded as 2 bytes.
dst[d+1] = uint8(offset)
dst[d+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
d += 2
break
}
uncompressed += ml
if d > dLimit {
return nil, 0, ErrDstTooSmall
}
}
return dst[:d], uncompressed, nil
}
// emitRepeat writes a repeat chunk and returns the number of bytes written.
// Length must be at least 4 and < 1<<24
func emitRepeat16(dst []byte, offset uint16, length int) int {
// Repeat offset, make length cheaper
length -= 4
if length <= 4 {
dst[0] = uint8(length)<<2 | tagCopy1
dst[1] = 0
return 2
}
if length < 8 && offset < 2048 {
// Encode WITH offset
dst[1] = uint8(offset)
dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1
return 2
}
if length < (1<<8)+4 {
length -= 4
dst[2] = uint8(length)
dst[1] = 0
dst[0] = 5<<2 | tagCopy1
return 3
}
if length < (1<<16)+(1<<8) {
length -= 1 << 8
dst[3] = uint8(length >> 8)
dst[2] = uint8(length >> 0)
dst[1] = 0
dst[0] = 6<<2 | tagCopy1
return 4
}
const maxRepeat = (1 << 24) - 1
length -= 1 << 16
left := 0
if length > maxRepeat {
left = length - maxRepeat + 4
length = maxRepeat - 4
}
dst[4] = uint8(length >> 16)
dst[3] = uint8(length >> 8)
dst[2] = uint8(length >> 0)
dst[1] = 0
dst[0] = 7<<2 | tagCopy1
if left > 0 {
return 5 + emitRepeat16(dst[5:], offset, left)
}
return 5
}
// emitCopy writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
//
// dst is long enough to hold the encoded bytes
// 1 <= offset && offset <= math.MaxUint16
// 4 <= length && length <= math.MaxUint32
func emitCopy16(dst []byte, offset uint16, length int) int {
// Offset no more than 2 bytes.
if length > 64 {
off := 3
if offset < 2048 {
// emit 8 bytes as tagCopy1, rest as repeats.
dst[1] = uint8(offset)
dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
length -= 8
off = 2
} else {
// Emit a length 60 copy, encoded as 3 bytes.
// Emit remaining as repeat value (minimum 4 bytes).
dst[2] = uint8(offset >> 8)
dst[1] = uint8(offset)
dst[0] = 59<<2 | tagCopy2
length -= 60
}
// Emit remaining as repeats, at least 4 bytes remain.
return off + emitRepeat16(dst[off:], offset, length)
}
if length >= 12 || offset >= 2048 {
// Emit the remaining copy, encoded as 3 bytes.
dst[2] = uint8(offset >> 8)
dst[1] = uint8(offset)
dst[0] = uint8(length-1)<<2 | tagCopy2
return 3
}
// Emit the remaining copy, encoded as 2 bytes.
dst[1] = uint8(offset)
dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
return 2
}
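For example, the two-byte tagCopy1 form above encodes offset 10 with length 8 (offset < 2048, length < 12) as:
// dst[0] = uint8(10>>8)<<5 | uint8(8-4)<<2 | tagCopy1 // high offset bits | (length-4) | tag
// dst[1] = uint8(10)                                  // low offset byte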
// emitLiteral writes a literal chunk and returns the number of bytes written.
//
// It assumes that:
//
// dst is long enough to hold the encoded bytes
// 0 <= len(lit) && len(lit) <= math.MaxUint32
func emitLiteralGo(dst, lit []byte) int {
if len(lit) == 0 {
return 0
}
i, n := 0, uint(len(lit)-1)
switch {
case n < 60:
dst[0] = uint8(n)<<2 | tagLiteral
i = 1
case n < 1<<8:
dst[1] = uint8(n)
dst[0] = 60<<2 | tagLiteral
i = 2
case n < 1<<16:
dst[2] = uint8(n >> 8)
dst[1] = uint8(n)
dst[0] = 61<<2 | tagLiteral
i = 3
case n < 1<<24:
dst[3] = uint8(n >> 16)
dst[2] = uint8(n >> 8)
dst[1] = uint8(n)
dst[0] = 62<<2 | tagLiteral
i = 4
default:
dst[4] = uint8(n >> 24)
dst[3] = uint8(n >> 16)
dst[2] = uint8(n >> 8)
dst[1] = uint8(n)
dst[0] = 63<<2 | tagLiteral
i = 5
}
return i + copy(dst[i:], lit)
}


@ -32,14 +32,38 @@ func (d *dict) ID() uint32 {
return d.id
}
// DictContentSize returns the dictionary content size or 0 if d is nil.
func (d *dict) DictContentSize() int {
// ContentSize returns the dictionary content size or 0 if d is nil.
func (d *dict) ContentSize() int {
if d == nil {
return 0
}
return len(d.content)
}
// Content returns the dictionary content.
func (d *dict) Content() []byte {
if d == nil {
return nil
}
return d.content
}
// Offsets returns the initial offsets.
func (d *dict) Offsets() [3]int {
if d == nil {
return [3]int{}
}
return d.offsets
}
// LitEncoder returns the literal encoder.
func (d *dict) LitEncoder() *huff0.Scratch {
if d == nil {
return nil
}
return d.litEnc
}
// Load a dictionary as described in
// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
func loadDict(b []byte) (*dict, error) {
@ -64,7 +88,7 @@ func loadDict(b []byte) (*dict, error) {
var err error
d.litEnc, b, err = huff0.ReadTable(b[8:], nil)
if err != nil {
return nil, err
return nil, fmt.Errorf("loading literal table: %w", err)
}
d.litEnc.Reuse = huff0.ReusePolicyMust
@ -122,3 +146,16 @@ func loadDict(b []byte) (*dict, error) {
return &d, nil
}
// InspectDictionary loads a zstd dictionary and provides functions to inspect the content.
func InspectDictionary(b []byte) (interface {
ID() uint32
ContentSize() int
Content() []byte
Offsets() [3]int
LitEncoder() *huff0.Scratch
}, error) {
initPredefined()
d, err := loadDict(b)
return d, err
}


@ -149,7 +149,7 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) {
if singleBlock {
e.lowMem = true
}
e.ensureHist(d.DictContentSize() + maxCompressedBlockSize)
e.ensureHist(d.ContentSize() + maxCompressedBlockSize)
e.lowMem = low
}

View file

@ -31,6 +31,7 @@ import (
"time"
"github.com/mwitkow/go-conntrack"
"golang.org/x/net/http/httpproxy"
"golang.org/x/net/http2"
"golang.org/x/oauth2"
"golang.org/x/oauth2/clientcredentials"
@ -80,12 +81,9 @@ func (tv *TLSVersion) UnmarshalYAML(unmarshal func(interface{}) error) error {
return fmt.Errorf("unknown TLS version: %s", s)
}
func (tv *TLSVersion) MarshalYAML() (interface{}, error) {
if tv == nil || *tv == 0 {
return []byte("null"), nil
}
func (tv TLSVersion) MarshalYAML() (interface{}, error) {
for s, v := range TLSVersions {
if *tv == v {
if tv == v {
return s, nil
}
}
@ -106,13 +104,10 @@ func (tv *TLSVersion) UnmarshalJSON(data []byte) error {
}
// MarshalJSON implements the json.Marshaler interface for TLSVersion.
func (tv *TLSVersion) MarshalJSON() ([]byte, error) {
if tv == nil || *tv == 0 {
return []byte("null"), nil
}
func (tv TLSVersion) MarshalJSON() ([]byte, error) {
for s, v := range TLSVersions {
if *tv == v {
return []byte(s), nil
if tv == v {
return json.Marshal(s)
}
}
return nil, fmt.Errorf("unknown TLS version: %d", tv)
@ -233,11 +228,26 @@ type OAuth2 struct {
Scopes []string `yaml:"scopes,omitempty" json:"scopes,omitempty"`
TokenURL string `yaml:"token_url" json:"token_url"`
EndpointParams map[string]string `yaml:"endpoint_params,omitempty" json:"endpoint_params,omitempty"`
TLSConfig TLSConfig `yaml:"tls_config,omitempty"`
ProxyConfig `yaml:",inline"`
}
// HTTP proxy server to use to connect to the targets.
ProxyURL URL `yaml:"proxy_url,omitempty" json:"proxy_url,omitempty"`
// TLSConfig is used to connect to the token URL.
TLSConfig TLSConfig `yaml:"tls_config,omitempty"`
// UnmarshalYAML implements the yaml.Unmarshaler interface
func (o *OAuth2) UnmarshalYAML(unmarshal func(interface{}) error) error {
type plain OAuth2
if err := unmarshal((*plain)(o)); err != nil {
return err
}
return o.ProxyConfig.Validate()
}
// UnmarshalJSON implements the json.Unmarshaler interface for OAuth2.
func (o *OAuth2) UnmarshalJSON(data []byte) error {
type plain OAuth2
if err := json.Unmarshal(data, (*plain)(o)); err != nil {
return err
}
return o.ProxyConfig.Validate()
}
// SetDirectory joins any relative file paths with dir.
@ -287,13 +297,6 @@ type HTTPClientConfig struct {
// The bearer token file for the targets. Deprecated in favour of
// Authorization.CredentialsFile.
BearerTokenFile string `yaml:"bearer_token_file,omitempty" json:"bearer_token_file,omitempty"`
// HTTP proxy server to use to connect to the targets.
ProxyURL URL `yaml:"proxy_url,omitempty" json:"proxy_url,omitempty"`
// ProxyConnectHeader optionally specifies headers to send to
// proxies during CONNECT requests. Assume that at least _some_ of
// these headers are going to contain secrets and use Secret as the
// value type instead of string.
ProxyConnectHeader Header `yaml:"proxy_connect_header,omitempty" json:"proxy_connect_header,omitempty"`
// TLSConfig to use to connect to the targets.
TLSConfig TLSConfig `yaml:"tls_config,omitempty" json:"tls_config,omitempty"`
// FollowRedirects specifies whether the client should follow HTTP 3xx redirects.
@ -304,6 +307,8 @@ type HTTPClientConfig struct {
// The omitempty flag is not set, because it would be hidden from the
// marshalled configuration when set to false.
EnableHTTP2 bool `yaml:"enable_http2" json:"enable_http2"`
// Proxy configuration.
ProxyConfig `yaml:",inline"`
}
// SetDirectory joins any relative file paths with dir.
@ -378,8 +383,8 @@ func (c *HTTPClientConfig) Validate() error {
return fmt.Errorf("at most one of oauth2 client_secret & client_secret_file must be configured")
}
}
if len(c.ProxyConnectHeader) > 0 && (c.ProxyURL.URL == nil || c.ProxyURL.String() == "") {
return fmt.Errorf("if proxy_connect_header is configured proxy_url must also be configured")
if err := c.ProxyConfig.Validate(); err != nil {
return err
}
return nil
}
@ -508,8 +513,8 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HT
// The only timeout we care about is the configured scrape timeout.
// It is applied on request. So we leave out any timings here.
var rt http.RoundTripper = &http.Transport{
Proxy: http.ProxyURL(cfg.ProxyURL.URL),
ProxyConnectHeader: cfg.ProxyConnectHeader.HTTPHeader(),
Proxy: cfg.ProxyConfig.Proxy(),
ProxyConnectHeader: cfg.ProxyConfig.GetProxyConnectHeader(),
MaxIdleConns: 20000,
MaxIdleConnsPerHost: 1000, // see https://github.com/golang/go/issues/13801
DisableKeepAlives: !opts.keepAlivesEnabled,
@ -730,7 +735,8 @@ func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, erro
tlsTransport := func(tlsConfig *tls.Config) (http.RoundTripper, error) {
return &http.Transport{
TLSClientConfig: tlsConfig,
Proxy: http.ProxyURL(rt.config.ProxyURL.URL),
Proxy: rt.config.ProxyConfig.Proxy(),
ProxyConnectHeader: rt.config.ProxyConfig.GetProxyConnectHeader(),
DisableKeepAlives: !rt.opts.keepAlivesEnabled,
MaxIdleConns: 20,
MaxIdleConnsPerHost: 1, // see https://github.com/golang/go/issues/13801
@ -1078,3 +1084,78 @@ func (c HTTPClientConfig) String() string {
}
return string(b)
}
type ProxyConfig struct {
// HTTP proxy server to use to connect to the targets.
ProxyURL URL `yaml:"proxy_url,omitempty" json:"proxy_url,omitempty"`
// NoProxy contains addresses that should not use a proxy.
NoProxy string `yaml:"no_proxy,omitempty" json:"no_proxy,omitempty"`
// ProxyFromEnvironment makes use of the httpproxy.FromEnvironment function
// to determine proxies.
ProxyFromEnvironment bool `yaml:"proxy_from_environment,omitempty" json:"proxy_from_environment,omitempty"`
// ProxyConnectHeader optionally specifies headers to send to
// proxies during CONNECT requests. Assume that at least _some_ of
// these headers are going to contain secrets and use Secret as the
// value type instead of string.
ProxyConnectHeader Header `yaml:"proxy_connect_header,omitempty" json:"proxy_connect_header,omitempty"`
proxyFunc func(*http.Request) (*url.URL, error)
}
// Validate checks that the proxy settings are internally consistent.
func (c *ProxyConfig) Validate() error {
if len(c.ProxyConnectHeader) > 0 && (!c.ProxyFromEnvironment && (c.ProxyURL.URL == nil || c.ProxyURL.String() == "")) {
return fmt.Errorf("if proxy_connect_header is configured, proxy_url or proxy_from_environment must also be configured")
}
if c.ProxyFromEnvironment && c.ProxyURL.URL != nil && c.ProxyURL.String() != "" {
return fmt.Errorf("if proxy_from_environment is configured, proxy_url must not be configured")
}
if c.ProxyFromEnvironment && c.NoProxy != "" {
return fmt.Errorf("if proxy_from_environment is configured, no_proxy must not be configured")
}
if c.ProxyURL.URL == nil && c.NoProxy != "" {
return fmt.Errorf("if no_proxy is configured, proxy_url must also be configured")
}
return nil
}
// Proxy returns a function that resolves the proxy URL for a request.
func (c *ProxyConfig) Proxy() (fn func(*http.Request) (*url.URL, error)) {
if c == nil {
return nil
}
defer func() {
fn = c.proxyFunc
}()
if c.proxyFunc != nil {
return
}
if c.ProxyFromEnvironment {
proxyFn := httpproxy.FromEnvironment().ProxyFunc()
c.proxyFunc = func(req *http.Request) (*url.URL, error) {
return proxyFn(req.URL)
}
return
}
if c.ProxyURL.URL != nil && c.ProxyURL.URL.String() != "" {
if c.NoProxy == "" {
c.proxyFunc = http.ProxyURL(c.ProxyURL.URL)
return
}
proxy := &httpproxy.Config{
HTTPProxy: c.ProxyURL.String(),
HTTPSProxy: c.ProxyURL.String(),
NoProxy: c.NoProxy,
}
proxyFn := proxy.ProxyFunc()
c.proxyFunc = func(req *http.Request) (*url.URL, error) {
return proxyFn(req.URL)
}
}
return
}
// GetProxyConnectHeader returns the proxy CONNECT headers.
func (c *ProxyConfig) GetProxyConnectHeader() http.Header {
return c.ProxyConnectHeader.HTTPHeader()
}
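A minimal sketch of using the new ProxyConfig through HTTPClientConfig (the embedded field is set directly; NewClientFromConfig is this package's client constructor):
package main

import (
	"fmt"

	"github.com/prometheus/common/config"
)

func main() {
	cfg := config.DefaultHTTPClientConfig
	// Resolve proxies from HTTP_PROXY / HTTPS_PROXY / NO_PROXY.
	cfg.ProxyConfig = config.ProxyConfig{ProxyFromEnvironment: true}
	client, err := config.NewClientFromConfig(cfg, "example")
	if err != nil {
		fmt.Println(err)
		return
	}
	_ = client
}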


@ -115,32 +115,28 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
// textDecoder implements the Decoder interface for the text protocol.
type textDecoder struct {
r io.Reader
p TextParser
fams []*dto.MetricFamily
fams map[string]*dto.MetricFamily
err error
}
// Decode implements the Decoder interface.
func (d *textDecoder) Decode(v *dto.MetricFamily) error {
// TODO(fabxc): Wrap this as a line reader to make streaming safer.
if len(d.fams) == 0 {
// No cached metric families, read everything and parse metrics.
fams, err := d.p.TextToMetricFamilies(d.r)
if err != nil {
return err
}
if len(fams) == 0 {
return io.EOF
}
d.fams = make([]*dto.MetricFamily, 0, len(fams))
for _, f := range fams {
d.fams = append(d.fams, f)
if d.err == nil {
// Read all metrics in one shot.
var p TextParser
d.fams, d.err = p.TextToMetricFamilies(d.r)
// If we don't get an error, store io.EOF for the end.
if d.err == nil {
d.err = io.EOF
}
}
*v = *d.fams[0]
d.fams = d.fams[1:]
return nil
// Pick off one MetricFamily per Decode until there's nothing left.
for key, fam := range d.fams {
*v = *fam
delete(d.fams, key)
return nil
}
return d.err
}
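A short sketch of the decoder in use; each Decode call now yields one MetricFamily from the map until the stored error (io.EOF on clean input) is returned:
package main

import (
	"fmt"
	"io"
	"strings"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	in := "# TYPE requests_total counter\nrequests_total 42\n"
	dec := expfmt.NewDecoder(strings.NewReader(in), expfmt.FmtText)
	for {
		var mf dto.MetricFamily
		if err := dec.Decode(&mf); err != nil {
			if err != io.EOF {
				fmt.Println("decode error:", err)
			}
			break
		}
		fmt.Println(mf.GetName()) // requests_total
	}
}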
// SampleDecoder wraps a Decoder to extract samples from the metric families


@ -142,9 +142,13 @@ func (p *TextParser) reset(in io.Reader) {
func (p *TextParser) startOfLine() stateFn {
p.lineCount++
if p.skipBlankTab(); p.err != nil {
// End of input reached. This is the only case where
// that is not an error but a signal that we are done.
p.err = nil
// This is the only place that we expect to see io.EOF,
// which is not an error but the signal that we are done.
// Any other error that happens to align with the start of
// a line is still an error.
if p.err == io.EOF {
p.err = nil
}
return nil
}
switch p.currentByte {


@ -18,7 +18,6 @@ import (
"errors"
"fmt"
"math"
"regexp"
"strconv"
"strings"
"time"
@ -183,54 +182,78 @@ func (d *Duration) Type() string {
return "duration"
}
var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$")
func isdigit(c byte) bool { return c >= '0' && c <= '9' }
// Units are required to go in order from biggest to smallest.
// This guards against confusion from "1m1d" being 1 minute + 1 day, not 1 month + 1 day.
var unitMap = map[string]struct {
pos int
mult uint64
}{
"ms": {7, uint64(time.Millisecond)},
"s": {6, uint64(time.Second)},
"m": {5, uint64(time.Minute)},
"h": {4, uint64(time.Hour)},
"d": {3, uint64(24 * time.Hour)},
"w": {2, uint64(7 * 24 * time.Hour)},
"y": {1, uint64(365 * 24 * time.Hour)},
}
// ParseDuration parses a string into a time.Duration, assuming that a year
// always has 365d, a week always has 7d, and a day always has 24h.
func ParseDuration(durationStr string) (Duration, error) {
switch durationStr {
func ParseDuration(s string) (Duration, error) {
switch s {
case "0":
// Allow 0 without a unit.
return 0, nil
case "":
return 0, errors.New("empty duration string")
}
matches := durationRE.FindStringSubmatch(durationStr)
if matches == nil {
return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
}
var dur time.Duration
// Parse the match at pos `pos` in the regex and use `mult` to turn that
// into ms, then add that value to the total parsed duration.
var overflowErr error
m := func(pos int, mult time.Duration) {
if matches[pos] == "" {
return
orig := s
var dur uint64
lastUnitPos := 0
for s != "" {
if !isdigit(s[0]) {
return 0, fmt.Errorf("not a valid duration string: %q", orig)
}
n, _ := strconv.Atoi(matches[pos])
// Consume [0-9]*
i := 0
for ; i < len(s) && isdigit(s[i]); i++ {
}
v, err := strconv.ParseUint(s[:i], 10, 0)
if err != nil {
return 0, fmt.Errorf("not a valid duration string: %q", orig)
}
s = s[i:]
// Consume unit.
for i = 0; i < len(s) && !isdigit(s[i]); i++ {
}
if i == 0 {
return 0, fmt.Errorf("not a valid duration string: %q", orig)
}
u := s[:i]
s = s[i:]
unit, ok := unitMap[u]
if !ok {
return 0, fmt.Errorf("unknown unit %q in duration %q", u, orig)
}
if unit.pos <= lastUnitPos { // Units must go in order from biggest to smallest.
return 0, fmt.Errorf("not a valid duration string: %q", orig)
}
lastUnitPos = unit.pos
// Check if the provided duration overflows time.Duration (> ~290 years).
if n > int((1<<63-1)/mult/time.Millisecond) {
overflowErr = errors.New("duration out of range")
if v > 1<<63/unit.mult {
return 0, errors.New("duration out of range")
}
d := time.Duration(n) * time.Millisecond
dur += d * mult
if dur < 0 {
overflowErr = errors.New("duration out of range")
dur += v * unit.mult
if dur > 1<<63-1 {
return 0, errors.New("duration out of range")
}
}
m(2, 1000*60*60*24*365) // y
m(4, 1000*60*60*24*7) // w
m(6, 1000*60*60*24) // d
m(8, 1000*60*60) // h
m(10, 1000*60) // m
m(12, 1000) // s
m(14, 1) // ms
return Duration(dur), overflowErr
return Duration(dur), nil
}
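Usage sketch: units must appear from biggest to smallest, so "1h30m" parses while "1m1h" is rejected:
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	d, err := model.ParseDuration("1h30m")
	fmt.Println(d, err) // 1h30m <nil>
	_, err = model.ParseDuration("1m1h") // units out of order
	fmt.Println(err)    // not a valid duration string: "1m1h"
}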
func (d Duration) String() string {


@ -16,20 +16,12 @@ package model
import (
"encoding/json"
"fmt"
"math"
"sort"
"strconv"
"strings"
)
var (
// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
// non-existing sample pair. It is a SamplePair with timestamp Earliest and
// value 0.0. Note that the natural zero value of SamplePair has a timestamp
// of 0, which is possible to appear in a real SamplePair and thus not
// suitable to signal a non-existing SamplePair.
ZeroSamplePair = SamplePair{Timestamp: Earliest}
// ZeroSample is the pseudo zero-value of Sample used to signal a
// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
// and metric nil. Note that the natural zero value of Sample has a timestamp
@ -38,82 +30,14 @@ var (
ZeroSample = Sample{Timestamp: Earliest}
)
// A SampleValue is a representation of a value for a given sample at a given
// time.
type SampleValue float64
// MarshalJSON implements json.Marshaler.
func (v SampleValue) MarshalJSON() ([]byte, error) {
return json.Marshal(v.String())
}
// UnmarshalJSON implements json.Unmarshaler.
func (v *SampleValue) UnmarshalJSON(b []byte) error {
if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
return fmt.Errorf("sample value must be a quoted string")
}
f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
if err != nil {
return err
}
*v = SampleValue(f)
return nil
}
// Equal returns true if the value of v and o is equal or if both are NaN. Note
// that v==o is false if both are NaN. If you want the conventional float
// behavior, use == to compare two SampleValues.
func (v SampleValue) Equal(o SampleValue) bool {
if v == o {
return true
}
return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
}
func (v SampleValue) String() string {
return strconv.FormatFloat(float64(v), 'f', -1, 64)
}
// SamplePair pairs a SampleValue with a Timestamp.
type SamplePair struct {
Timestamp Time
Value SampleValue
}
// MarshalJSON implements json.Marshaler.
func (s SamplePair) MarshalJSON() ([]byte, error) {
t, err := json.Marshal(s.Timestamp)
if err != nil {
return nil, err
}
v, err := json.Marshal(s.Value)
if err != nil {
return nil, err
}
return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
}
// UnmarshalJSON implements json.Unmarshaler.
func (s *SamplePair) UnmarshalJSON(b []byte) error {
v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
return json.Unmarshal(b, &v)
}
// Equal returns true if this SamplePair and o have equal Values and equal
// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
func (s *SamplePair) Equal(o *SamplePair) bool {
return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
}
func (s SamplePair) String() string {
return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
}
// Sample is a sample pair associated with a metric.
// Sample is a sample pair associated with a metric. A single sample must
// define either Value or Histogram, but not both. If Histogram is nil, the
// Value field is used; otherwise Value is ignored.
type Sample struct {
Metric Metric `json:"metric"`
Value SampleValue `json:"value"`
Timestamp Time `json:"timestamp"`
Metric Metric `json:"metric"`
Value SampleValue `json:"value"`
Timestamp Time `json:"timestamp"`
Histogram *SampleHistogram `json:"histogram"`
}
// Equal compares first the metrics, then the timestamp, then the value. The
@ -129,11 +53,19 @@ func (s *Sample) Equal(o *Sample) bool {
if !s.Timestamp.Equal(o.Timestamp) {
return false
}
if s.Histogram != nil {
return s.Histogram.Equal(o.Histogram)
}
return s.Value.Equal(o.Value)
}
func (s Sample) String() string {
if s.Histogram != nil {
return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{
Timestamp: s.Timestamp,
Histogram: s.Histogram,
})
}
return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
Timestamp: s.Timestamp,
Value: s.Value,
@ -142,6 +74,19 @@ func (s Sample) String() string {
// MarshalJSON implements json.Marshaler.
func (s Sample) MarshalJSON() ([]byte, error) {
if s.Histogram != nil {
v := struct {
Metric Metric `json:"metric"`
Histogram SampleHistogramPair `json:"histogram"`
}{
Metric: s.Metric,
Histogram: SampleHistogramPair{
Timestamp: s.Timestamp,
Histogram: s.Histogram,
},
}
return json.Marshal(&v)
}
v := struct {
Metric Metric `json:"metric"`
Value SamplePair `json:"value"`
@ -152,21 +97,25 @@ func (s Sample) MarshalJSON() ([]byte, error) {
Value: s.Value,
},
}
return json.Marshal(&v)
}
// UnmarshalJSON implements json.Unmarshaler.
func (s *Sample) UnmarshalJSON(b []byte) error {
v := struct {
Metric Metric `json:"metric"`
Value SamplePair `json:"value"`
Metric Metric `json:"metric"`
Value SamplePair `json:"value"`
Histogram SampleHistogramPair `json:"histogram"`
}{
Metric: s.Metric,
Value: SamplePair{
Timestamp: s.Timestamp,
Value: s.Value,
},
Histogram: SampleHistogramPair{
Timestamp: s.Timestamp,
Histogram: s.Histogram,
},
}
if err := json.Unmarshal(b, &v); err != nil {
@ -174,8 +123,13 @@ func (s *Sample) UnmarshalJSON(b []byte) error {
}
s.Metric = v.Metric
s.Timestamp = v.Value.Timestamp
s.Value = v.Value.Value
if v.Histogram.Histogram != nil {
s.Timestamp = v.Histogram.Timestamp
s.Histogram = v.Histogram.Histogram
} else {
s.Timestamp = v.Value.Timestamp
s.Value = v.Value.Value
}
return nil
}
@ -221,80 +175,76 @@ func (s Samples) Equal(o Samples) bool {
// SampleStream is a stream of Values belonging to an attached COWMetric.
type SampleStream struct {
Metric Metric `json:"metric"`
Values []SamplePair `json:"values"`
Metric Metric `json:"metric"`
Values []SamplePair `json:"values"`
Histograms []SampleHistogramPair `json:"histograms"`
}
func (ss SampleStream) String() string {
vals := make([]string, len(ss.Values))
valuesLength := len(ss.Values)
vals := make([]string, valuesLength+len(ss.Histograms))
for i, v := range ss.Values {
vals[i] = v.String()
}
for i, v := range ss.Histograms {
vals[i+valuesLength] = v.String()
}
return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
}
// Value is a generic interface for values resulting from a query evaluation.
type Value interface {
Type() ValueType
String() string
func (ss SampleStream) MarshalJSON() ([]byte, error) {
if len(ss.Histograms) > 0 && len(ss.Values) > 0 {
v := struct {
Metric Metric `json:"metric"`
Values []SamplePair `json:"values"`
Histograms []SampleHistogramPair `json:"histograms"`
}{
Metric: ss.Metric,
Values: ss.Values,
Histograms: ss.Histograms,
}
return json.Marshal(&v)
} else if len(ss.Histograms) > 0 {
v := struct {
Metric Metric `json:"metric"`
Histograms []SampleHistogramPair `json:"histograms"`
}{
Metric: ss.Metric,
Histograms: ss.Histograms,
}
return json.Marshal(&v)
} else {
v := struct {
Metric Metric `json:"metric"`
Values []SamplePair `json:"values"`
}{
Metric: ss.Metric,
Values: ss.Values,
}
return json.Marshal(&v)
}
}
func (Matrix) Type() ValueType { return ValMatrix }
func (Vector) Type() ValueType { return ValVector }
func (*Scalar) Type() ValueType { return ValScalar }
func (*String) Type() ValueType { return ValString }
func (ss *SampleStream) UnmarshalJSON(b []byte) error {
v := struct {
Metric Metric `json:"metric"`
Values []SamplePair `json:"values"`
Histograms []SampleHistogramPair `json:"histograms"`
}{
Metric: ss.Metric,
Values: ss.Values,
Histograms: ss.Histograms,
}
type ValueType int
const (
ValNone ValueType = iota
ValScalar
ValVector
ValMatrix
ValString
)
// MarshalJSON implements json.Marshaler.
func (et ValueType) MarshalJSON() ([]byte, error) {
return json.Marshal(et.String())
}
func (et *ValueType) UnmarshalJSON(b []byte) error {
var s string
if err := json.Unmarshal(b, &s); err != nil {
if err := json.Unmarshal(b, &v); err != nil {
return err
}
switch s {
case "<ValNone>":
*et = ValNone
case "scalar":
*et = ValScalar
case "vector":
*et = ValVector
case "matrix":
*et = ValMatrix
case "string":
*et = ValString
default:
return fmt.Errorf("unknown value type %q", s)
}
return nil
}
func (e ValueType) String() string {
switch e {
case ValNone:
return "<ValNone>"
case ValScalar:
return "scalar"
case ValVector:
return "vector"
case ValMatrix:
return "matrix"
case ValString:
return "string"
}
panic("ValueType.String: unhandled value type")
ss.Metric = v.Metric
ss.Values = v.Values
ss.Histograms = v.Histograms
return nil
}
// Scalar is a scalar value evaluated at the set timestamp.

View file

@ -0,0 +1,100 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"encoding/json"
"fmt"
"math"
"strconv"
)
var (
// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
// non-existing sample pair. It is a SamplePair with timestamp Earliest and
// value 0.0. Note that the natural zero value of SamplePair has a timestamp
// of 0, which is possible to appear in a real SamplePair and thus not
// suitable to signal a non-existing SamplePair.
ZeroSamplePair = SamplePair{Timestamp: Earliest}
)
// A SampleValue is a representation of a value for a given sample at a given
// time.
type SampleValue float64
// MarshalJSON implements json.Marshaler.
func (v SampleValue) MarshalJSON() ([]byte, error) {
return json.Marshal(v.String())
}
// UnmarshalJSON implements json.Unmarshaler.
func (v *SampleValue) UnmarshalJSON(b []byte) error {
if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
return fmt.Errorf("sample value must be a quoted string")
}
f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
if err != nil {
return err
}
*v = SampleValue(f)
return nil
}
// Equal returns true if the values of v and o are equal or if both are NaN.
// Note that v==o is false if both are NaN. If you want the conventional float
// behavior, use == to compare two SampleValues.
func (v SampleValue) Equal(o SampleValue) bool {
if v == o {
return true
}
return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
}
func (v SampleValue) String() string {
return strconv.FormatFloat(float64(v), 'f', -1, 64)
}
// SamplePair pairs a SampleValue with a Timestamp.
type SamplePair struct {
Timestamp Time
Value SampleValue
}
func (s SamplePair) MarshalJSON() ([]byte, error) {
t, err := json.Marshal(s.Timestamp)
if err != nil {
return nil, err
}
v, err := json.Marshal(s.Value)
if err != nil {
return nil, err
}
return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
}
// UnmarshalJSON implements json.Unmarshaler.
func (s *SamplePair) UnmarshalJSON(b []byte) error {
v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
return json.Unmarshal(b, &v)
}
// Equal returns true if this SamplePair and o have equal Values and equal
// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
func (s *SamplePair) Equal(o *SamplePair) bool {
return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
}
func (s SamplePair) String() string {
return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
}


@ -0,0 +1,178 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"encoding/json"
"fmt"
"strconv"
"strings"
)
type FloatString float64
func (v FloatString) String() string {
return strconv.FormatFloat(float64(v), 'f', -1, 64)
}
func (v FloatString) MarshalJSON() ([]byte, error) {
return json.Marshal(v.String())
}
func (v *FloatString) UnmarshalJSON(b []byte) error {
if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
return fmt.Errorf("float value must be a quoted string")
}
f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
if err != nil {
return err
}
*v = FloatString(f)
return nil
}
type HistogramBucket struct {
Boundaries int32
Lower FloatString
Upper FloatString
Count FloatString
}
func (s HistogramBucket) MarshalJSON() ([]byte, error) {
b, err := json.Marshal(s.Boundaries)
if err != nil {
return nil, err
}
l, err := json.Marshal(s.Lower)
if err != nil {
return nil, err
}
u, err := json.Marshal(s.Upper)
if err != nil {
return nil, err
}
c, err := json.Marshal(s.Count)
if err != nil {
return nil, err
}
return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil
}
func (s *HistogramBucket) UnmarshalJSON(buf []byte) error {
tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count}
wantLen := len(tmp)
if err := json.Unmarshal(buf, &tmp); err != nil {
return err
}
if gotLen := len(tmp); gotLen != wantLen {
return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
}
return nil
}
func (s *HistogramBucket) Equal(o *HistogramBucket) bool {
return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count)
}
func (b HistogramBucket) String() string {
var sb strings.Builder
lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3
upperInclusive := b.Boundaries == 0 || b.Boundaries == 3
if lowerInclusive {
sb.WriteRune('[')
} else {
sb.WriteRune('(')
}
fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper)
if upperInclusive {
sb.WriteRune(']')
} else {
sb.WriteRune(')')
}
fmt.Fprintf(&sb, ":%v", b.Count)
return sb.String()
}
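For instance (a sketch inside package model), Boundaries == 0 means the lower bound is exclusive and the upper bound inclusive:
b := HistogramBucket{Boundaries: 0, Lower: 1, Upper: 2, Count: 5}
fmt.Println(b) // prints "(1,2]:5"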
type HistogramBuckets []*HistogramBucket
func (s HistogramBuckets) Equal(o HistogramBuckets) bool {
if len(s) != len(o) {
return false
}
for i, bucket := range s {
if !bucket.Equal(o[i]) {
return false
}
}
return true
}
type SampleHistogram struct {
Count FloatString `json:"count"`
Sum FloatString `json:"sum"`
Buckets HistogramBuckets `json:"buckets"`
}
func (s SampleHistogram) String() string {
return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets)
}
func (s *SampleHistogram) Equal(o *SampleHistogram) bool {
return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets))
}
type SampleHistogramPair struct {
Timestamp Time
// Histogram should never be nil; it is only stored as a pointer for efficiency.
Histogram *SampleHistogram
}
func (s SampleHistogramPair) MarshalJSON() ([]byte, error) {
if s.Histogram == nil {
return nil, fmt.Errorf("histogram is nil")
}
t, err := json.Marshal(s.Timestamp)
if err != nil {
return nil, err
}
v, err := json.Marshal(s.Histogram)
if err != nil {
return nil, err
}
return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
}
func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error {
tmp := []interface{}{&s.Timestamp, &s.Histogram}
wantLen := len(tmp)
if err := json.Unmarshal(buf, &tmp); err != nil {
return err
}
if gotLen := len(tmp); gotLen != wantLen {
return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
}
if s.Histogram == nil {
return fmt.Errorf("histogram is null")
}
return nil
}
func (s SampleHistogramPair) String() string {
return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp)
}
func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool {
return s == o || (s.Histogram.Equal(o.Histogram) && s.Timestamp.Equal(o.Timestamp))
}


@ -0,0 +1,83 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"encoding/json"
"fmt"
)
// Value is a generic interface for values resulting from a query evaluation.
type Value interface {
Type() ValueType
String() string
}
func (Matrix) Type() ValueType { return ValMatrix }
func (Vector) Type() ValueType { return ValVector }
func (*Scalar) Type() ValueType { return ValScalar }
func (*String) Type() ValueType { return ValString }
type ValueType int
const (
ValNone ValueType = iota
ValScalar
ValVector
ValMatrix
ValString
)
// MarshalJSON implements json.Marshaler.
func (et ValueType) MarshalJSON() ([]byte, error) {
return json.Marshal(et.String())
}
func (et *ValueType) UnmarshalJSON(b []byte) error {
var s string
if err := json.Unmarshal(b, &s); err != nil {
return err
}
switch s {
case "<ValNone>":
*et = ValNone
case "scalar":
*et = ValScalar
case "vector":
*et = ValVector
case "matrix":
*et = ValMatrix
case "string":
*et = ValString
default:
return fmt.Errorf("unknown value type %q", s)
}
return nil
}
func (e ValueType) String() string {
switch e {
case ValNone:
return "<ValNone>"
case ValScalar:
return "scalar"
case ValVector:
return "vector"
case ValMatrix:
return "matrix"
case ValString:
return "string"
}
panic("ValueType.String: unhandled value type")
}


@ -53,6 +53,7 @@ func NewCollector(program string) prometheus.Collector {
"goversion": GoVersion,
"goos": GoOS,
"goarch": GoArch,
"tags": getTags(),
},
},
func() float64 { return 1 },
@ -66,6 +67,7 @@ var versionInfoTmpl = `
build date: {{.buildDate}}
go version: {{.goVersion}}
platform: {{.platform}}
tags: {{.tags}}
`
// Print returns version information.
@ -79,6 +81,7 @@ func Print(program string) string {
"buildDate": BuildDate,
"goVersion": GoVersion,
"platform": GoOS + "/" + GoArch,
"tags": getTags(),
}
t := template.Must(template.New("version").Parse(versionInfoTmpl))
@ -96,5 +99,5 @@ func Info() string {
// BuildContext returns goVersion, platform, buildUser and buildDate information.
func BuildContext() string {
return fmt.Sprintf("(go=%s, platform=%s, user=%s, date=%s)", GoVersion, GoOS+"/"+GoArch, BuildUser, BuildDate)
return fmt.Sprintf("(go=%s, platform=%s, user=%s, date=%s, tags=%s)", GoVersion, GoOS+"/"+GoArch, BuildUser, BuildDate, getTags())
}


@ -19,3 +19,7 @@ package version
func getRevision() string {
return Revision
}
func getTags() string {
return "unknown" // Not available prior to Go 1.18
}


@ -19,6 +19,7 @@ package version
import "runtime/debug"
var computedRevision string
var computedTags string
func getRevision() string {
if Revision != "" {
@ -27,19 +28,24 @@ func getRevision() string {
return computedRevision
}
func init() {
computedRevision = computeRevision()
func getTags() string {
return computedTags
}
func computeRevision() string {
func init() {
computedRevision, computedTags = computeRevision()
}
func computeRevision() (string, string) {
var (
rev = "unknown"
tags = "unknown"
modified bool
)
buildInfo, ok := debug.ReadBuildInfo()
if !ok {
return rev
return rev, tags
}
for _, v := range buildInfo.Settings {
if v.Key == "vcs.revision" {
@ -50,9 +56,12 @@ func computeRevision() string {
modified = true
}
}
if v.Key == "-tags" {
tags = v.Value
}
}
if modified {
return rev + "-modified"
return rev + "-modified", tags
}
return rev
return rev, tags
}
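With the tags plumbing above, callers see the build tags in both outputs (a sketch; actual values depend on the build):
package main

import (
	"fmt"

	"github.com/prometheus/common/version"
)

func main() {
	fmt.Println(version.Print("example")) // includes the new "tags:" line
	fmt.Println(version.BuildContext())   // "(go=..., platform=..., user=..., date=..., tags=...)"
}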


@ -187,8 +187,8 @@ func ReverseString(s string) string {
const shiftGraphemePropState = 4
// FirstGraphemeCluster returns the first grapheme cluster found in the given
// byte slice according to the rules of Unicode Standard Annex #29, Grapheme
// Cluster Boundaries. This function can be called continuously to extract all
// byte slice according to the rules of [Unicode Standard Annex #29, Grapheme
// Cluster Boundaries]. This function can be called continuously to extract all
// grapheme clusters from a byte slice, as illustrated in the example below.
//
// If you don't know the current state, for example when calling the function
@ -209,6 +209,8 @@ const shiftGraphemePropState = 4
// While slightly less convenient than using the Graphemes class, this function
// has much better performance and makes no allocations. It lends itself well to
// large byte slices.
//
// [Unicode Standard Annex #29, Grapheme Cluster Boundaries]: http://unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries
func FirstGraphemeCluster(b []byte, state int) (cluster, rest []byte, width, newState int) {
// An empty byte slice returns nothing.
if len(b) == 0 {
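
A minimal sketch of the extraction loop the comment describes (assuming the vendored github.com/rivo/uniseg import path):

package main

import (
	"fmt"

	"github.com/rivo/uniseg"
)

func main() {
	b := []byte("🇩🇪🏳️‍🌈") // two grapheme clusters spanning many bytes
	state := -1            // -1 means the state is unknown on the first call
	for len(b) > 0 {
		var cluster []byte
		var width int
		cluster, b, width, state = uniseg.FirstGraphemeCluster(b, state)
		fmt.Printf("%q width=%d\n", string(cluster), width)
	}
}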

View file

@ -48,7 +48,7 @@ var grTransitions = map[[2]int][3]int{
{grControlLF, prAny}: {grAny, grBoundary, 40},
// GB3.
{grCR, prLF}: {grAny, grNoBoundary, 30},
{grCR, prLF}: {grControlLF, grNoBoundary, 30},
// GB6.
{grAny, prL}: {grL, grBoundary, 9990},
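
The practical effect of this one-line fix: after a CR+LF pair the automaton now sits in the control/LF state, so the grControlLF transition above forces a boundary before whatever follows, including a combining mark that would otherwise attach. A sketch of the observable behavior with the fix in place:

package main

import (
	"fmt"

	"github.com/rivo/uniseg"
)

func main() {
	// CR+LF is a single grapheme cluster (GB3).
	fmt.Println(uniseg.GraphemeClusterCount("\r\n")) // 1
	// A combining acute accent after CR+LF starts a new cluster (GB4);
	// with the old grAny target it could wrongly attach to the CR+LF.
	fmt.Println(uniseg.GraphemeClusterCount("\r\n\u0301")) // 2
}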

View file

@ -4,7 +4,7 @@ import "unicode/utf8"
// FirstLineSegment returns the prefix of the given byte slice after which a
// decision to break the string over to the next line can or must be made,
// according to the rules of Unicode Standard Annex #14. This is used to
// according to the rules of [Unicode Standard Annex #14]. This is used to
// implement line breaking.
//
// Line breaking, also known as word wrapping, is the process of breaking a
@ -35,7 +35,7 @@ import "unicode/utf8"
//
// Given an empty byte slice "b", the function returns nil values.
//
// Note that in accordance with UAX #14 LB3, the final segment will end with
// Note that in accordance with [UAX #14 LB3], the final segment will end with
// "mustBreak" set to true. You can choose to ignore this by checking if the
// length of the "rest" slice is 0 and calling [HasTrailingLineBreak] or
// [HasTrailingLineBreakInString] on the last rune.
@ -43,6 +43,9 @@ import "unicode/utf8"
// Note also that this algorithm may break within grapheme clusters. This is
// addressed in Section 8.2 Example 6 of UAX #14. To avoid this, you can use
// the [Step] function instead.
//
// [Unicode Standard Annex #14]: https://www.unicode.org/reports/tr14/
// [UAX #14 LB3]: https://www.unicode.org/reports/tr14/#Algorithm
func FirstLineSegment(b []byte, state int) (segment, rest []byte, mustBreak bool, newState int) {
// An empty byte slice returns nothing.
if len(b) == 0 {
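
A minimal usage sketch of the line-segmentation loop described above (same uniseg import path assumed):

package main

import (
	"fmt"

	"github.com/rivo/uniseg"
)

func main() {
	text := []byte("First line.\nA second, much longer line that a renderer may wrap.")
	state := -1
	for len(text) > 0 {
		var segment []byte
		var mustBreak bool
		segment, text, mustBreak, state = uniseg.FirstLineSegment(text, state)
		fmt.Printf("%q mustBreak=%v\n", segment, mustBreak)
	}
}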

View file

@ -3,7 +3,7 @@ package uniseg
import "unicode/utf8"
// FirstSentence returns the first sentence found in the given byte slice
// according to the rules of Unicode Standard Annex #29, Sentence Boundaries.
// according to the rules of [Unicode Standard Annex #29, Sentence Boundaries].
// This function can be called continuously to extract all sentences from a byte
// slice, as illustrated in the example below.
//
@ -17,6 +17,8 @@ import "unicode/utf8"
// slice is the sub-slice of the input slice containing the identified sentence.
//
// Given an empty byte slice "b", the function returns nil values.
//
// [Unicode Standard Annex #29, Sentence Boundaries]: http://unicode.org/reports/tr29/#Sentence_Boundaries
func FirstSentence(b []byte, state int) (sentence, rest []byte, newState int) {
// An empty byte slice returns nothing.
if len(b) == 0 {
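
A minimal sketch of the continuous-extraction pattern the comment refers to:

package main

import (
	"fmt"

	"github.com/rivo/uniseg"
)

func main() {
	text := []byte("Hello, world! Is anyone there?")
	state := -1
	for len(text) > 0 {
		var sentence []byte
		sentence, text, state = uniseg.FirstSentence(text, state)
		fmt.Printf("%q\n", sentence) // sentences keep their trailing spaces
	}
}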

View file

@ -83,10 +83,12 @@ const (
// has much better performance and makes no allocations. It lends itself well to
// large byte slices.
//
// Note that in accordance with UAX #14 LB3, the final segment will end with
// Note that in accordance with [UAX #14 LB3], the final segment will end with
// a mandatory line break (boundaries&MaskLine == LineMustBreak). You can choose
// to ignore this by checking if the length of the "rest" slice is 0 and calling
// [HasTrailingLineBreak] or [HasTrailingLineBreakInString] on the last rune.
//
// [UAX #14 LB3]: https://www.unicode.org/reports/tr14/#Algorithm
func Step(b []byte, state int) (cluster, rest []byte, boundaries int, newState int) {
// An empty byte slice returns nothing.
if len(b) == 0 {
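
A sketch of checking the line-break bits that the comment above describes, using the MaskLine/LineMustBreak constants it names:

package main

import (
	"fmt"

	"github.com/rivo/uniseg"
)

func main() {
	b := []byte("Hi, world!\n")
	state := -1
	for len(b) > 0 {
		var cluster []byte
		var boundaries int
		cluster, b, boundaries, state = uniseg.Step(b, state)
		// Per UAX #14 LB3, the final segment always reports a mandatory break.
		if boundaries&uniseg.MaskLine == uniseg.LineMustBreak {
			fmt.Printf("%q ends with a mandatory line break\n", cluster)
		}
	}
}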

View file

@ -3,7 +3,7 @@ package uniseg
import "unicode/utf8"
// FirstWord returns the first word found in the given byte slice according to
// the rules of Unicode Standard Annex #29, Word Boundaries. This function can
// the rules of [Unicode Standard Annex #29, Word Boundaries]. This function can
// be called continuously to extract all words from a byte slice, as illustrated
// in the example below.
//
@ -17,6 +17,8 @@ import "unicode/utf8"
// the sub-slice of the input slice containing the identified word.
//
// Given an empty byte slice "b", the function returns nil values.
//
// [Unicode Standard Annex #29, Word Boundaries]: http://unicode.org/reports/tr29/#Word_Boundaries
func FirstWord(b []byte, state int) (word, rest []byte, newState int) {
// An empty byte slice returns nothing.
if len(b) == 0 {
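
A minimal word-extraction sketch matching the loop pattern of the other First* functions:

package main

import (
	"fmt"

	"github.com/rivo/uniseg"
)

func main() {
	text := []byte("Hello, world!")
	state := -1
	for len(text) > 0 {
		var word []byte
		word, text, state = uniseg.FirstWord(text, state)
		// UAX #29 word boundaries also yield punctuation and spaces as segments.
		fmt.Printf("%q\n", word)
	}
}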

View file

@ -8,7 +8,6 @@ import (
"fmt"
"math"
"os"
"path/filepath"
"reflect"
"regexp"
"runtime"
@ -141,12 +140,11 @@ func CallerInfo() []string {
}
parts := strings.Split(file, "/")
file = parts[len(parts)-1]
if len(parts) > 1 {
filename := parts[len(parts)-1]
dir := parts[len(parts)-2]
if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
path, _ := filepath.Abs(file)
callers = append(callers, fmt.Sprintf("%s:%d", path, line))
if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" {
callers = append(callers, fmt.Sprintf("%s:%d", file, line))
}
}
@ -530,7 +528,7 @@ func isNil(object interface{}) bool {
[]reflect.Kind{
reflect.Chan, reflect.Func,
reflect.Interface, reflect.Map,
reflect.Ptr, reflect.Slice},
reflect.Ptr, reflect.Slice, reflect.UnsafePointer},
kind)
if isNilableKind && value.IsNil() {
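
With reflect.UnsafePointer added to the nilable kinds, a nil unsafe.Pointer is now reported as nil. A hypothetical test illustrating the change (TestNilUnsafePointer is not part of the library):

package example_test

import (
	"testing"
	"unsafe"

	"github.com/stretchr/testify/assert"
)

func TestNilUnsafePointer(t *testing.T) {
	var p unsafe.Pointer // zero value is nil
	assert.Nil(t, p)     // passes now that reflect.UnsafePointer is nilable
}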
@ -818,49 +816,44 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
return true // we consider nil to be equal to the nil set
}
defer func() {
if e := recover(); e != nil {
ok = false
}
}()
listKind := reflect.TypeOf(list).Kind()
subsetKind := reflect.TypeOf(subset).Kind()
if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
}
subsetKind := reflect.TypeOf(subset).Kind()
if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
}
subsetValue := reflect.ValueOf(subset)
if subsetKind == reflect.Map && listKind == reflect.Map {
listValue := reflect.ValueOf(list)
subsetKeys := subsetValue.MapKeys()
subsetMap := reflect.ValueOf(subset)
actualMap := reflect.ValueOf(list)
for i := 0; i < len(subsetKeys); i++ {
subsetKey := subsetKeys[i]
subsetElement := subsetValue.MapIndex(subsetKey).Interface()
listElement := listValue.MapIndex(subsetKey).Interface()
for _, k := range subsetMap.MapKeys() {
ev := subsetMap.MapIndex(k)
av := actualMap.MapIndex(k)
if !ObjectsAreEqual(subsetElement, listElement) {
return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...)
if !av.IsValid() {
return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...)
}
if !ObjectsAreEqual(ev.Interface(), av.Interface()) {
return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...)
}
}
return true
}
for i := 0; i < subsetValue.Len(); i++ {
element := subsetValue.Index(i).Interface()
subsetList := reflect.ValueOf(subset)
for i := 0; i < subsetList.Len(); i++ {
element := subsetList.Index(i).Interface()
ok, found := containsElement(list, element)
if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", list), msgAndArgs...)
}
if !found {
return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...)
return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, element), msgAndArgs...)
}
}
@ -879,34 +872,28 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...)
}
defer func() {
if e := recover(); e != nil {
ok = false
}
}()
listKind := reflect.TypeOf(list).Kind()
subsetKind := reflect.TypeOf(subset).Kind()
if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
}
subsetKind := reflect.TypeOf(subset).Kind()
if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
}
subsetValue := reflect.ValueOf(subset)
if subsetKind == reflect.Map && listKind == reflect.Map {
listValue := reflect.ValueOf(list)
subsetKeys := subsetValue.MapKeys()
subsetMap := reflect.ValueOf(subset)
actualMap := reflect.ValueOf(list)
for i := 0; i < len(subsetKeys); i++ {
subsetKey := subsetKeys[i]
subsetElement := subsetValue.MapIndex(subsetKey).Interface()
listElement := listValue.MapIndex(subsetKey).Interface()
for _, k := range subsetMap.MapKeys() {
ev := subsetMap.MapIndex(k)
av := actualMap.MapIndex(k)
if !ObjectsAreEqual(subsetElement, listElement) {
if !av.IsValid() {
return true
}
if !ObjectsAreEqual(ev.Interface(), av.Interface()) {
return true
}
}
@ -914,8 +901,9 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
}
for i := 0; i < subsetValue.Len(); i++ {
element := subsetValue.Index(i).Interface()
subsetList := reflect.ValueOf(subset)
for i := 0; i < subsetList.Len(); i++ {
element := subsetList.Index(i).Interface()
ok, found := containsElement(list, element)
if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
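
A hypothetical test showing the map support these rewrites add; the assertions mirror the key/value comparison logic above:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestMapSubset(t *testing.T) {
	list := map[string]int{"a": 1, "b": 2, "c": 3}

	assert.Subset(t, list, map[string]int{"a": 1})    // key and value both match
	assert.NotSubset(t, list, map[string]int{"a": 2}) // key exists, value differs
	assert.NotSubset(t, list, map[string]int{"z": 1}) // key missing entirely
}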

Some files were not shown because too many files have changed in this diff