vendor: run make vendor-update

Aliaksandr Valialkin 2023-10-02 21:49:16 +02:00
parent 55e9a9e3a0
commit 5eac0cdf42
No known key found for this signature in database
GPG key ID: A72BEC6CD3D0DED1
158 changed files with 6474 additions and 6171 deletions

go.mod (54 changed lines)

@ -3,7 +3,7 @@ module github.com/VictoriaMetrics/VictoriaMetrics
go 1.20
require (
cloud.google.com/go/storage v1.32.0
cloud.google.com/go/storage v1.33.0
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.2
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0
github.com/VictoriaMetrics/fastcache v1.12.1
@ -14,9 +14,9 @@ require (
github.com/VictoriaMetrics/metrics v1.24.0
github.com/VictoriaMetrics/metricsql v0.66.0
github.com/aws/aws-sdk-go-v2 v1.21.0
github.com/aws/aws-sdk-go-v2/config v1.18.39
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.83
github.com/aws/aws-sdk-go-v2/service/s3 v1.38.5
github.com/aws/aws-sdk-go-v2/config v1.18.43
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.88
github.com/aws/aws-sdk-go-v2/service/s3 v1.40.0
github.com/bmatcuk/doublestar/v4 v4.6.0
github.com/cespare/xxhash/v2 v2.2.0
github.com/cheggaaa/pb/v3 v3.1.4
@ -24,7 +24,7 @@ require (
github.com/golang/snappy v0.0.4
github.com/googleapis/gax-go/v2 v2.12.0
github.com/influxdata/influxdb v1.11.2
github.com/klauspost/compress v1.16.7
github.com/klauspost/compress v1.17.0
github.com/prometheus/prometheus v0.47.0
github.com/urfave/cli/v2 v2.25.7
github.com/valyala/fastjson v1.6.4
@ -36,12 +36,12 @@ require (
golang.org/x/net v0.15.0
golang.org/x/oauth2 v0.12.0
golang.org/x/sys v0.12.0
google.golang.org/api v0.138.0
google.golang.org/api v0.143.0
gopkg.in/yaml.v2 v2.4.0
)
require (
cloud.google.com/go v0.110.7 // indirect
cloud.google.com/go v0.110.8 // indirect
cloud.google.com/go/compute v1.23.0 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/iam v1.1.2 // indirect
@ -50,21 +50,21 @@ require (
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
github.com/aws/aws-sdk-go v1.45.4 // indirect
github.com/aws/aws-sdk-go v1.45.20 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.13.37 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.13.41 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.42 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.43 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.14 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.36 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.4 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.13.6 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.6 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.21.5 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.15.0 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.23.0 // indirect
github.com/aws/smithy-go v1.14.2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
@ -82,7 +82,7 @@ require (
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/s2a-go v0.1.7 // indirect
github.com/google/uuid v1.3.1 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.1 // indirect
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
@ -99,23 +99,23 @@ require (
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_golang v1.16.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/client_golang v1.17.0 // indirect
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/common/sigv4 v0.1.0 // indirect
github.com/prometheus/procfs v0.11.1 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/stretchr/testify v1.8.4 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 // indirect
go.opentelemetry.io/collector/semconv v0.84.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.43.0 // indirect
go.opentelemetry.io/otel v1.17.0 // indirect
go.opentelemetry.io/otel/metric v1.17.0 // indirect
go.opentelemetry.io/otel/trace v1.17.0 // indirect
go.opentelemetry.io/collector/pdata v1.0.0-rcv0015 // indirect
go.opentelemetry.io/collector/semconv v0.86.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
go.opentelemetry.io/otel v1.19.0 // indirect
go.opentelemetry.io/otel/metric v1.19.0 // indirect
go.opentelemetry.io/otel/trace v1.19.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/goleak v1.2.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
@ -126,10 +126,10 @@ require (
golang.org/x/time v0.3.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/grpc v1.58.0 // indirect
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect
google.golang.org/grpc v1.58.2 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

go.sum (108 changed lines)

@ -13,8 +13,8 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o=
cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI=
cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME=
cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@ -38,8 +38,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.32.0 h1:5w6DxEGOnktmJHarxAOUywxVW9lbNWIzlzzUltG/3+o=
cloud.google.com/go/storage v1.32.0/go.mod h1:Hhh/dogNRGca7IWv1RC2YqEn0c0G77ctA/OxflYkiD8=
cloud.google.com/go/storage v1.33.0 h1:PVrDOkIC8qQVa1P3SXGpQvfuJhN2LHOoyZvWs8D2X5M=
cloud.google.com/go/storage v1.33.0/go.mod h1:Hhh/dogNRGca7IWv1RC2YqEn0c0G77ctA/OxflYkiD8=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.2 h1:t5+QXLCK9SVi0PPdaY0PrFvYUo24KwA0QwxnaHRSVd4=
@ -87,26 +87,26 @@ github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu
github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.45.4 h1:6B8oTYNEncxga8EV1C6Q4iJNnpDIqLEigy0v0oh2qYw=
github.com/aws/aws-sdk-go v1.45.4/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go v1.45.20 h1:U/wLZEwqVB6o2XlcJ7um8kczx+A1X2MgO2y4wdKDQTs=
github.com/aws/aws-sdk-go v1.45.20/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go-v2 v1.21.0 h1:gMT0IW+03wtYJhRqTVYn0wLzwdnK9sRMcxmtfGzRdJc=
github.com/aws/aws-sdk-go-v2 v1.21.0/go.mod h1:/RfNgGmRxI+iFOB1OeJUyxiU+9s88k3pfHvDagGEp0M=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13 h1:OPLEkmhXf6xFPiz0bLeDArZIDx1NNS4oJyG4nv3Gct0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13/go.mod h1:gpAbvyDGQFozTEmlTFO8XcQKHzubdq0LzRyJpG6MiXM=
github.com/aws/aws-sdk-go-v2/config v1.18.39 h1:oPVyh6fuu/u4OiW4qcuQyEtk7U7uuNBmHmJSLg1AJsQ=
github.com/aws/aws-sdk-go-v2/config v1.18.39/go.mod h1:+NH/ZigdPckFpgB1TRcRuWCB/Kbbvkxc/iNAKTq5RhE=
github.com/aws/aws-sdk-go-v2/credentials v1.13.37 h1:BvEdm09+ZEh2XtN+PVHPcYwKY3wIeB6pw7vPRM4M9/U=
github.com/aws/aws-sdk-go-v2/credentials v1.13.37/go.mod h1:ACLrdkd4CLZyXOghZ8IYumQbcooAcp2jo/s2xsFH8IM=
github.com/aws/aws-sdk-go-v2/config v1.18.43 h1:IgdUtTRvUDC6eiJBqU6vh7bHFNAEBjQ8S+qJ7zVhDOs=
github.com/aws/aws-sdk-go-v2/config v1.18.43/go.mod h1:NiFev8qlgg8MPzw3fO/EwzMZeZwlJEKGwfpjRPA9Nvw=
github.com/aws/aws-sdk-go-v2/credentials v1.13.41 h1:dgbKq1tamtboYAKSXWbqL0lKO9rmEzEhbZFh9JQW/Bg=
github.com/aws/aws-sdk-go-v2/credentials v1.13.41/go.mod h1:cc3Fn7DkKbJalPtQnudHGZZ8ml9+hwtbc1CJONsYYqk=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11 h1:uDZJF1hu0EVT/4bogChk8DyjSF6fof6uL/0Y26Ma7Fg=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11/go.mod h1:TEPP4tENqBGO99KwVpV9MlOX4NSrSLP8u3KRy2CDwA8=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.83 h1:wcluDLIQ0uYaxv0fCWQRimbXkPdTgWHUD21j1CzXEwc=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.83/go.mod h1:nGCBuon134gW67yAtxHKV73x+tAcY/xG4ZPNPDB1h/I=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.88 h1:AxcMcV1uTY15jysvTiXC6Mgpb5nU1rnqH0PmgJ7ig80=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.88/go.mod h1:C6Kvpm4g92So11JEAHMK0trT6EEEe5g5uG5JrneR6zQ=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41 h1:22dGT7PneFMx4+b3pz7lMTRyN8ZKH7M2cW4GP9yUS2g=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41/go.mod h1:CrObHAuPneJBlfEJ5T3szXOUkLEThaGfvnhTf33buas=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35 h1:SijA0mgjV8E+8G45ltVHs0fvKpTj8xmZJ3VwhGKtUSI=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35/go.mod h1:SJC1nEVVva1g3pHAIdCp7QsRIkMmLAgoDquQ9Rr8kYw=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.42 h1:GPUcE/Yq7Ur8YSUk6lVkoIMWnJNO0HT18GUzCWCgCI0=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.42/go.mod h1:rzfdUlfA+jdgLDmPKjd3Chq9V7LVLYo1Nz++Wb91aRo=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.43 h1:g+qlObJH4Kn4n21g69DjspU0hKTjWtq7naZ9OLCv0ew=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.43/go.mod h1:rzfdUlfA+jdgLDmPKjd3Chq9V7LVLYo1Nz++Wb91aRo=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.4 h1:6lJvvkQ9HmbHZ4h/IEwclwv2mrTW8Uq1SOB/kXy0mfw=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.4/go.mod h1:1PrKYwxTM+zjpw9Y41KFtoJCQrJ34Z47Y4VgVbfndjo=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.14 h1:m0QTSI6pZYJTk5WSKx3fm5cNW/DCicVzULBgU/6IyD0=
@ -117,14 +117,14 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35 h1:CdzPW9kKi
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35/go.mod h1:QGF2Rs33W5MaN9gYdEQOBBFPLwTZkEhRwI33f7KIG0o=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.4 h1:v0jkRigbSD6uOdwcaUQmgEwG1BkPfAPDqaeNt/29ghg=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.4/go.mod h1:LhTyt8J04LL+9cIt7pYJ5lbS/U98ZmXovLOR/4LUsk8=
github.com/aws/aws-sdk-go-v2/service/s3 v1.38.5 h1:A42xdtStObqy7NGvzZKpnyNXvoOmm+FENobZ0/ssHWk=
github.com/aws/aws-sdk-go-v2/service/s3 v1.38.5/go.mod h1:rDGMZA7f4pbmTtPOk5v5UM2lmX6UAbRnMDJeDvnH7AM=
github.com/aws/aws-sdk-go-v2/service/sso v1.13.6 h1:2PylFCfKCEDv6PeSN09pC/VUiRd10wi1VfHG5FrW0/g=
github.com/aws/aws-sdk-go-v2/service/sso v1.13.6/go.mod h1:fIAwKQKBFu90pBxx07BFOMJLpRUGu8VOzLJakeY+0K4=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.6 h1:pSB560BbVj9ZlJZF4WYj5zsytWHWKxg+NgyGV4B2L58=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.6/go.mod h1:yygr8ACQRY2PrEcy3xsUI357stq2AxnFM6DIsR9lij4=
github.com/aws/aws-sdk-go-v2/service/sts v1.21.5 h1:CQBFElb0LS8RojMJlxRSo/HXipvTZW2S44Lt9Mk2aYQ=
github.com/aws/aws-sdk-go-v2/service/sts v1.21.5/go.mod h1:VC7JDqsqiwXukYEDjoHh9U0fOJtNWh04FPQz4ct4GGU=
github.com/aws/aws-sdk-go-v2/service/s3 v1.40.0 h1:wl5dxN1NONhTDQD9uaEvNsDRX29cBmGED/nl0jkWlt4=
github.com/aws/aws-sdk-go-v2/service/s3 v1.40.0/go.mod h1:rDGMZA7f4pbmTtPOk5v5UM2lmX6UAbRnMDJeDvnH7AM=
github.com/aws/aws-sdk-go-v2/service/sso v1.15.0 h1:vuGK1vHNP9zx0PfOrtPumbwR2af0ATQ1Z2H6p75AgRQ=
github.com/aws/aws-sdk-go-v2/service/sso v1.15.0/go.mod h1:fIAwKQKBFu90pBxx07BFOMJLpRUGu8VOzLJakeY+0K4=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.1 h1:8lKOidPkmSmfUtiTgtdXWgaKItCZ/g75/jEk6Ql6GsA=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.1/go.mod h1:yygr8ACQRY2PrEcy3xsUI357stq2AxnFM6DIsR9lij4=
github.com/aws/aws-sdk-go-v2/service/sts v1.23.0 h1:pyvfUqkNLMipdKNAtu7OVbRxUrR2BMaKccIPpk/Hkak=
github.com/aws/aws-sdk-go-v2/service/sts v1.23.0/go.mod h1:VC7JDqsqiwXukYEDjoHh9U0fOJtNWh04FPQz4ct4GGU=
github.com/aws/smithy-go v1.14.2 h1:MJU9hqBGbvWZdApzpvoF2WAIJDbtjK2NDJSiJP7HblQ=
github.com/aws/smithy-go v1.14.2/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@ -273,8 +273,8 @@ github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM=
github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w=
github.com/googleapis/enterprise-certificate-proxy v0.3.1 h1:SBWmZhjUDRorQxrN0nwzf+AHBxnbFjViHQS4P0yVpmQ=
github.com/googleapis/enterprise-certificate-proxy v0.3.1/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
@ -323,8 +323,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM=
github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@ -380,14 +380,14 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM=
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
@ -400,8 +400,8 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/prometheus v0.47.0 h1:tIJJKZGlmrMVsvIt6rMfB8he7CRHEc8ZxS5ubcZtbkM=
github.com/prometheus/prometheus v0.47.0/go.mod h1:J/bmOSjgH7lFxz2gZhrWEZs2i64vMS+HIuZfmYNhJ/M=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@ -463,18 +463,18 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 h1:iT5qH0NLmkGeIdDtnBogYDx7L58t6CaWGL378DEo2QY=
go.opentelemetry.io/collector/pdata v1.0.0-rcv0014/go.mod h1:BRvDrx43kiSoUx3mr7SoA7h9B8+OY99mUK+CZSQFWW4=
go.opentelemetry.io/collector/semconv v0.84.0 h1:sI1B8ebHhfJPd87iyba66TDnluVFvYu8CEpSjKHqIDc=
go.opentelemetry.io/collector/semconv v0.84.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.43.0 h1:HKORGpiOY0R0nAPtKx/ub8/7XoHhRooP8yNRkuPfelI=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.43.0/go.mod h1:e+y1M74SYXo/FcIx3UATwth2+5dDkM8dBi7eXg1tbw8=
go.opentelemetry.io/otel v1.17.0 h1:MW+phZ6WZ5/uk2nd93ANk/6yJ+dVrvNWUjGhnnFU5jM=
go.opentelemetry.io/otel v1.17.0/go.mod h1:I2vmBGtFaODIVMBSTPVDlJSzBDNf93k60E6Ft0nyjo0=
go.opentelemetry.io/otel/metric v1.17.0 h1:iG6LGVz5Gh+IuO0jmgvpTB6YVrCGngi8QGm+pMd8Pdc=
go.opentelemetry.io/otel/metric v1.17.0/go.mod h1:h4skoxdZI17AxwITdmdZjjYJQH5nzijUUjm+wtPph5o=
go.opentelemetry.io/otel/trace v1.17.0 h1:/SWhSRHmDPOImIAetP1QAeMnZYiQXrTy4fMMYOdSKWQ=
go.opentelemetry.io/otel/trace v1.17.0/go.mod h1:I/4vKTgFclIsXRVucpH25X0mpFSczM7aHeaz0ZBLWjY=
go.opentelemetry.io/collector/pdata v1.0.0-rcv0015 h1:8PzrQFk3oKiT1Sd5EmNEcagdMyt1KcBy5/OyF5He5gY=
go.opentelemetry.io/collector/pdata v1.0.0-rcv0015/go.mod h1:I1PqyHJlsXjANC73tp43nDId7/jiv82NoZZ6uS0xdwM=
go.opentelemetry.io/collector/semconv v0.86.0 h1:bLlPe/JYNjQHo744cqi7iIEybuLv+M5DntUwQPTrvZo=
go.opentelemetry.io/collector/semconv v0.86.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q=
go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
@ -718,8 +718,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.138.0 h1:K/tVp05MxNVbHShRw9m7e9VJGdagNeTdMzqPH7AUqr0=
google.golang.org/api v0.138.0/go.mod h1:4xyob8CxC+0GChNBvEUAk8VBKNvYOTWM9T3v3UfRxuY=
google.golang.org/api v0.143.0 h1:o8cekTkqhywkbZT6p1UHJPZ9+9uuCAJs/KYomxZB8fA=
google.golang.org/api v0.143.0/go.mod h1:FoX9DO9hT7DLNn97OuoZAGSDuNAXdJRuGK98rSUgurk=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -757,12 +757,12 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY=
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4=
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q=
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0=
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk=
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU=
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@ -776,8 +776,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.58.0 h1:32JY8YpPMSR45K+c3o6b8VL73V+rR8k+DeMIr4vRH8o=
google.golang.org/grpc v1.58.0/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I=
google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=


@ -349,6 +349,26 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/bigquery/biglake/apiv1": {
"api_shortname": "biglake",
"distribution_name": "cloud.google.com/go/bigquery/biglake/apiv1",
"description": "BigLake API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/biglake/apiv1",
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/bigquery/biglake/apiv1alpha1": {
"api_shortname": "biglake",
"distribution_name": "cloud.google.com/go/bigquery/biglake/apiv1alpha1",
"description": "BigLake API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/biglake/apiv1alpha1",
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/bigquery/connection/apiv1": {
"api_shortname": "bigqueryconnection",
"distribution_name": "cloud.google.com/go/bigquery/connection/apiv1",
@ -659,6 +679,16 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/config/apiv1": {
"api_shortname": "config",
"distribution_name": "cloud.google.com/go/config/apiv1",
"description": "Infrastructure Manager API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/config/latest/apiv1",
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/contactcenterinsights/apiv1": {
"api_shortname": "contactcenterinsights",
"distribution_name": "cloud.google.com/go/contactcenterinsights/apiv1",
@ -1249,6 +1279,16 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/language/apiv2": {
"api_shortname": "language",
"distribution_name": "cloud.google.com/go/language/apiv2",
"description": "Cloud Natural Language API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv2",
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/lifesciences/apiv2beta": {
"api_shortname": "lifesciences",
"distribution_name": "cloud.google.com/go/lifesciences/apiv2beta",
@ -1509,6 +1549,16 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/notebooks/apiv2": {
"api_shortname": "notebooks",
"distribution_name": "cloud.google.com/go/notebooks/apiv2",
"description": "Notebooks API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/notebooks/latest/apiv2",
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/optimization/apiv1": {
"api_shortname": "cloudoptimization",
"distribution_name": "cloud.google.com/go/optimization/apiv1",
@ -1639,6 +1689,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/policytroubleshooter/iam/apiv3": {
"api_shortname": "policytroubleshooter",
"distribution_name": "cloud.google.com/go/policytroubleshooter/iam/apiv3",
"description": "Policy Troubleshooter API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/policytroubleshooter/latest/iam/apiv3",
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/privatecatalog/apiv1beta1": {
"api_shortname": "cloudprivatecatalog",
"distribution_name": "cloud.google.com/go/privatecatalog/apiv1beta1",


@ -1,6 +1,13 @@
# Changes
## [1.33.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.32.0...storage/v1.33.0) (2023-09-07)
### Features
* **storage:** Export gRPC client constructor ([#8509](https://github.com/googleapis/google-cloud-go/issues/8509)) ([1a928ae](https://github.com/googleapis/google-cloud-go/commit/1a928ae205f2325cb5206304af4d609dc3c1447a))
## [1.32.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.31.0...storage/v1.32.0) (2023-08-15)


@ -331,6 +331,33 @@ to add a [custom audit logging] header:
// Use client as usual with the context and the additional headers will be sent.
client.Bucket("my-bucket").Attrs(ctx)
# Experimental gRPC API
This package includes support for the Cloud Storage gRPC API, which is currently
in preview. This implementation uses gRPC rather than the current JSON & XML
APIs to make requests to Cloud Storage. If you would like to try the API,
please contact your GCP account rep for more information. The gRPC API is not
yet generally available, so it may be subject to breaking changes.
To create a client which will use gRPC, use the alternate constructor:
ctx := context.Background()
client, err := storage.NewGRPCClient(ctx)
if err != nil {
// TODO: Handle error.
}
// Use client as usual.
If the application is running within GCP, users may get better performance by
enabling DirectPath (enabling requests to skip some proxy steps). To enable,
set the environment variable `GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS=true` and add
the following side-effect imports to your application:
import (
_ "google.golang.org/grpc/balancer/rls"
_ "google.golang.org/grpc/xds/googledirectpath"
)
[Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam
[XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object
[Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy
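
Taken together, the new documentation amounts to the following minimal program. This is a sketch rather than anything from the commit; the bucket name is a placeholder, and the gRPC API still requires the allow-listing described above:

	package main

	import (
		"context"
		"fmt"
		"log"

		"cloud.google.com/go/storage"
	)

	func main() {
		ctx := context.Background()
		// NewGRPCClient is the constructor exported in storage v1.33.0.
		client, err := storage.NewGRPCClient(ctx)
		if err != nil {
			log.Fatal(err)
		}
		defer client.Close()

		// Use the client as usual; "my-bucket" is a placeholder.
		attrs, err := client.Bucket("my-bucket").Attrs(ctx)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(attrs.Location)
	}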


@ -45,8 +45,6 @@ import (
// httpStorageClient is the HTTP-JSON API implementation of the transport-agnostic
// storageClient interface.
//
// This is an experimental API and not intended for public use.
type httpStorageClient struct {
creds *google.Credentials
hc *http.Client
@ -59,8 +57,6 @@ type httpStorageClient struct {
// newHTTPStorageClient initializes a new storageClient that uses the HTTP-JSON
// Storage API.
//
// This is an experimental API and not intended for public use.
func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) {
s := initSettings(opts...)
o := s.clientOption


@ -15,4 +15,4 @@
package internal
// Version is the current tagged release of the library.
const Version = "1.32.0"
const Version = "1.33.0"


@ -123,7 +123,7 @@ type Client struct {
useGRPC bool
}
// NewClient creates a new Google Cloud Storage client.
// NewClient creates a new Google Cloud Storage client using the HTTP transport.
// The default scope is ScopeFullControl. To use a different scope, like
// ScopeReadOnly, use option.WithScopes.
//
@ -133,12 +133,6 @@ type Client struct {
// You may configure the client by passing in options from the [google.golang.org/api/option]
// package. You may also use options defined in this package, such as [WithJSONReads].
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
// Use the experimental gRPC client if the env var is set.
// This is an experimental API and not intended for public use.
if withGRPC := os.Getenv("STORAGE_USE_GRPC"); withGRPC != "" {
return newGRPCClient(ctx, opts...)
}
var creds *google.Credentials
// In general, it is recommended to use raw.NewService instead of htransport.NewClient
@ -220,11 +214,20 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
}, nil
}
// newGRPCClient creates a new Storage client that initializes a gRPC-based
// client. Calls that have not been implemented in gRPC will panic.
// NewGRPCClient creates a new Storage client using the gRPC transport and API.
// Client methods which have not been implemented in gRPC will return an error.
// In particular, methods for Cloud Pub/Sub notifications are not supported.
//
// This is an experimental API and not intended for public use.
func newGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
// The storage gRPC API is still in preview and not yet publicly available.
// If you would like to use the API, please first contact your GCP account rep to
// request access. The API may be subject to breaking changes.
//
// Clients should be reused instead of created as needed. The methods of Client
// are safe for concurrent use by multiple goroutines.
//
// You may configure the client by passing in options from the [google.golang.org/api/option]
// package.
func NewGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
opts = append(defaultGRPCOptions(), opts...)
tc, err := newGRPCStorageClient(ctx, withClientOptions(opts...))
if err != nil {
@ -2187,8 +2190,6 @@ func toProjectResource(project string) string {
// setConditionProtoField uses protobuf reflection to set named condition field
// to the given condition value if supported on the protobuf message.
//
// This is an experimental API and not intended for public use.
func setConditionProtoField(m protoreflect.Message, f string, v int64) bool {
fields := m.Descriptor().Fields()
if rf := fields.ByName(protoreflect.Name(f)); rf != nil {
@ -2201,8 +2202,6 @@ func setConditionProtoField(m protoreflect.Message, f string, v int64) bool {
// applyCondsProto validates and attempts to set the conditions on a protobuf
// message using protobuf reflection.
//
// This is an experimental API and not intended for public use.
func applyCondsProto(method string, gen int64, conds *Conditions, msg proto.Message) error {
rmsg := msg.ProtoReflect()


@ -1,3 +1,21 @@
# v1.18.43 (2023-10-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.42 (2023-09-22)
* **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0.
* **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats.
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.41 (2023-09-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.40 (2023-09-18)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.39 (2023-09-05)
* **Dependency Update**: Updated to the latest SDK module versions


@ -3,4 +3,4 @@
package config
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.18.39"
const goModuleVersion = "1.18.43"


@ -740,6 +740,8 @@ func mergeSections(dst *ini.Sections, src ini.Sections) error {
defaultsModeKey,
retryModeKey,
caBundleKey,
roleDurationSecondsKey,
retryMaxAttemptsKey,
ssoSessionNameKey,
ssoAccountIDKey,
@ -753,16 +755,6 @@ func mergeSections(dst *ini.Sections, src ini.Sections) error {
}
}
intKeys := []string{
roleDurationSecondsKey,
retryMaxAttemptsKey,
}
for i := range intKeys {
if err := mergeIntKey(&srcSection, &dstSection, sectionName, intKeys[i]); err != nil {
return err
}
}
// set srcSection on dst srcSection
*dst = dst.SetSection(sectionName, dstSection)
}
@ -789,26 +781,6 @@ func mergeStringKey(srcSection *ini.Section, dstSection *ini.Section, sectionNam
return nil
}
func mergeIntKey(srcSection *ini.Section, dstSection *ini.Section, sectionName, key string) error {
if srcSection.Has(key) {
srcValue := srcSection.Int(key)
v, err := ini.NewIntValue(srcValue)
if err != nil {
return fmt.Errorf("error merging %s, %w", key, err)
}
if dstSection.Has(key) {
dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, key,
dstSection.SourceFile[key], srcSection.SourceFile[key]))
}
dstSection.UpdateValue(key, v)
dstSection.UpdateSourceFile(key, srcSection.SourceFile[key])
}
return nil
}
func newMergeKeyLogMessage(sectionName, key, dstSourceFile, srcSourceFile string) string {
return fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
"with a %v value found in a duplicate profile defined at file %v. \n",
@ -962,9 +934,16 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er
updateString(&c.SSOAccountID, section, ssoAccountIDKey)
updateString(&c.SSORoleName, section, ssoRoleNameKey)
// we're retaining a behavioral quirk with this field that existed before
// the removal of literal parsing for #2276:
// - if the key is missing, the config field will not be set
// - if the key is set to a non-numeric, the config field will be set to 0
if section.Has(roleDurationSecondsKey) {
d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second
c.RoleDurationSeconds = &d
if v, ok := section.Int(roleDurationSecondsKey); ok {
c.RoleDurationSeconds = aws.Duration(time.Duration(v) * time.Second)
} else {
c.RoleDurationSeconds = aws.Duration(time.Duration(0))
}
}
updateString(&c.CredentialProcess, section, credentialProcessKey)
@ -1314,12 +1293,13 @@ func updateInt(dst *int, section ini.Section, key string) error {
if !section.Has(key) {
return nil
}
if vt, _ := section.ValueType(key); vt != ini.IntegerType {
return fmt.Errorf("invalid value %s=%s, expect integer",
key, section.String(key))
v, ok := section.Int(key)
if !ok {
return fmt.Errorf("invalid value %s=%s, expect integer", key, section.String(key))
}
*dst = int(section.Int(key))
*dst = int(v)
return nil
}
@ -1329,7 +1309,10 @@ func updateBool(dst *bool, section ini.Section, key string) {
if !section.Has(key) {
return
}
*dst = section.Bool(key)
// retains pre-#2276 behavior where non-bool value would resolve to false
v, _ := section.Bool(key)
*dst = v
}
// updateBoolPtr will only update the dst with the value in the section key,
@ -1338,8 +1321,11 @@ func updateBoolPtr(dst **bool, section ini.Section, key string) {
if !section.Has(key) {
return
}
// retains pre-#2276 behavior where non-bool value would resolve to false
v, _ := section.Bool(key)
*dst = new(bool)
**dst = section.Bool(key)
**dst = v
}
// updateEndpointDiscoveryType will only update the dst with the value in the section, if
@ -1371,7 +1357,8 @@ func updateUseDualStackEndpoint(dst *aws.DualStackEndpointState, section ini.Sec
return
}
if section.Bool(key) {
// retains pre-#2276 behavior where non-bool value would resolve to false
if v, _ := section.Bool(key); v {
*dst = aws.DualStackEndpointStateEnabled
} else {
*dst = aws.DualStackEndpointStateDisabled
@ -1387,7 +1374,8 @@ func updateUseFIPSEndpoint(dst *aws.FIPSEndpointState, section ini.Section, key
return
}
if section.Bool(key) {
// retains pre-#2276 behavior where non-bool value would resolve to false
if v, _ := section.Bool(key); v {
*dst = aws.FIPSEndpointStateEnabled
} else {
*dst = aws.FIPSEndpointStateDisabled

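The common thread in this file is the switch from single-value INI accessors to comma-ok accessors, so that invalid numeric profile values are reported instead of silently collapsing to 0, except where the pre-#2276 quirks are deliberately retained, as the comments note. A self-contained toy of the pattern follows; the section type here is a stand-in for the SDK's internal ini.Section, not the real type:

	package main

	import (
		"fmt"
		"strconv"
	)

	// section is a stand-in for the SDK's internal ini.Section.
	type section map[string]string

	// Int mirrors the new comma-ok accessor: the raw value is parsed on
	// demand and the bool reports whether it was a valid integer.
	func (s section) Int(key string) (int64, bool) {
		v, err := strconv.ParseInt(s[key], 0, 64)
		if err != nil {
			return 0, false
		}
		return v, true
	}

	func main() {
		s := section{"max_attempts": "3", "duration_seconds": "oops"}

		if v, ok := s.Int("max_attempts"); ok {
			fmt.Println("max_attempts =", v) // max_attempts = 3
		}
		// Previously a non-numeric value silently became 0; now the
		// caller can reject it, as updateInt does above.
		if _, ok := s.Int("duration_seconds"); !ok {
			fmt.Println("duration_seconds: invalid value, expect integer")
		}
	}
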

@ -1,3 +1,19 @@
# v1.13.41 (2023-10-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.40 (2023-09-22)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.39 (2023-09-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.38 (2023-09-18)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.37 (2023-09-05)
* **Dependency Update**: Updated to the latest SDK module versions


@ -3,4 +3,4 @@
package credentials
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.13.37"
const goModuleVersion = "1.13.41"


@ -1,3 +1,23 @@
# v1.11.88 (2023-10-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.87 (2023-09-26)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.86 (2023-09-22)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.85 (2023-09-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.84 (2023-09-18)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.83 (2023-09-05)
* **Dependency Update**: Updated to the latest SDK module versions


@ -3,4 +3,4 @@
package manager
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.11.83"
const goModuleVersion = "1.11.88"


@ -1,3 +1,8 @@
# v1.3.43 (2023-09-22)
* **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0.
* **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats.
# v1.3.42 (2023-08-21)
* **Dependency Update**: Updated to the latest SDK module versions


@ -3,4 +3,4 @@
package ini
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.3.42"
const goModuleVersion = "1.3.43"


@ -12,34 +12,6 @@ var (
runesFalse = []rune("false")
)
var literalValues = [][]rune{
runesTrue,
runesFalse,
}
func isBoolValue(b []rune) bool {
for _, lv := range literalValues {
if isCaselessLitValue(lv, b) {
return true
}
}
return false
}
func isLitValue(want, have []rune) bool {
if len(have) < len(want) {
return false
}
for i := 0; i < len(want); i++ {
if want[i] != have[i] {
return false
}
}
return true
}
// isCaselessLitValue is a caseless value comparison, assumes want is already lower-cased for efficiency.
func isCaselessLitValue(want, have []rune) bool {
if len(have) < len(want) {
@ -55,68 +27,6 @@ func isCaselessLitValue(want, have []rune) bool {
return true
}
// isNumberValue will return whether not the leading characters in
// a byte slice is a number. A number is delimited by whitespace or
// the newline token.
//
// A number is defined to be in a binary, octal, decimal (int | float), hex format,
// or in scientific notation.
func isNumberValue(b []rune) bool {
negativeIndex := 0
helper := numberHelper{}
needDigit := false
for i := 0; i < len(b); i++ {
negativeIndex++
switch b[i] {
case '-':
if helper.IsNegative() || negativeIndex != 1 {
return false
}
helper.Determine(b[i])
needDigit = true
continue
case 'e', 'E':
if err := helper.Determine(b[i]); err != nil {
return false
}
negativeIndex = 0
needDigit = true
continue
case 'b':
if helper.numberFormat == hex {
break
}
fallthrough
case 'o', 'x':
needDigit = true
if i == 0 {
return false
}
fallthrough
case '.':
if err := helper.Determine(b[i]); err != nil {
return false
}
needDigit = true
continue
}
if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) {
return !needDigit
}
if !helper.CorrectByte(b[i]) {
return false
}
needDigit = false
}
return !needDigit
}
func isValid(b []rune) (bool, int, error) {
if len(b) == 0 {
// TODO: should probably return an error
@ -138,14 +48,8 @@ func (v ValueType) String() string {
switch v {
case NoneType:
return "NONE"
case DecimalType:
return "FLOAT"
case IntegerType:
return "INT"
case StringType:
return "STRING"
case BoolType:
return "BOOL"
}
return ""
@ -154,11 +58,9 @@ func (v ValueType) String() string {
// ValueType enums
const (
NoneType = ValueType(iota)
DecimalType
IntegerType
StringType
QuotedStringType
BoolType
// FUTURE(2226) MapType
)
// Value is a union container
@ -166,10 +68,8 @@ type Value struct {
Type ValueType
raw []rune
integer int64
decimal float64
boolean bool
str string
// FUTURE(2226) mp map[string]string
}
func newValue(t ValueType, base int, raw []rune) (Value, error) {
@ -177,36 +77,15 @@ func newValue(t ValueType, base int, raw []rune) (Value, error) {
Type: t,
raw: raw,
}
var err error
switch t {
case DecimalType:
v.decimal, err = strconv.ParseFloat(string(raw), 64)
case IntegerType:
if base != 10 {
raw = raw[2:]
}
v.integer, err = strconv.ParseInt(string(raw), base, 64)
case StringType:
v.str = string(raw)
case QuotedStringType:
v.str = string(raw[1 : len(raw)-1])
case BoolType:
v.boolean = isCaselessLitValue(runesTrue, v.raw)
}
// issue 2253
//
// if the value trying to be parsed is too large, then we will use
// the 'StringType' and raw value instead.
if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange {
v.Type = StringType
v.str = string(raw)
err = nil
}
return v, err
return v, nil
}
// NewStringValue returns a Value type generated using a string input.
@ -214,24 +93,12 @@ func NewStringValue(str string) (Value, error) {
return newValue(StringType, 10, []rune(str))
}
// NewIntValue returns a Value type generated using an int64 input.
func NewIntValue(i int64) (Value, error) {
v := strconv.FormatInt(i, 10)
return newValue(IntegerType, 10, []rune(v))
}
func (v Value) String() string {
switch v.Type {
case DecimalType:
return fmt.Sprintf("decimal: %f", v.decimal)
case IntegerType:
return fmt.Sprintf("integer: %d", v.integer)
case StringType:
return fmt.Sprintf("string: %s", string(v.raw))
case QuotedStringType:
return fmt.Sprintf("quoted string: %s", string(v.raw))
case BoolType:
return fmt.Sprintf("bool: %t", v.boolean)
default:
return "union not set"
}
@ -249,24 +116,6 @@ func newLitToken(b []rune) (Token, int, error) {
}
token = newToken(TokenLit, b[:n], QuotedStringType)
} else if isNumberValue(b) {
var base int
base, n, err = getNumericalValue(b)
if err != nil {
return token, 0, err
}
value := b[:n]
vType := IntegerType
if contains(value, '.') || hasExponent(value) {
vType = DecimalType
}
token = newToken(TokenLit, value, vType)
token.base = base
} else if isBoolValue(b) {
n, err = getBoolValue(b)
token = newToken(TokenLit, b[:n], BoolType)
} else {
n, err = getValue(b)
token = newToken(TokenLit, b[:n], StringType)
@ -276,18 +125,33 @@ func newLitToken(b []rune) (Token, int, error) {
}
// IntValue returns an integer value
func (v Value) IntValue() int64 {
return v.integer
func (v Value) IntValue() (int64, bool) {
i, err := strconv.ParseInt(string(v.raw), 0, 64)
if err != nil {
return 0, false
}
return i, true
}
// FloatValue returns a float value
func (v Value) FloatValue() float64 {
return v.decimal
func (v Value) FloatValue() (float64, bool) {
f, err := strconv.ParseFloat(string(v.raw), 64)
if err != nil {
return 0, false
}
return f, true
}
// BoolValue returns a bool value
func (v Value) BoolValue() bool {
return v.boolean
func (v Value) BoolValue() (bool, bool) {
// we don't use ParseBool as it recognizes more than what we've
// historically supported
if isCaselessLitValue(runesTrue, v.raw) {
return true, true
} else if isCaselessLitValue(runesFalse, v.raw) {
return false, true
}
return false, false
}
func isTrimmable(r rune) bool {

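The net effect of this refactor is that Value keeps only the raw runes and parses them on demand, with each accessor returning a comma-ok result instead of a pre-parsed field. A standalone sketch of the same idea, mirroring the accessors above rather than the internal package itself:

	package main

	import (
		"fmt"
		"strconv"
		"strings"
	)

	// value mirrors the slimmed-down ini Value: only the raw runes survive.
	type value struct {
		raw []rune
	}

	// IntValue parses on demand; base 0 accepts decimal, 0x, 0o, and 0b forms.
	func (v value) IntValue() (int64, bool) {
		i, err := strconv.ParseInt(string(v.raw), 0, 64)
		if err != nil {
			return 0, false
		}
		return i, true
	}

	// BoolValue accepts only the caseless true/false literals the package
	// historically supported, rather than everything strconv.ParseBool allows.
	func (v value) BoolValue() (bool, bool) {
		s := string(v.raw)
		if strings.EqualFold(s, "true") {
			return true, true
		}
		if strings.EqualFold(s, "false") {
			return false, true
		}
		return false, false
	}

	func main() {
		fmt.Println(value{raw: []rune("0x2A")}.IntValue())      // 42 true
		fmt.Println(value{raw: []rune("TRUE")}.BoolValue())     // true true
		fmt.Println(value{raw: []rune("12monkeys")}.IntValue()) // 0 false
	}
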

@ -1,152 +0,0 @@
package ini
import (
"bytes"
"fmt"
"strconv"
)
const (
none = numberFormat(iota)
binary
octal
decimal
hex
exponent
)
type numberFormat int
// numberHelper is used to dictate what format a number is in
// and what to do for negative values. Since -1e-4 is a valid
// number, we cannot just simply check for duplicate negatives.
type numberHelper struct {
numberFormat numberFormat
negative bool
negativeExponent bool
}
func (b numberHelper) Exists() bool {
return b.numberFormat != none
}
func (b numberHelper) IsNegative() bool {
return b.negative || b.negativeExponent
}
func (b *numberHelper) Determine(c rune) error {
if b.Exists() {
return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c)))
}
switch c {
case 'b':
b.numberFormat = binary
case 'o':
b.numberFormat = octal
case 'x':
b.numberFormat = hex
case 'e', 'E':
b.numberFormat = exponent
case '-':
if b.numberFormat != exponent {
b.negative = true
} else {
b.negativeExponent = true
}
case '.':
b.numberFormat = decimal
default:
return NewParseError(fmt.Sprintf("invalid number character: %v", string(c)))
}
return nil
}
func (b numberHelper) CorrectByte(c rune) bool {
switch {
case b.numberFormat == binary:
if !isBinaryByte(c) {
return false
}
case b.numberFormat == octal:
if !isOctalByte(c) {
return false
}
case b.numberFormat == hex:
if !isHexByte(c) {
return false
}
case b.numberFormat == decimal:
if !isDigit(c) {
return false
}
case b.numberFormat == exponent:
if !isDigit(c) {
return false
}
case b.negativeExponent:
if !isDigit(c) {
return false
}
case b.negative:
if !isDigit(c) {
return false
}
default:
if !isDigit(c) {
return false
}
}
return true
}
func (b numberHelper) Base() int {
switch b.numberFormat {
case binary:
return 2
case octal:
return 8
case hex:
return 16
default:
return 10
}
}
func (b numberHelper) String() string {
buf := bytes.Buffer{}
i := 0
switch b.numberFormat {
case binary:
i++
buf.WriteString(strconv.Itoa(i) + ": binary format\n")
case octal:
i++
buf.WriteString(strconv.Itoa(i) + ": octal format\n")
case hex:
i++
buf.WriteString(strconv.Itoa(i) + ": hex format\n")
case exponent:
i++
buf.WriteString(strconv.Itoa(i) + ": exponent format\n")
default:
i++
buf.WriteString(strconv.Itoa(i) + ": integer format\n")
}
if b.negative {
i++
buf.WriteString(strconv.Itoa(i) + ": negative format\n")
}
if b.negativeExponent {
i++
buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n")
}
return buf.String()
}


@ -41,149 +41,6 @@ func getStringValue(b []rune) (int, error) {
return i + 1, nil
}
// getBoolValue will return a boolean and the amount
// of bytes read
//
// an error will be returned if the boolean is not of a correct
// value
func getBoolValue(b []rune) (int, error) {
if len(b) < 4 {
return 0, NewParseError("invalid boolean value")
}
n := 0
for _, lv := range literalValues {
if len(lv) > len(b) {
continue
}
if isCaselessLitValue(lv, b) {
n = len(lv)
}
}
if n == 0 {
return 0, NewParseError("invalid boolean value")
}
return n, nil
}
// getNumericalValue will return a numerical string, the amount
// of bytes read, and the base of the number
//
// an error will be returned if the number is not of a correct
// value
func getNumericalValue(b []rune) (int, int, error) {
if !isDigit(b[0]) {
return 0, 0, NewParseError("invalid digit value")
}
i := 0
helper := numberHelper{}
loop:
for negativeIndex := 0; i < len(b); i++ {
negativeIndex++
if !isDigit(b[i]) {
switch b[i] {
case '-':
if helper.IsNegative() || negativeIndex != 1 {
return 0, 0, NewParseError("parse error '-'")
}
n := getNegativeNumber(b[i:])
i += (n - 1)
helper.Determine(b[i])
continue
case '.':
if err := helper.Determine(b[i]); err != nil {
return 0, 0, err
}
case 'e', 'E':
if err := helper.Determine(b[i]); err != nil {
return 0, 0, err
}
negativeIndex = 0
case 'b':
if helper.numberFormat == hex {
break
}
fallthrough
case 'o', 'x':
if i == 0 && b[i] != '0' {
return 0, 0, NewParseError("incorrect base format, expected leading '0'")
}
if i != 1 {
return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i))
}
if err := helper.Determine(b[i]); err != nil {
return 0, 0, err
}
default:
if isWhitespace(b[i]) {
break loop
}
if isNewline(b[i:]) {
break loop
}
if !(helper.numberFormat == hex && isHexByte(b[i])) {
if i+2 < len(b) && !isNewline(b[i:i+2]) {
return 0, 0, NewParseError("invalid numerical character")
} else if !isNewline([]rune{b[i]}) {
return 0, 0, NewParseError("invalid numerical character")
}
break loop
}
}
}
}
return helper.Base(), i, nil
}
// isDigit will return whether or not something is an integer
func isDigit(b rune) bool {
return b >= '0' && b <= '9'
}
func hasExponent(v []rune) bool {
return contains(v, 'e') || contains(v, 'E')
}
func isBinaryByte(b rune) bool {
switch b {
case '0', '1':
return true
default:
return false
}
}
func isOctalByte(b rune) bool {
switch b {
case '0', '1', '2', '3', '4', '5', '6', '7':
return true
default:
return false
}
}
func isHexByte(b rune) bool {
if isDigit(b) {
return true
}
return (b >= 'A' && b <= 'F') ||
(b >= 'a' && b <= 'f')
}
func getValue(b []rune) (int, error) {
i := 0
@ -211,24 +68,6 @@ func getValue(b []rune) (int, error) {
return i, nil
}
// getNegativeNumber will return a negative number from a
// byte slice. This will iterate through all characters until
// a non-digit has been found.
func getNegativeNumber(b []rune) int {
if b[0] != '-' {
return 0
}
i := 1
for ; i < len(b); i++ {
if !isDigit(b[i]) {
return i
}
}
return i
}
// isEscaped will return whether or not the character is an escaped
// character.
func isEscaped(value []rune, b rune) bool {


@ -245,17 +245,17 @@ func (t Section) ValueType(k string) (ValueType, bool) {
}
// Bool returns a bool value at k
func (t Section) Bool(k string) bool {
func (t Section) Bool(k string) (bool, bool) {
return t.values[k].BoolValue()
}
// Int returns an integer value at k
func (t Section) Int(k string) int64 {
func (t Section) Int(k string) (int64, bool) {
return t.values[k].IntValue()
}
// Float64 returns a float value at k
func (t Section) Float64(k string) float64 {
func (t Section) Float64(k string) (float64, bool) {
return t.values[k].FloatValue()
}


@ -1,3 +1,11 @@
# v1.40.0 (2023-09-26)
* **Feature**: This release adds a new field COMPLETED to the ReplicationStatus Enum. You can now use this field to validate the replication status of S3 objects using the AWS SDK.
# v1.39.0 (2023-09-20)
* **Feature**: Fix an issue where the SDK can fail to unmarshall response due to NumberFormatException
# v1.38.5 (2023-08-21)
* **Dependency Update**: Updated to the latest SDK module versions

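The headline feature above adds a COMPLETED member to the ReplicationStatus enum. A sketch of checking it on a HeadObject response; the constant name ReplicationStatusCompleted is an assumption based on the SDK's generated naming, and the bucket and key are placeholders:

	package main

	import (
		"context"
		"fmt"
		"log"

		"github.com/aws/aws-sdk-go-v2/aws"
		"github.com/aws/aws-sdk-go-v2/config"
		"github.com/aws/aws-sdk-go-v2/service/s3"
		"github.com/aws/aws-sdk-go-v2/service/s3/types"
	)

	func main() {
		ctx := context.Background()
		cfg, err := config.LoadDefaultConfig(ctx)
		if err != nil {
			log.Fatal(err)
		}
		client := s3.NewFromConfig(cfg)

		out, err := client.HeadObject(ctx, &s3.HeadObjectInput{
			Bucket: aws.String("my-bucket"), // placeholder
			Key:    aws.String("my-key"),    // placeholder
		})
		if err != nil {
			log.Fatal(err)
		}
		// ReplicationStatusCompleted is assumed to map to the new COMPLETED value.
		if out.ReplicationStatus == types.ReplicationStatusCompleted {
			fmt.Println("replication finished")
		}
	}
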

@ -87,9 +87,11 @@ type AbortMultipartUploadInput struct {
ExpectedBucketOwner *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer
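A hedged sketch of the RequestPayer field documented above, assuming an *s3.Client named client, a context ctx, and an uploadID string are in scope (bucket and key names are hypothetical); the same field and value apply to every other input below that documents RequestPayer:

	_, err := client.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{
		Bucket:   aws.String("my-requester-pays-bucket"),
		Key:      aws.String("my-key"),
		UploadId: aws.String(uploadID),
		// Acknowledge that the requester, not the bucket owner, is charged.
		RequestPayer: types.RequestPayerRequester,
	})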


@ -156,9 +156,11 @@ type CompleteMultipartUploadInput struct {
MultipartUpload *types.CompletedMultipartUpload
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer


@ -137,9 +137,12 @@ import (
// use the CopyObject action to change the storage class of an object that is
// already stored in Amazon S3 by using the StorageClass parameter. For more
// information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
// in the Amazon S3 User Guide. If the source object's storage class is GLACIER,
// you must restore a copy of this object before you can use it as a source object
// for the copy operation. For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html)
// in the Amazon S3 User Guide. If the source object's storage class is GLACIER or
// DEEP_ARCHIVE, or the object's storage class is INTELLIGENT_TIERING and it's S3
// Intelligent-Tiering access tier (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition)
// is Archive Access or Deep Archive Access, you must restore a copy of this object
// before you can use it as a source object for the copy operation. For more
// information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html)
// . For more information, see Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html)
// . Versioning By default, the x-amz-copy-source header identifies the current
// version of an object to copy. If the current version is a delete marker, Amazon
@ -332,9 +335,11 @@ type CopyObjectInput struct {
ObjectLockRetainUntilDate *time.Time
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer
@ -359,11 +364,11 @@ type CopyObjectInput struct {
// JSON with the encryption context key-value pairs.
SSEKMSEncryptionContext *string
// Specifies the KMS key ID to use for object encryption. All GET and PUT requests
// for an object protected by KMS will fail if they're not made via SSL or using
// SigV4. For information about configuring any of the officially supported Amazon
// Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature
// Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
// Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object
// encryption. All GET and PUT requests for an object protected by KMS will fail if
// they're not made via SSL or using SigV4. For information about configuring any
// of the officially supported Amazon Web Services SDKs and Amazon Web Services
// CLI, see Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
// in the Amazon S3 User Guide.
SSEKMSKeyId *string
@ -371,11 +376,11 @@ type CopyObjectInput struct {
// (for example, AES256 , aws:kms , aws:kms:dsse ).
ServerSideEncryption types.ServerSideEncryption
// By default, Amazon S3 uses the STANDARD Storage Class to store newly created
// objects. The STANDARD storage class provides high durability and high
// availability. Depending on performance needs, you can specify a different
// Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
// more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
// If the x-amz-storage-class header is not used, the copied object will be stored
// in the STANDARD Storage Class by default. The STANDARD storage class provides
// high durability and high availability. Depending on performance needs, you can
// specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS
// Storage Class. For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
// in the Amazon S3 User Guide.
StorageClass types.StorageClass
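A hedged sketch combining the two CopyObjectInput fields documented above: SSEKMSKeyId now explicitly accepts a key ID, key ARN, or key alias, and StorageClass sets the x-amz-storage-class header (client, ctx, and all names are assumptions):

	_, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:               aws.String("dst-bucket"),
		Key:                  aws.String("dst-key"),
		CopySource:           aws.String("src-bucket/src-key"),
		ServerSideEncryption: types.ServerSideEncryptionAwsKms,
		SSEKMSKeyId:          aws.String("alias/my-app-key"), // or a key ID / key ARN
		StorageClass:         types.StorageClassStandardIa,   // overrides the STANDARD default
	})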


@ -26,11 +26,12 @@ import (
// information about bucket naming restrictions, see Bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html)
// . If you want to create an Amazon S3 on Outposts bucket, see Create Bucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html)
// . By default, the bucket is created in the US East (N. Virginia) Region. You can
// optionally specify a Region in the request body. You might choose a Region to
// optimize latency, minimize costs, or address regulatory requirements. For
// example, if you reside in Europe, you will probably find it advantageous to
// create buckets in the Europe (Ireland) Region. For more information, see
// Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
// optionally specify a Region in the request body. To constrain the bucket
// creation to a specific Region, you can use LocationConstraint (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketConfiguration.html)
// condition key. You might choose a Region to optimize latency, minimize costs, or
// address regulatory requirements. For example, if you reside in Europe, you will
// probably find it advantageous to create buckets in the Europe (Ireland) Region.
// For more information, see Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
// . If you send your create bucket request to the s3.amazonaws.com endpoint, the
// request goes to the us-east-1 Region. Accordingly, the signature calculations
// in Signature Version 4 must use us-east-1 as the Region, even if the location
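A short sketch of the Region pinning described above, using the request-body form of the constraint (bucket name hypothetical; for us-east-1 the configuration block is omitted entirely):

	_, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
		Bucket: aws.String("my-bucket"),
		CreateBucketConfiguration: &types.CreateBucketConfiguration{
			// Create the bucket in eu-west-1 instead of the us-east-1 default.
			LocationConstraint: types.BucketLocationConstraintEuWest1,
		},
	})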


@ -290,9 +290,11 @@ type CreateMultipartUploadInput struct {
ObjectLockRetainUntilDate *time.Time
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer
@ -317,11 +319,12 @@ type CreateMultipartUploadInput struct {
// JSON with the encryption context key-value pairs.
SSEKMSEncryptionContext *string
// Specifies the ID of the symmetric encryption customer managed key to use for
// object encryption. All GET and PUT requests for an object protected by KMS will
// fail if they're not made via SSL or using SigV4. For information about
// configuring any of the officially supported Amazon Web Services SDKs and Amazon
// Web Services CLI, see Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
// Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption
// customer managed key to use for object encryption. All GET and PUT requests for
// an object protected by KMS will fail if they're not made via SSL or using SigV4.
// For information about configuring any of the officially supported Amazon Web
// Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version
// in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
// in the Amazon S3 User Guide.
SSEKMSKeyId *string


@ -95,9 +95,11 @@ type DeleteObjectInput struct {
MFA *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer
@ -109,8 +111,10 @@ type DeleteObjectInput struct {
type DeleteObjectOutput struct {
// Specifies whether the versioned object that was permanently deleted was (true)
// or was not (false) a delete marker.
// Indicates whether the specified object version that was permanently deleted was
// (true) or was not (false) a delete marker before deletion. In a simple DELETE,
// this header indicates whether (true) or not (false) the current version of the
// object is a delete marker.
DeleteMarker bool
// If present, indicates that the requester was successfully charged for the
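A sketch of the reworded DeleteMarker semantics: after a simple DELETE on a versioned bucket, the flag reports whether the current version is now a delete marker (client/ctx assumed; in this module version the field is a plain bool):

	out, err := client.DeleteObject(ctx, &s3.DeleteObjectInput{
		Bucket: aws.String("my-versioned-bucket"),
		Key:    aws.String("my-key"),
	})
	if err == nil && out.DeleteMarker {
		fmt.Println("a delete marker now shadows the object")
	}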


@ -119,9 +119,11 @@ type DeleteObjectsInput struct {
MFA *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer


@ -65,9 +65,11 @@ type GetBucketAccelerateConfigurationInput struct {
ExpectedBucketOwner *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer


@ -191,9 +191,11 @@ type GetObjectInput struct {
Range *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer


@ -75,9 +75,11 @@ type GetObjectAclInput struct {
ExpectedBucketOwner *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer


@ -139,9 +139,11 @@ type GetObjectAttributesInput struct {
PartNumberMarker *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer


@ -63,9 +63,11 @@ type GetObjectLegalHoldInput struct {
ExpectedBucketOwner *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer


@ -63,9 +63,11 @@ type GetObjectRetentionInput struct {
ExpectedBucketOwner *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer


@ -78,9 +78,11 @@ type GetObjectTaggingInput struct {
ExpectedBucketOwner *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer


@ -59,9 +59,11 @@ type GetObjectTorrentInput struct {
ExpectedBucketOwner *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer


@ -155,9 +155,11 @@ type HeadObjectInput struct {
Range *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer


@ -119,9 +119,11 @@ type ListMultipartUploadsInput struct {
Prefix *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer


@ -95,9 +95,11 @@ type ListObjectVersionsInput struct {
Prefix *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer


@ -100,9 +100,11 @@ type ListPartsInput struct {
PartNumberMarker *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer


@ -24,16 +24,14 @@ import (
// default encryption configuration that uses server-side encryption with Amazon S3
// managed keys (SSE-S3). You can optionally configure default encryption for a
// bucket by using server-side encryption with Key Management Service (KMS) keys
// (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys
// (DSSE-KMS), or server-side encryption with customer-provided keys (SSE-C). If
// you specify default encryption by using SSE-KMS, you can also configure Amazon
// S3 Bucket Keys. For information about bucket default encryption, see Amazon S3
// bucket default encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
// in the Amazon S3 User Guide. For more information about S3 Bucket Keys, see
// Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html)
// in the Amazon S3 User Guide. This action requires Amazon Web Services Signature
// Version 4. For more information, see Authenticating Requests (Amazon Web
// Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html)
// (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys
// (DSSE-KMS). If you specify default encryption by using SSE-KMS, you can also
// configure Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html)
// . If you use PutBucketEncryption to set your default bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
// to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does
// not validate the KMS key ID provided in PutBucketEncryption requests. This
// action requires Amazon Web Services Signature Version 4. For more information,
// see Authenticating Requests (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html)
// . To use this operation, you must have permission to perform the
// s3:PutEncryptionConfiguration action. The bucket owner has this permission by
// default. The bucket owner can grant this permission to others. For more
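A hedged sketch of the SSE-KMS default-encryption setup described above, reusing the example key ARN from the KMSMasterKeyID documentation later in this diff; note the new caution that S3 does not validate the KMS key ID supplied here:

	_, err := client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{
		Bucket: aws.String("my-bucket"),
		ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
			Rules: []types.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
					SSEAlgorithm:   types.ServerSideEncryptionAwsKms,
					KMSMasterKeyID: aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
				},
				BucketKeyEnabled: true, // optional: enables S3 Bucket Keys, per the linked docs
			}},
		},
	})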


@ -25,17 +25,19 @@ import (
// request body. In the replication configuration, you provide the name of the
// destination bucket or buckets where you want Amazon S3 to replicate objects, the
// IAM role that Amazon S3 can assume to replicate objects on your behalf, and
// other relevant information. A replication configuration must include at least
// one rule, and can contain a maximum of 1,000. Each rule identifies a subset of
// objects to replicate by filtering the objects in the source bucket. To choose
// additional subsets of objects to replicate, add a rule for each subset. To
// specify a subset of the objects in the source bucket to apply a replication rule
// to, add the Filter element as a child of the Rule element. You can filter
// objects based on an object key prefix, one or more object tags, or both. When
// you add the Filter element in the configuration, you must also add the following
// elements: DeleteMarkerReplication , Status , and Priority . If you are using an
// earlier version of the replication configuration, Amazon S3 handles replication
// of delete markers differently. For more information, see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations)
// other relevant information. You can invoke this request for a specific Amazon
// Web Services Region by using the aws:RequestedRegion (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requestedregion)
// condition key. A replication configuration must include at least one rule, and
// can contain a maximum of 1,000. Each rule identifies a subset of objects to
// replicate by filtering the objects in the source bucket. To choose additional
// subsets of objects to replicate, add a rule for each subset. To specify a subset
// of the objects in the source bucket to apply a replication rule to, add the
// Filter element as a child of the Rule element. You can filter objects based on
// an object key prefix, one or more object tags, or both. When you add the Filter
// element in the configuration, you must also add the following elements:
// DeleteMarkerReplication , Status , and Priority . If you are using an earlier
// version of the replication configuration, Amazon S3 handles replication of
// delete markers differently. For more information, see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations)
// . For information about enabling versioning on a bucket, see Using Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html)
// . Handling Replication of Encrypted Objects By default, Amazon S3 doesn't
// replicate objects that are stored at rest using server-side encryption with KMS
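A hedged sketch of the rule shape described above: a Filter as a child of the Rule (a key-prefix filter here) together with the required DeleteMarkerReplication, Status, and Priority elements. The union member type for Filter reflects this module version, and all names and ARNs are hypothetical:

	rule := types.ReplicationRule{
		Status:   types.ReplicationRuleStatusEnabled,
		Priority: aws.Int32(1),
		Filter:   &types.ReplicationRuleFilterMemberPrefix{Value: "logs/"},
		DeleteMarkerReplication: &types.DeleteMarkerReplication{
			Status: types.DeleteMarkerReplicationStatusDisabled,
		},
		Destination: &types.ReplicationDestination{
			Bucket: aws.String("arn:aws:s3:::destination-bucket"),
		},
	}
	_, err := client.PutBucketReplication(ctx, &s3.PutBucketReplicationInput{
		Bucket: aws.String("source-bucket"),
		ReplicationConfiguration: &types.ReplicationConfiguration{
			Role:  aws.String("arn:aws:iam::111122223333:role/replication-role"),
			Rules: []types.ReplicationRule{rule},
		},
	})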


@ -27,7 +27,7 @@ import (
// specific application name, and then organize your billing information to see the
// total cost of that application across several services. For more information,
// see Cost Allocation and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html)
// and Using Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html)
// and Using Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html)
// . When this operation sets the tags for a bucket, it will overwrite any current
// tags the bucket already has. You cannot use this operation to add tags to an
// existing list of tags. To use this operation, you must have permissions to
@ -35,20 +35,17 @@ import (
// default and can grant this permission to others. For more information about
// permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
// . PutBucketTagging has the following special errors:
// - Error code: InvalidTagError
// - Description: The tag provided was not a valid tag. This error can occur if
// the tag did not pass input validation. For information about tag restrictions,
// see User-Defined Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html)
// and Amazon Web Services-Generated Cost Allocation Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html)
// . PutBucketTagging has the following special errors. For more Amazon S3
// errors, see Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html)
// .
// - Error code: MalformedXMLError
// - Description: The XML provided does not match the schema.
// - Error code: OperationAbortedError
// - Description: A conflicting conditional action is currently in progress
// - InvalidTag - The tag provided was not a valid tag. This error can occur if
// the tag did not pass input validation. For more information, see Using Cost
// Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html)
// .
// - MalformedXML - The XML provided does not match the schema.
// - OperationAborted - A conflicting conditional action is currently in progress
// against this resource. Please try again.
// - Error code: InternalError
// - Description: The service was unable to apply the provided tag to the
// - InternalError - The service was unable to apply the provided tag to the
// bucket.
//
// The following operations are related to PutBucketTagging :


@ -61,7 +61,7 @@ import (
// Amazon S3 has a limitation of 50 routing rules per website configuration. If
// you require more than 50 routing rules, you can use object redirect. For more
// information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html)
// in the Amazon S3 User Guide.
// in the Amazon S3 User Guide. The maximum request length is limited to 128 KB.
func (c *Client) PutBucketWebsite(ctx context.Context, params *PutBucketWebsiteInput, optFns ...func(*Options)) (*PutBucketWebsiteOutput, error) {
if params == nil {
params = &PutBucketWebsiteInput{}


@ -264,9 +264,11 @@ type PutObjectInput struct {
ObjectLockRetainUntilDate *time.Time
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer
@ -294,9 +296,9 @@ type PutObjectInput struct {
SSEKMSEncryptionContext *string
// If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse ,
// this header specifies the ID of the Key Management Service (KMS) symmetric
// encryption customer managed key that was used for the object. If you specify
// x-amz-server-side-encryption:aws:kms or
// this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key
// Management Service (KMS) symmetric encryption customer managed key that was used
// for the object. If you specify x-amz-server-side-encryption:aws:kms or
// x-amz-server-side-encryption:aws:kms:dsse , but do not provide
// x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web
// Services managed key ( aws/s3 ) to protect the data. If the KMS key does not


@ -199,9 +199,11 @@ type PutObjectAclInput struct {
GrantWriteACP *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer


@ -81,9 +81,11 @@ type PutObjectLegalHoldInput struct {
LegalHold *types.ObjectLockLegalHold
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer


@ -74,9 +74,11 @@ type PutObjectLockConfigurationInput struct {
ObjectLockConfiguration *types.ObjectLockConfiguration
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer


@ -84,9 +84,11 @@ type PutObjectRetentionInput struct {
ExpectedBucketOwner *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer


@ -20,10 +20,10 @@ import (
)
// Sets the supplied tag-set to an object that already exists in a bucket. A tag
// is a key-value pair. You can associate tags with an object by sending a PUT
// request against the tagging subresource that is associated with the object. You
// can retrieve tags by sending a GET request. For more information, see
// GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
// is a key-value pair. For more information, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html)
// . You can associate tags with an object by sending a PUT request against the
// tagging subresource that is associated with the object. You can retrieve tags by
// sending a GET request. For more information, see GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
// . For tagging-related restrictions related to characters and encodings, see Tag
// Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html)
// . Note that Amazon S3 limits the maximum number of tags to 10 tags per object.
@ -31,20 +31,18 @@ import (
// s3:PutObjectTagging action. By default, the bucket owner has this permission and
// can grant this permission to others. To put tags of any other version, use the
// versionId query parameter. You also need permission for the
// s3:PutObjectVersionTagging action. For information about the Amazon S3 object
// tagging feature, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html)
// . PutObjectTagging has the following special errors:
// - Code: InvalidTagError
// - Cause: The tag provided was not a valid tag. This error can occur if the
// tag did not pass input validation. For more information, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html)
// s3:PutObjectVersionTagging action. PutObjectTagging has the following special
// errors. For more Amazon S3 errors, see Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html)
// .
// - Code: MalformedXMLError
// - Cause: The XML provided does not match the schema.
// - Code: OperationAbortedError
// - Cause: A conflicting conditional action is currently in progress against
// this resource. Please try again.
// - Code: InternalError
// - Cause: The service was unable to apply the provided tag to the object.
// - InvalidTag - The tag provided was not a valid tag. This error can occur if
// the tag did not pass input validation. For more information, see Object
// Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html)
// .
// - MalformedXML - The XML provided does not match the schema.
// - OperationAborted - A conflicting conditional action is currently in progress
// against this resource. Please try again.
// - InternalError - The service was unable to apply the provided tag to the
// object.
//
// The following operations are related to PutObjectTagging :
// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
@ -116,9 +114,11 @@ type PutObjectTaggingInput struct {
ExpectedBucketOwner *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer
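A short sketch of the call documented above; the supplied tag set replaces any tags already on the object version, and S3 caps it at 10 tags per object (names hypothetical):

	_, err := client.PutObjectTagging(ctx, &s3.PutObjectTaggingInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("my-key"),
		Tagging: &types.Tagging{
			TagSet: []types.Tag{
				{Key: aws.String("project"), Value: aws.String("alpha")},
			},
		},
	})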


@ -27,7 +27,7 @@ import (
// an object, it checks the PublicAccessBlock configuration for both the bucket
// (or the bucket that contains the object) and the bucket owner's account. If the
// PublicAccessBlock configurations are different between the bucket and the
// account, Amazon S3 uses the most restrictive combination of the bucket-level and
// account, S3 uses the most restrictive combination of the bucket-level and
// account-level settings. For more information about when Amazon S3 considers a
// bucket or an object public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status)
// . The following operations are related to PutPublicAccessBlock :


@ -224,9 +224,11 @@ type RestoreObjectInput struct {
ExpectedBucketOwner *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer


@ -202,9 +202,11 @@ type UploadPartInput struct {
ExpectedBucketOwner *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer


@ -215,9 +215,11 @@ type UploadPartCopyInput struct {
ExpectedSourceBucketOwner *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer


@ -242,9 +242,9 @@ type WriteGetObjectResponseInput struct {
// .
SSECustomerKeyMD5 *string
// If present, specifies the ID of the Amazon Web Services Key Management Service
// (Amazon Web Services KMS) symmetric encryption customer managed key that was
// used for stored in Amazon S3 object.
// If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the Amazon Web
// Services Key Management Service (Amazon Web Services KMS) symmetric encryption
// customer managed key that was used to encrypt the object stored in Amazon S3.
SSEKMSKeyId *string
// The server-side encryption algorithm used when storing requested object in


@ -3,4 +3,4 @@
package s3
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.38.5"
const goModuleVersion = "1.40.0"


@ -1010,6 +1010,7 @@ const (
ReplicationStatusPending ReplicationStatus = "PENDING"
ReplicationStatusFailed ReplicationStatus = "FAILED"
ReplicationStatusReplica ReplicationStatus = "REPLICA"
ReplicationStatusCompleted ReplicationStatus = "COMPLETED"
)
// Values returns all known values for ReplicationStatus. Note that this can be
@ -1021,6 +1022,7 @@ func (ReplicationStatus) Values() []ReplicationStatus {
"PENDING",
"FAILED",
"REPLICA",
"COMPLETED",
}
}
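A minimal sketch of consuming the new COMPLETED value, e.g. on a HeadObject response (client/ctx assumed; field names per this module version):

	out, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("my-key"),
	})
	if err == nil && out.ReplicationStatus == types.ReplicationStatusCompleted {
		fmt.Println("replication of this object has completed")
	}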


@ -594,9 +594,10 @@ type Delete struct {
// Information about the deleted object.
type DeletedObject struct {
// Specifies whether the versioned object that was permanently deleted was (true)
// or was not (false) a delete marker. In a simple DELETE, this header indicates
// whether (true) or not (false) a delete marker was created.
// Indicates whether the specified object version that was permanently deleted was
// (true) or was not (false) a delete marker before deletion. In a simple DELETE,
// this header indicates whether (true) or not (false) the current version of the
// object is a delete marker.
DeleteMarker bool
// The version ID of the delete marker created as a result of the DELETE
@ -2997,17 +2998,17 @@ type ServerSideEncryptionByDefault struct {
// Amazon Web Services Key Management Service (KMS) customer Amazon Web Services
// KMS key ID to use for the default encryption. This parameter is allowed if and
// only if SSEAlgorithm is set to aws:kms . You can specify the key ID or the
// Amazon Resource Name (ARN) of the KMS key. If you use a key ID, you can run into
// a LogDestination undeliverable error when creating a VPC flow log. If you are
// using encryption with cross-account or Amazon Web Services service operations
// you must use a fully qualified KMS key ARN. For more information, see Using
// encryption for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy)
// .
// only if SSEAlgorithm is set to aws:kms . You can specify the key ID, key alias,
// or the Amazon Resource Name (ARN) of the KMS key.
// - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
// - Key ARN:
// arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
// Amazon S3 only supports symmetric encryption KMS keys. For more information,
// - Key Alias: alias/alias-name
// If you use a key ID, you can run into a LogDestination undeliverable error when
// creating a VPC flow log. If you are using encryption with cross-account or
// Amazon Web Services service operations you must use a fully qualified KMS key
// ARN. For more information, see Using encryption for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy)
// . Amazon S3 only supports symmetric encryption KMS keys. For more information,
// see Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
// in the Amazon Web Services Key Management Service Developer Guide.
KMSMasterKeyID *string


@ -1,3 +1,16 @@
# v1.15.0 (2023-10-02)
* **Feature**: Fix FIPS Endpoints in aws-us-gov.
# v1.14.1 (2023-09-22)
* No change notes available for this release.
# v1.14.0 (2023-09-18)
* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service.
* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field.
# v1.13.6 (2023-08-31)
* No change notes available for this release.


@ -411,6 +411,25 @@ func (r *resolver) ResolveEndpoint(
}
if _UseFIPS == true {
if true == _PartitionResult.SupportsFIPS {
if "aws-us-gov" == _PartitionResult.Name {
uriString := func() string {
var out strings.Builder
out.WriteString("https://portal.sso.")
out.WriteString(_Region)
out.WriteString(".amazonaws.com")
return out.String()
}()
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
uriString := func() string {
var out strings.Builder
out.WriteString("https://portal.sso-fips.")
@ -455,279 +474,6 @@ func (r *resolver) ResolveEndpoint(
}
return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack")
}
if _Region == "ap-east-1" {
uriString := "https://portal.sso.ap-east-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "ap-northeast-1" {
uriString := "https://portal.sso.ap-northeast-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "ap-northeast-2" {
uriString := "https://portal.sso.ap-northeast-2.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "ap-northeast-3" {
uriString := "https://portal.sso.ap-northeast-3.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "ap-south-1" {
uriString := "https://portal.sso.ap-south-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "ap-southeast-1" {
uriString := "https://portal.sso.ap-southeast-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "ap-southeast-2" {
uriString := "https://portal.sso.ap-southeast-2.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "ca-central-1" {
uriString := "https://portal.sso.ca-central-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "eu-central-1" {
uriString := "https://portal.sso.eu-central-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "eu-north-1" {
uriString := "https://portal.sso.eu-north-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "eu-south-1" {
uriString := "https://portal.sso.eu-south-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "eu-west-1" {
uriString := "https://portal.sso.eu-west-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "eu-west-2" {
uriString := "https://portal.sso.eu-west-2.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "eu-west-3" {
uriString := "https://portal.sso.eu-west-3.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "me-south-1" {
uriString := "https://portal.sso.me-south-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "sa-east-1" {
uriString := "https://portal.sso.sa-east-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "us-east-1" {
uriString := "https://portal.sso.us-east-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "us-east-2" {
uriString := "https://portal.sso.us-east-2.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "us-west-2" {
uriString := "https://portal.sso.us-west-2.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "us-gov-east-1" {
uriString := "https://portal.sso.us-gov-east-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "us-gov-west-1" {
uriString := "https://portal.sso.us-gov-west-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
uriString := func() string {
var out strings.Builder
out.WriteString("https://portal.sso.")

View file

@ -3,4 +3,4 @@
package sso
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.13.6"
const goModuleVersion = "1.15.0"

View file

@ -227,6 +227,14 @@ var defaultPartitions = endpoints.Partitions{
Region: "eu-central-1",
},
},
endpoints.EndpointKey{
Region: "eu-central-2",
}: endpoints.Endpoint{
Hostname: "portal.sso.eu-central-2.amazonaws.com",
CredentialScope: endpoints.CredentialScope{
Region: "eu-central-2",
},
},
endpoints.EndpointKey{
Region: "eu-north-1",
}: endpoints.Endpoint{
@ -359,6 +367,24 @@ var defaultPartitions = endpoints.Partitions{
},
RegionRegex: partitionRegexp.AwsCn,
IsRegionalized: true,
Endpoints: endpoints.Endpoints{
endpoints.EndpointKey{
Region: "cn-north-1",
}: endpoints.Endpoint{
Hostname: "portal.sso.cn-north-1.amazonaws.com.cn",
CredentialScope: endpoints.CredentialScope{
Region: "cn-north-1",
},
},
endpoints.EndpointKey{
Region: "cn-northwest-1",
}: endpoints.Endpoint{
Hostname: "portal.sso.cn-northwest-1.amazonaws.com.cn",
CredentialScope: endpoints.CredentialScope{
Region: "cn-northwest-1",
},
},
},
},
{
ID: "aws-iso",

View file

@ -1,3 +1,16 @@
# v1.17.1 (2023-09-22)
* No change notes available for this release.
# v1.17.0 (2023-09-20)
* **Feature**: Update FIPS endpoints in aws-us-gov.
# v1.16.0 (2023-09-18)
* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service.
* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field.
# v1.15.6 (2023-09-05)
* No change notes available for this release.

View file

@ -411,6 +411,25 @@ func (r *resolver) ResolveEndpoint(
}
if _UseFIPS == true {
if true == _PartitionResult.SupportsFIPS {
if "aws-us-gov" == _PartitionResult.Name {
uriString := func() string {
var out strings.Builder
out.WriteString("https://oidc.")
out.WriteString(_Region)
out.WriteString(".amazonaws.com")
return out.String()
}()
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
uriString := func() string {
var out strings.Builder
out.WriteString("https://oidc-fips.")
@ -455,279 +474,6 @@ func (r *resolver) ResolveEndpoint(
}
return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack")
}
if _Region == "ap-east-1" {
uriString := "https://oidc.ap-east-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "ap-northeast-1" {
uriString := "https://oidc.ap-northeast-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "ap-northeast-2" {
uriString := "https://oidc.ap-northeast-2.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "ap-northeast-3" {
uriString := "https://oidc.ap-northeast-3.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "ap-south-1" {
uriString := "https://oidc.ap-south-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "ap-southeast-1" {
uriString := "https://oidc.ap-southeast-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "ap-southeast-2" {
uriString := "https://oidc.ap-southeast-2.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "ca-central-1" {
uriString := "https://oidc.ca-central-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "eu-central-1" {
uriString := "https://oidc.eu-central-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "eu-north-1" {
uriString := "https://oidc.eu-north-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "eu-south-1" {
uriString := "https://oidc.eu-south-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "eu-west-1" {
uriString := "https://oidc.eu-west-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "eu-west-2" {
uriString := "https://oidc.eu-west-2.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "eu-west-3" {
uriString := "https://oidc.eu-west-3.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "me-south-1" {
uriString := "https://oidc.me-south-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "sa-east-1" {
uriString := "https://oidc.sa-east-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "us-east-1" {
uriString := "https://oidc.us-east-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "us-east-2" {
uriString := "https://oidc.us-east-2.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "us-west-2" {
uriString := "https://oidc.us-west-2.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "us-gov-east-1" {
uriString := "https://oidc.us-gov-east-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
if _Region == "us-gov-west-1" {
uriString := "https://oidc.us-gov-west-1.amazonaws.com"
uri, err := url.Parse(uriString)
if err != nil {
return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
}
return smithyendpoints.Endpoint{
URI: *uri,
Headers: http.Header{},
}, nil
}
uriString := func() string {
var out strings.Builder
out.WriteString("https://oidc.")

View file

@ -3,4 +3,4 @@
package ssooidc
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.15.6"
const goModuleVersion = "1.17.1"

View file

@ -227,6 +227,14 @@ var defaultPartitions = endpoints.Partitions{
Region: "eu-central-1",
},
},
endpoints.EndpointKey{
Region: "eu-central-2",
}: endpoints.Endpoint{
Hostname: "oidc.eu-central-2.amazonaws.com",
CredentialScope: endpoints.CredentialScope{
Region: "eu-central-2",
},
},
endpoints.EndpointKey{
Region: "eu-north-1",
}: endpoints.Endpoint{
@ -359,6 +367,24 @@ var defaultPartitions = endpoints.Partitions{
},
RegionRegex: partitionRegexp.AwsCn,
IsRegionalized: true,
Endpoints: endpoints.Endpoints{
endpoints.EndpointKey{
Region: "cn-north-1",
}: endpoints.Endpoint{
Hostname: "oidc.cn-north-1.amazonaws.com.cn",
CredentialScope: endpoints.CredentialScope{
Region: "cn-north-1",
},
},
endpoints.EndpointKey{
Region: "cn-northwest-1",
}: endpoints.Endpoint{
Hostname: "oidc.cn-northwest-1.amazonaws.com.cn",
CredentialScope: endpoints.CredentialScope{
Region: "cn-northwest-1",
},
},
},
},
{
ID: "aws-iso",

View file

@ -1,3 +1,12 @@
# v1.23.0 (2023-10-02)
* **Feature**: STS API updates for assumeRole
# v1.22.0 (2023-09-18)
* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service.
* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field.
# v1.21.5 (2023-08-21)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package sts
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.21.5"
const goModuleVersion = "1.23.0"

View file

@ -2572,21 +2572,81 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
Hostname: "appflow-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
Hostname: "appflow-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
Hostname: "appflow-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
Hostname: "appflow-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "appflow-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
}: endpoint{},
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "appflow-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
}: endpoint{},
endpointKey{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "appflow-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
}: endpoint{},
endpointKey{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "appflow-fips.us-west-2.amazonaws.com",
},
},
},
"application-autoscaling": service{
@ -4408,6 +4468,99 @@ var awsPartition = partition{
},
},
},
"bedrock": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "bedrock-ap-southeast-1",
}: endpoint{
Hostname: "bedrock.ap-southeast-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-southeast-1",
},
},
endpointKey{
Region: "bedrock-fips-us-east-1",
}: endpoint{
Hostname: "bedrock-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
},
endpointKey{
Region: "bedrock-fips-us-west-2",
}: endpoint{
Hostname: "bedrock-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
},
endpointKey{
Region: "bedrock-runtime-ap-southeast-1",
}: endpoint{
Hostname: "bedrock-runtime.ap-southeast-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-southeast-1",
},
},
endpointKey{
Region: "bedrock-runtime-fips-us-east-1",
}: endpoint{
Hostname: "bedrock-runtime-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
},
endpointKey{
Region: "bedrock-runtime-fips-us-west-2",
}: endpoint{
Hostname: "bedrock-runtime-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
},
endpointKey{
Region: "bedrock-runtime-us-east-1",
}: endpoint{
Hostname: "bedrock-runtime.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
},
endpointKey{
Region: "bedrock-runtime-us-west-2",
}: endpoint{
Hostname: "bedrock-runtime.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
},
endpointKey{
Region: "bedrock-us-east-1",
}: endpoint{
Hostname: "bedrock.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
},
endpointKey{
Region: "bedrock-us-west-2",
}: endpoint{
Hostname: "bedrock.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
},
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
},
},
"billingconductor": service{
PartitionEndpoint: "aws-global",
IsRegionalized: boxedFalse,
@ -7114,6 +7267,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
@ -7209,6 +7365,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-south-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@ -7239,12 +7398,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-2",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
Region: "eu-south-2",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@ -7257,6 +7422,9 @@ var awsPartition = partition{
endpointKey{
Region: "il-central-1",
}: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@ -8785,6 +8953,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "il-central-1",
}: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@ -11026,6 +11197,12 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "email-fips.ca-central-1.amazonaws.com",
},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@ -11044,6 +11221,15 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "fips-ca-central-1",
}: endpoint{
Hostname: "email-fips.ca-central-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ca-central-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@ -11053,6 +11239,24 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
Hostname: "email-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
Hostname: "email-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
@ -11083,9 +11287,21 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-2",
}: endpoint{},
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "email-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
}: endpoint{},
endpointKey{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "email-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
}: endpoint{},
@ -14253,6 +14469,9 @@ var awsPartition = partition{
},
"inspector2": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
}: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
@ -14262,6 +14481,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
Region: "ap-northeast-3",
}: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@ -14271,12 +14493,18 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-2",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
@ -17602,6 +17830,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-south-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@ -17611,12 +17842,18 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
Region: "ap-southeast-4",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-2",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
@ -18279,6 +18516,13 @@ var awsPartition = partition{
}: endpoint{},
},
},
"managedblockchain-query": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-east-1",
}: endpoint{},
},
},
"marketplacecommerceanalytics": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -18809,12 +19053,30 @@ var awsPartition = partition{
},
"meetings-chime": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
endpointKey{
Region: "il-central-1",
}: endpoint{},
@ -20042,6 +20304,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "il-central-1",
}: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@ -20336,6 +20601,14 @@ var awsPartition = partition{
Region: "eu-central-1",
},
},
endpointKey{
Region: "eu-central-2",
}: endpoint{
Hostname: "oidc.eu-central-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "eu-central-2",
},
},
endpointKey{
Region: "eu-north-1",
}: endpoint{
@ -20486,6 +20759,14 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "il-central-1",
}: endpoint{
Hostname: "omics.il-central-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "il-central-1",
},
},
endpointKey{
Region: "us-east-1",
}: endpoint{
@ -21167,6 +21448,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-south-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@ -21182,12 +21466,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-2",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
Region: "eu-south-2",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@ -21434,6 +21724,14 @@ var awsPartition = partition{
Region: "eu-central-1",
},
},
endpointKey{
Region: "eu-central-2",
}: endpoint{
Hostname: "portal.sso.eu-central-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "eu-central-2",
},
},
endpointKey{
Region: "eu-north-1",
}: endpoint{
@ -23132,6 +23430,11 @@ var awsPartition = partition{
}: endpoint{
Hostname: "resource-explorer-2.ap-southeast-2.api.aws",
},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{
Hostname: "resource-explorer-2.ap-southeast-3.api.aws",
},
endpointKey{
Region: "ap-southeast-4",
}: endpoint{
@ -23177,6 +23480,11 @@ var awsPartition = partition{
}: endpoint{
Hostname: "resource-explorer-2.il-central-1.api.aws",
},
endpointKey{
Region: "me-south-1",
}: endpoint{
Hostname: "resource-explorer-2.me-south-1.api.aws",
},
endpointKey{
Region: "sa-east-1",
}: endpoint{
@ -27965,6 +28273,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-2",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
@ -29106,15 +29417,30 @@ var awsPartition = partition{
},
"tnb": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-2",
}: endpoint{},
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@ -32048,6 +32374,20 @@ var awscnPartition = partition{
},
},
},
"api.pricing": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
CredentialScope: credentialScope{
Service: "pricing",
},
},
},
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
},
},
"api.sagemaker": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -32879,6 +33219,16 @@ var awscnPartition = partition{
},
},
},
"identitystore": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
},
},
"internetmonitor": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@ -33178,6 +33528,26 @@ var awscnPartition = partition{
}: endpoint{},
},
},
"oidc": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
}: endpoint{
Hostname: "oidc.cn-north-1.amazonaws.com.cn",
CredentialScope: credentialScope{
Region: "cn-north-1",
},
},
endpointKey{
Region: "cn-northwest-1",
}: endpoint{
Hostname: "oidc.cn-northwest-1.amazonaws.com.cn",
CredentialScope: credentialScope{
Region: "cn-northwest-1",
},
},
},
},
"organizations": service{
PartitionEndpoint: "aws-cn-global",
IsRegionalized: boxedFalse,
@ -33216,6 +33586,26 @@ var awscnPartition = partition{
}: endpoint{},
},
},
"portal.sso": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
}: endpoint{
Hostname: "portal.sso.cn-north-1.amazonaws.com.cn",
CredentialScope: credentialScope{
Region: "cn-north-1",
},
},
endpointKey{
Region: "cn-northwest-1",
}: endpoint{
Hostname: "portal.sso.cn-northwest-1.amazonaws.com.cn",
CredentialScope: credentialScope{
Region: "cn-northwest-1",
},
},
},
},
"ram": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -33647,6 +34037,16 @@ var awscnPartition = partition{
}: endpoint{},
},
},
"sso": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
},
},
"states": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -38416,6 +38816,16 @@ var awsusgovPartition = partition{
}: endpoint{},
},
},
"rolesanywhere": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
},
},
"route53": service{
PartitionEndpoint: "aws-us-gov-global",
IsRegionalized: boxedFalse,
@ -40485,14 +40895,45 @@ var awsisoPartition = partition{
},
"elasticmapreduce": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "fips-us-iso-east-1",
}: endpoint{
Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov",
CredentialScope: credentialScope{
Region: "us-iso-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-iso-west-1",
}: endpoint{
Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov",
CredentialScope: credentialScope{
Region: "us-iso-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-iso-east-1",
}: endpoint{
Protocols: []string{"https"},
},
endpointKey{
Region: "us-iso-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov",
Protocols: []string{"https"},
},
endpointKey{
Region: "us-iso-west-1",
}: endpoint{},
endpointKey{
Region: "us-iso-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov",
},
},
},
"es": service{
@ -41278,9 +41719,24 @@ var awsisobPartition = partition{
},
"elasticmapreduce": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "fips-us-isob-east-1",
}: endpoint{
Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov",
CredentialScope: credentialScope{
Region: "us-isob-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-isob-east-1",
}: endpoint{},
endpointKey{
Region: "us-isob-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov",
},
},
},
"es": service{

View file

@ -389,8 +389,15 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
updateString(&cfg.Region, section, regionKey)
updateString(&cfg.CustomCABundle, section, customCABundleKey)
// we're retaining a behavioral quirk with this field that existed before
// the removal of literal parsing for (aws-sdk-go-v2/#2276):
// - if the key is missing, the config field will not be set
// - if the key is set to a non-numeric, the config field will be set to 0
if section.Has(roleDurationSecondsKey) {
d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second
var d time.Duration
if v, ok := section.Int(roleDurationSecondsKey); ok {
d = time.Duration(v) * time.Second
}
cfg.AssumeRoleDuration = &d
}
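The retained quirk is easiest to see in isolation. A self-contained sketch using only the standard library, with a stand-in map instead of the SDK's internal ini section type:

// Sketch of the preserved pre-#2276 behavior: a missing key leaves the
// field unset (nil), while a non-numeric value yields a zero duration.
package main

import (
	"fmt"
	"strconv"
	"time"
)

func parseRoleDuration(section map[string]string) *time.Duration {
	raw, has := section["duration_seconds"]
	if !has {
		return nil // key missing: config field stays unset
	}
	var d time.Duration
	if v, err := strconv.ParseInt(raw, 10, 64); err == nil {
		d = time.Duration(v) * time.Second
	} // non-numeric: d stays 0, mirroring the old literal parser
	return &d
}

func main() {
	for _, raw := range []string{"900", "abc"} {
		if d := parseRoleDuration(map[string]string{"duration_seconds": raw}); d != nil {
			fmt.Printf("%q -> %v\n", raw, *d) // "900" -> 15m0s, "abc" -> 0s
		}
	}
	fmt.Println(parseRoleDuration(map[string]string{}) == nil) // true
}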
@ -668,7 +675,10 @@ func updateBool(dst *bool, section ini.Section, key string) {
if !section.Has(key) {
return
}
*dst = section.Bool(key)
// retains pre-(aws-sdk-go-v2#2276) behavior where non-bool value would resolve to false
v, _ := section.Bool(key)
*dst = v
}
// updateBoolPtr will only update the dst with the value in the section key,
@ -677,8 +687,11 @@ func updateBoolPtr(dst **bool, section ini.Section, key string) {
if !section.Has(key) {
return
}
// retains pre-(aws-sdk-go-v2#2276) behavior where non-bool value would resolve to false
v, _ := section.Bool(key)
*dst = new(bool)
**dst = section.Bool(key)
**dst = v
}
// SharedConfigLoadError is an error for the shared config file failed to load.
@ -805,7 +818,8 @@ func updateUseDualStackEndpoint(dst *endpoints.DualStackEndpointState, section i
return
}
if section.Bool(key) {
// retains pre-(aws-sdk-go-v2/#2276) behavior where non-bool value would resolve to false
if v, _ := section.Bool(key); v {
*dst = endpoints.DualStackEndpointStateEnabled
} else {
*dst = endpoints.DualStackEndpointStateDisabled
@ -821,7 +835,8 @@ func updateUseFIPSEndpoint(dst *endpoints.FIPSEndpointState, section ini.Section
return
}
if section.Bool(key) {
// retains pre-(aws-sdk-go-v2/#2276) behavior where non-bool value would resolve to false
if v, _ := section.Bool(key); v {
*dst = endpoints.FIPSEndpointStateEnabled
} else {
*dst = endpoints.FIPSEndpointStateDisabled

View file

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.45.4"
const SDKVersion = "1.45.20"

View file

@ -154,11 +154,11 @@ func (v ValueType) String() string {
// ValueType enums
const (
NoneType = ValueType(iota)
DecimalType
IntegerType
DecimalType // deprecated
IntegerType // deprecated
StringType
QuotedStringType
BoolType
BoolType // deprecated
)
// Value is a union container
@ -166,9 +166,9 @@ type Value struct {
Type ValueType
raw []rune
integer int64
decimal float64
boolean bool
integer int64 // deprecated
decimal float64 // deprecated
boolean bool // deprecated
str string
}
@ -253,24 +253,6 @@ func newLitToken(b []rune) (Token, int, error) {
}
token = newToken(TokenLit, b[:n], QuotedStringType)
} else if isNumberValue(b) {
var base int
base, n, err = getNumericalValue(b)
if err != nil {
return token, 0, err
}
value := b[:n]
vType := IntegerType
if contains(value, '.') || hasExponent(value) {
vType = DecimalType
}
token = newToken(TokenLit, value, vType)
token.base = base
} else if isBoolValue(b) {
n, err = getBoolValue(b)
token = newToken(TokenLit, b[:n], BoolType)
} else {
n, err = getValue(b)
token = newToken(TokenLit, b[:n], StringType)
@ -280,18 +262,33 @@ func newLitToken(b []rune) (Token, int, error) {
}
// IntValue returns an integer value
func (v Value) IntValue() int64 {
return v.integer
func (v Value) IntValue() (int64, bool) {
i, err := strconv.ParseInt(string(v.raw), 0, 64)
if err != nil {
return 0, false
}
return i, true
}
// FloatValue returns a float value
func (v Value) FloatValue() float64 {
return v.decimal
func (v Value) FloatValue() (float64, bool) {
f, err := strconv.ParseFloat(string(v.raw), 64)
if err != nil {
return 0, false
}
return f, true
}
// BoolValue returns a bool value
func (v Value) BoolValue() bool {
return v.boolean
func (v Value) BoolValue() (bool, bool) {
// we don't use ParseBool as it recognizes more than what we've
// historically supported
if isCaselessLitValue(runesTrue, v.raw) {
return true, true
} else if isCaselessLitValue(runesFalse, v.raw) {
return false, true
}
return false, false
}
func isTrimmable(r rune) bool {

View file

@ -145,17 +145,17 @@ func (t Section) ValueType(k string) (ValueType, bool) {
}
// Bool returns a bool value at k
func (t Section) Bool(k string) bool {
func (t Section) Bool(k string) (bool, bool) {
return t.values[k].BoolValue()
}
// Int returns an integer value at k
func (t Section) Int(k string) int64 {
func (t Section) Int(k string) (int64, bool) {
return t.values[k].IntValue()
}
// Float64 returns a float value at k
func (t Section) Float64(k string) float64 {
func (t Section) Float64(k string) (float64, bool) {
return t.values[k].FloatValue()
}
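These accessors now parse on demand from the raw token text and report whether parsing succeeded; base-0 strconv means hex, octal, and binary prefixes are recognized. A rough standalone sketch of the caller-side two-value idiom (the real Section type is internal to the SDK):

// Sketch of the two-value lookup idiom the SDK now uses internally.
package main

import (
	"fmt"
	"strconv"
)

func lookupInt(raw string) (int64, bool) {
	// Base 0 mirrors the new IntValue: "16", "0x10" and "0o20" all parse to 16.
	i, err := strconv.ParseInt(raw, 0, 64)
	if err != nil {
		return 0, false
	}
	return i, true
}

func main() {
	for _, raw := range []string{"900", "0x10", "not-a-number"} {
		if v, ok := lookupInt(raw); ok {
			fmt.Printf("%q -> %d\n", raw, v)
		} else {
			fmt.Printf("%q -> unparsable; caller falls back to the zero value\n", raw)
		}
	}
}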

View file

@ -35,6 +35,8 @@ import (
const signAPI = "EnterpriseCertSigner.Sign"
const certificateChainAPI = "EnterpriseCertSigner.CertificateChain"
const publicKeyAPI = "EnterpriseCertSigner.Public"
const encryptAPI = "EnterpriseCertSigner.Encrypt"
const decryptAPI = "EnterpriseCertSigner.Decrypt"
// A Connection wraps a pair of unidirectional streams as an io.ReadWriteCloser.
type Connection struct {
@ -54,13 +56,28 @@ func (c *Connection) Close() error {
func init() {
gob.Register(crypto.SHA256)
gob.Register(crypto.SHA384)
gob.Register(crypto.SHA512)
gob.Register(&rsa.PSSOptions{})
gob.Register(&rsa.OAEPOptions{})
}
// SignArgs contains arguments to a crypto Signer.Sign method.
// SignArgs contains arguments for a Sign API call.
type SignArgs struct {
Digest []byte // The content to sign.
Opts crypto.SignerOpts // Options for signing, such as Hash identifier.
Opts crypto.SignerOpts // Options for signing. Must implement HashFunc().
}
// EncryptArgs contains arguments for an Encrypt API call.
type EncryptArgs struct {
Plaintext []byte // The plaintext to encrypt.
Opts any // Options for encryption. Ex: an instance of crypto.Hash.
}
// DecryptArgs contains arguments for a Decrypt API call.
type DecryptArgs struct {
Ciphertext []byte // The ciphertext to decrypt.
Opts crypto.DecrypterOpts // Options for decryption. Ex: an instance of *rsa.OAEPOptions.
}
// Key implements credential.Credential by holding the executed signer subprocess.
@ -98,7 +115,7 @@ func (k *Key) Public() crypto.PublicKey {
return k.publicKey
}
// Sign signs a message digest, using the specified signer options.
// Sign signs a message digest, using the specified signer opts. Implements the crypto.Signer interface.
func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed []byte, err error) {
if opts != nil && opts.HashFunc() != 0 && len(digest) != opts.HashFunc().Size() {
return nil, fmt.Errorf("Digest length of %v bytes does not match Hash function size of %v bytes", len(digest), opts.HashFunc().Size())
@ -107,6 +124,18 @@ func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed [
return
}
// Encrypt encrypts a plaintext msg into ciphertext, using the specified encrypt opts.
func (k *Key) Encrypt(_ io.Reader, msg []byte, opts any) (ciphertext []byte, err error) {
err = k.client.Call(encryptAPI, EncryptArgs{Plaintext: msg, Opts: opts}, &ciphertext)
return
}
// Decrypt decrypts a ciphertext msg into plaintext, using the specified decrypter opts. Implements the crypto.Decrypter interface.
func (k *Key) Decrypt(_ io.Reader, msg []byte, opts crypto.DecrypterOpts) (plaintext []byte, err error) {
err = k.client.Call(decryptAPI, DecryptArgs{Ciphertext: msg, Opts: opts}, &plaintext)
return
}
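Together with Sign, the subprocess-backed Key now satisfies both crypto.Signer and crypto.Decrypter. A minimal usage sketch, assuming a valid certificate_config.json is reachable; the ciphertext is a placeholder:

// Sketch: using the ECP Key as a crypto.Decrypter. Cred spawns the
// signer binary named in the config file; the ciphertext is a placeholder.
package main

import (
	"crypto"
	"crypto/rsa"
	"log"

	"github.com/googleapis/enterprise-certificate-proxy/client"
)

func main() {
	key, err := client.Cred("") // "" falls back to the env var, then the default path
	if err != nil {
		log.Fatal(err)
	}
	defer key.Close()

	ciphertext := []byte("placeholder")
	plaintext, err := key.Decrypt(nil, ciphertext, &rsa.OAEPOptions{Hash: crypto.SHA256})
	if err != nil {
		log.Fatal(err)
	}
	_ = plaintext
}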
// ErrCredUnavailable is a sentinel error that indicates ECP Cred is unavailable,
// possibly due to missing config or missing binary path.
var ErrCredUnavailable = errors.New("Cred is unavailable")
@ -120,8 +149,13 @@ var ErrCredUnavailable = errors.New("Cred is unavailable")
// The config file also specifies which certificate the signer should use.
func Cred(configFilePath string) (*Key, error) {
if configFilePath == "" {
envFilePath := util.GetConfigFilePathFromEnv()
if envFilePath != "" {
configFilePath = envFilePath
} else {
configFilePath = util.GetDefaultConfigFilePath()
}
}
enterpriseCertSignerPath, err := util.LoadSignerBinaryPath(configFilePath)
if err != nil {
if errors.Is(err, util.ErrConfigUnavailable) {

View file

@ -22,6 +22,7 @@ import (
"os/user"
"path/filepath"
"runtime"
"strings"
)
const configFileName = "certificate_config.json"
@ -63,6 +64,9 @@ func LoadSignerBinaryPath(configFilePath string) (path string, err error) {
if signerBinaryPath == "" {
return "", ErrConfigUnavailable
}
signerBinaryPath = strings.ReplaceAll(signerBinaryPath, "~", guessHomeDir())
signerBinaryPath = strings.ReplaceAll(signerBinaryPath, "$HOME", guessHomeDir())
return signerBinaryPath, nil
}
@ -89,3 +93,8 @@ func getDefaultConfigFileDirectory() (directory string) {
func GetDefaultConfigFilePath() (path string) {
return filepath.Join(getDefaultConfigFileDirectory(), configFileName)
}
// GetConfigFilePathFromEnv returns the path associated with the environment variable GOOGLE_API_CERTIFICATE_CONFIG.
func GetConfigFilePathFromEnv() (path string) {
return os.Getenv("GOOGLE_API_CERTIFICATE_CONFIG")
}
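The path expansion above is plain string substitution applied to whatever signer path the config file carries. A sketch of the equivalent behavior, using os.UserHomeDir as a stand-in for the package's internal guessHomeDir:

// Sketch of the tilde/$HOME expansion now applied to the configured
// signer binary path.
package main

import (
	"fmt"
	"os"
	"strings"
)

func expand(path string) string {
	home, err := os.UserHomeDir()
	if err != nil {
		home = os.Getenv("HOME")
	}
	path = strings.ReplaceAll(path, "~", home)
	return strings.ReplaceAll(path, "$HOME", home)
}

func main() {
	fmt.Println(expand("~/bin/signer"))     // e.g. /home/user/bin/signer
	fmt.Println(expand("$HOME/bin/signer")) // same result
}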

View file

@ -3,7 +3,7 @@
before:
hooks:
- ./gen.sh
- go install mvdan.cc/garble@v0.9.3
- go install mvdan.cc/garble@v0.10.1
builds:
-
@ -92,16 +92,7 @@ builds:
archives:
-
id: s2-binaries
name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}"
replacements:
aix: AIX
darwin: OSX
linux: Linux
windows: Windows
386: i386
amd64: x86_64
freebsd: FreeBSD
netbsd: NetBSD
name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
format_overrides:
- goos: windows
format: zip
@ -125,7 +116,7 @@ changelog:
nfpms:
-
file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
vendor: Klaus Post
homepage: https://github.com/klauspost/compress
maintainer: Klaus Post <klauspost@gmail.com>
@ -134,8 +125,3 @@ nfpms:
formats:
- deb
- rpm
replacements:
darwin: Darwin
linux: Linux
freebsd: FreeBSD
amd64: x86_64

View file

@ -16,6 +16,10 @@ This package provides various compression algorithms.
# changelog
* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7)
* zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829
* s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832
* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6)
* zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806
* zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824
@ -50,6 +54,9 @@ This package provides various compression algorithms.
* s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747
* s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746
<details>
<summary>See changes to v1.15.x</summary>
* Jan 21st, 2023 (v1.15.15)
* deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
* zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
@ -176,6 +183,8 @@ Stream decompression is now faster on asynchronous, since the goroutine allocati
While the release has been extensively tested, it is recommended to testing when upgrading.
</details>
<details>
<summary>See changes to v1.14.x</summary>
@ -636,6 +645,7 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv
* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.
* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.
* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index.
# license

View file

@ -7,6 +7,7 @@ package flate
import (
"encoding/binary"
"errors"
"fmt"
"io"
"math"
@ -833,6 +834,12 @@ func (d *compressor) init(w io.Writer, level int) (err error) {
d.initDeflate()
d.fill = (*compressor).fillDeflate
d.step = (*compressor).deflateLazy
case -level >= MinCustomWindowSize && -level <= MaxCustomWindowSize:
d.w.logNewTablePenalty = 7
d.fast = &fastEncL5Window{maxOffset: int32(-level), cur: maxStoreBlockSize}
d.window = make([]byte, maxStoreBlockSize)
d.fill = (*compressor).fillBlock
d.step = (*compressor).storeFast
default:
return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
}
@ -929,6 +936,28 @@ func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
return zw, err
}
// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow.
const MinCustomWindowSize = 32
// MaxCustomWindowSize is the maximum custom window size that can be sent to NewWriterWindow.
const MaxCustomWindowSize = windowSize
// NewWriterWindow returns a new Writer compressing data with a custom window size.
// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize.
func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) {
if windowSize < MinCustomWindowSize {
return nil, errors.New("flate: requested window size less than MinWindowSize")
}
if windowSize > MaxCustomWindowSize {
return nil, errors.New("flate: requested window size bigger than MaxCustomWindowSize")
}
var dw Writer
if err := dw.d.init(w, -windowSize); err != nil {
return nil, err
}
return &dw, nil
}
// A Writer takes data written to it and writes the compressed
// form of that data to an underlying writer (see NewWriter).
type Writer struct {

View file

@ -8,7 +8,6 @@ package flate
import (
"encoding/binary"
"fmt"
"math/bits"
)
type fastEnc interface {
@ -192,25 +191,3 @@ func (e *fastGen) Reset() {
}
e.hist = e.hist[:0]
}
// matchLen returns the maximum length.
// 'a' must be the shortest of the two.
func matchLen(a, b []byte) int {
var checked int
for len(a) >= 8 {
if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
return checked + (bits.TrailingZeros64(diff) >> 3)
}
checked += 8
a = a[8:]
b = b[8:]
}
b = b[:len(a)]
for i := range a {
if a[i] != b[i] {
return i + checked
}
}
return len(a) + checked
}

View file

@ -308,3 +308,401 @@ emitRemainder:
emitLiteral(dst, src[nextEmit:])
}
}
// fastEncL5Window is a level 5 encoder,
// but with a custom window size.
type fastEncL5Window struct {
hist []byte
cur int32
maxOffset int32
table [tableSize]tableEntry
bTable [tableSize]tableEntryPrev
}
func (e *fastEncL5Window) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
hashShortBytes = 4
)
maxMatchOffset := e.maxOffset
if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur))
}
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
for i := range e.bTable[:] {
e.bTable[i] = tableEntryPrev{}
}
e.cur = maxMatchOffset
break
}
// Shift down everything in the table that isn't already too far away.
minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
for i := range e.table[:] {
v := e.table[i].offset
if v <= minOff {
v = 0
} else {
v = v - e.cur + maxMatchOffset
}
e.table[i].offset = v
}
for i := range e.bTable[:] {
v := e.bTable[i]
if v.Cur.offset <= minOff {
v.Cur.offset = 0
v.Prev.offset = 0
} else {
v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
if v.Prev.offset <= minOff {
v.Prev.offset = 0
} else {
v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
}
}
e.bTable[i] = v
}
e.cur = maxMatchOffset
}
s := e.addBlock(src)
// This check isn't in the Snappy implementation, but there, the caller
// instead of the callee handles this case.
if len(src) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
dst.n = uint16(len(src))
return
}
// Override src
src = e.hist
nextEmit := s
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
cv := load6432(src, s)
for {
const skipLog = 6
const doEvery = 1
nextS := s
var l int32
var t int32
for {
nextHashS := hashLen(cv, tableBits, hashShortBytes)
nextHashL := hash7(cv, tableBits)
s = nextS
nextS = s + doEvery + (s-nextEmit)>>skipLog
if nextS > sLimit {
goto emitRemainder
}
// Fetch a short+long candidate
sCandidate := e.table[nextHashS]
lCandidate := e.bTable[nextHashL]
next := load6432(src, nextS)
entry := tableEntry{offset: s + e.cur}
e.table[nextHashS] = entry
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = entry, eLong.Cur
nextHashS = hashLen(next, tableBits, hashShortBytes)
nextHashL = hash7(next, tableBits)
t = lCandidate.Cur.offset - e.cur
if s-t < maxMatchOffset {
if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
t2 := lCandidate.Prev.offset - e.cur
if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
l = e.matchlen(s+4, t+4, src) + 4
ml1 := e.matchlen(s+4, t2+4, src) + 4
if ml1 > l {
t = t2
l = ml1
break
}
}
break
}
t = lCandidate.Prev.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
break
}
}
t = sCandidate.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
// Found a 4 match...
l = e.matchlen(s+4, t+4, src) + 4
lCandidate = e.bTable[nextHashL]
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
// If the next long is a candidate, use that...
t2 := lCandidate.Cur.offset - e.cur
if nextS-t2 < maxMatchOffset {
if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
ml := e.matchlen(nextS+4, t2+4, src) + 4
if ml > l {
t = t2
s = nextS
l = ml
break
}
}
// If the previous long is a candidate, use that...
t2 = lCandidate.Prev.offset - e.cur
if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
ml := e.matchlen(nextS+4, t2+4, src) + 4
if ml > l {
t = t2
s = nextS
l = ml
break
}
}
}
break
}
cv = next
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
if l == 0 {
// Extend the 4-byte match as long as possible.
l = e.matchlenLong(s+4, t+4, src) + 4
} else if l == maxMatchLength {
l += e.matchlenLong(s+l, t+l, src)
}
// Try to locate a better match by checking the end of the best match...
if sAt := s + l; l < 30 && sAt < sLimit {
// Allow some bytes at the beginning to mismatch.
// The sweet spot is 2-3 bytes, depending on input;
// 3 is only a little better when it helps, but sometimes a lot worse.
// The skipped bytes are tested by the "extend backwards" step below,
// and are still picked up as part of the match if they match.
const skipBeginning = 2
eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
t2 := eLong - e.cur - l + skipBeginning
s2 := s + skipBeginning
off := s2 - t2
if t2 >= 0 && off < maxMatchOffset && off > 0 {
if l2 := e.matchlenLong(s2, t2, src); l2 > l {
t = t2
l = l2
s = s2
}
}
}
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
s--
t--
l++
}
if nextEmit < s {
if false {
emitLiteral(dst, src[nextEmit:s])
} else {
for _, v := range src[nextEmit:s] {
dst.tokens[dst.n] = token(v)
dst.litHist[v]++
dst.n++
}
}
}
if debugDeflate {
if t >= s {
panic(fmt.Sprintln("s-t", s, t))
}
if (s - t) > maxMatchOffset {
panic(fmt.Sprintln("mmo", s-t))
}
if l < baseMatchLength {
panic("bml")
}
}
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
s += l
nextEmit = s
if nextS >= s {
s = nextS + 1
}
if s >= sLimit {
goto emitRemainder
}
// Store every 3rd hash in-between.
if true {
const hashEvery = 3
i := s - l + 1
if i < s-1 {
cv := load6432(src, i)
t := tableEntry{offset: i + e.cur}
e.table[hashLen(cv, tableBits, hashShortBytes)] = t
eLong := &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = t, eLong.Cur
// Add a long entry at i+1
cv >>= 8
t = tableEntry{offset: t.offset + 1}
eLong = &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = t, eLong.Cur
// We only have enough bits for a short entry at i+2
cv >>= 8
t = tableEntry{offset: t.offset + 1}
e.table[hashLen(cv, tableBits, hashShortBytes)] = t
// Skip one - otherwise we risk hitting 's'
i += 4
for ; i < s-1; i += hashEvery {
cv := load6432(src, i)
t := tableEntry{offset: i + e.cur}
t2 := tableEntry{offset: t.offset + 1}
eLong := &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = t, eLong.Cur
e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
}
}
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-1 and at s.
x := load6432(src, s-1)
o := e.cur + s - 1
prevHashS := hashLen(x, tableBits, hashShortBytes)
prevHashL := hash7(x, tableBits)
e.table[prevHashS] = tableEntry{offset: o}
eLong := &e.bTable[prevHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
cv = x >> 8
}
emitRemainder:
if int(nextEmit) < len(src) {
// If nothing was added, don't encode literals.
if dst.n == 0 {
return
}
emitLiteral(dst, src[nextEmit:])
}
}
// Reset the encoding table.
func (e *fastEncL5Window) Reset() {
// We keep the same allocs, since we are compressing the same block sizes.
if cap(e.hist) < allocHistory {
e.hist = make([]byte, 0, allocHistory)
}
// We offset the current position so everything will be out of reach.
// If we are above bufferReset it will be cleared anyway, since len(hist) == 0.
if e.cur <= int32(bufferReset) {
e.cur += e.maxOffset + int32(len(e.hist))
}
e.hist = e.hist[:0]
}
func (e *fastEncL5Window) addBlock(src []byte) int32 {
// check if we have space already
maxMatchOffset := e.maxOffset
if len(e.hist)+len(src) > cap(e.hist) {
if cap(e.hist) == 0 {
e.hist = make([]byte, 0, allocHistory)
} else {
if cap(e.hist) < int(maxMatchOffset*2) {
panic("unexpected buffer size")
}
// Move down
offset := int32(len(e.hist)) - maxMatchOffset
copy(e.hist[0:maxMatchOffset], e.hist[offset:])
e.cur += offset
e.hist = e.hist[:maxMatchOffset]
}
}
s := int32(len(e.hist))
e.hist = append(e.hist, src...)
return s
}
// matchlen will return the match length between offsets s and t in src.
// The maximum length returned is maxMatchLength - 4.
// It is assumed that s > t, that t >= 0 and s < len(src).
func (e *fastEncL5Window) matchlen(s, t int32, src []byte) int32 {
if debugDecode {
if t >= s {
panic(fmt.Sprint("t >=s:", t, s))
}
if int(s) >= len(src) {
panic(fmt.Sprint("s >= len(src):", s, len(src)))
}
if t < 0 {
panic(fmt.Sprint("t < 0:", t))
}
if s-t > e.maxOffset {
panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
}
}
s1 := int(s) + maxMatchLength - 4
if s1 > len(src) {
s1 = len(src)
}
// Extend the match to be as long as possible.
return int32(matchLen(src[s:s1], src[t:]))
}
// matchlenLong will return the match length between offsets s and t in src.
// It is assumed that s > t, that t >= 0 and s < len(src).
func (e *fastEncL5Window) matchlenLong(s, t int32, src []byte) int32 {
if debugDeflate {
if t >= s {
panic(fmt.Sprint("t >=s:", t, s))
}
if int(s) >= len(src) {
panic(fmt.Sprint("s >= len(src):", s, len(src)))
}
if t < 0 {
panic(fmt.Sprint("t < 0:", t))
}
if s-t > e.maxOffset {
panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
}
}
// Extend the match to be as long as possible.
return int32(matchLen(src[s:], src[t:]))
}

View file

@ -0,0 +1,16 @@
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
package flate
// matchLen returns how many bytes match in a and b
//
// It assumes that:
//
// len(a) <= len(b) and len(a) > 0
//
//go:noescape
func matchLen(a []byte, b []byte) int

View file

@ -0,0 +1,68 @@
// Copied from S2 implementation.
//go:build !appengine && !noasm && gc && !noasm
#include "textflag.h"
// func matchLen(a []byte, b []byte) int
// Requires: BMI
TEXT ·matchLen(SB), NOSPLIT, $0-56
MOVQ a_base+0(FP), AX
MOVQ b_base+24(FP), CX
MOVQ a_len+8(FP), DX
// matchLen
XORL SI, SI
CMPL DX, $0x08
JB matchlen_match4_standalone
matchlen_loopback_standalone:
MOVQ (AX)(SI*1), BX
XORQ (CX)(SI*1), BX
TESTQ BX, BX
JZ matchlen_loop_standalone
#ifdef GOAMD64_v3
TZCNTQ BX, BX
#else
BSFQ BX, BX
#endif
SARQ $0x03, BX
LEAL (SI)(BX*1), SI
JMP gen_match_len_end
matchlen_loop_standalone:
LEAL -8(DX), DX
LEAL 8(SI), SI
CMPL DX, $0x08
JAE matchlen_loopback_standalone
matchlen_match4_standalone:
CMPL DX, $0x04
JB matchlen_match2_standalone
MOVL (AX)(SI*1), BX
CMPL (CX)(SI*1), BX
JNE matchlen_match2_standalone
LEAL -4(DX), DX
LEAL 4(SI), SI
matchlen_match2_standalone:
CMPL DX, $0x02
JB matchlen_match1_standalone
MOVW (AX)(SI*1), BX
CMPW (CX)(SI*1), BX
JNE matchlen_match1_standalone
LEAL -2(DX), DX
LEAL 2(SI), SI
matchlen_match1_standalone:
CMPL DX, $0x01
JB gen_match_len_end
MOVB (AX)(SI*1), BL
CMPB (CX)(SI*1), BL
JNE gen_match_len_end
INCL SI
gen_match_len_end:
MOVQ SI, ret+48(FP)
RET

View file

@ -0,0 +1,33 @@
//go:build !amd64 || appengine || !gc || noasm
// +build !amd64 appengine !gc noasm
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
package flate
import (
"encoding/binary"
"math/bits"
)
// matchLen returns the maximum common prefix length of a and b.
// a must be the shortest of the two.
func matchLen(a, b []byte) (n int) {
for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
if diff != 0 {
return n + bits.TrailingZeros64(diff)>>3
}
n += 8
}
for i := range a {
if a[i] != b[i] {
break
}
n++
}
return n
}
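The generic fallback computes the common-prefix length eight bytes at a time, then byte by byte. A standalone copy makes the contract easy to check (a must be the shorter slice):

// Quick demonstration of the matchLen contract using an equivalent
// standalone copy (the real function is unexported inside flate).
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

func matchLen(a, b []byte) (n int) {
	for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
		if diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
	}
	for i := range a {
		if a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLen([]byte("gopher gopher!"), []byte("gopher gophers"))) // 13
}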

View file

@ -152,12 +152,11 @@ func (b *bitWriter) flushAlign() {
// close will write the alignment bit and write the final byte(s)
// to the output.
func (b *bitWriter) close() error {
func (b *bitWriter) close() {
// End mark
b.addBits16Clean(1, 1)
// flush until next byte.
b.flushAlign()
return nil
}
// reset and continue writing by appending to out.

View file

@ -199,7 +199,8 @@ func (s *Scratch) compress(src []byte) error {
c2.flush(s.actualTableLog)
c1.flush(s.actualTableLog)
return s.bw.close()
s.bw.close()
return nil
}
// writeCount will write the normalized histogram count to header.

View file

@ -106,6 +106,7 @@ func (z *Reader) Reset(r io.Reader) error {
*z = Reader{
decompressor: z.decompressor,
multistream: true,
br: z.br,
}
if rr, ok := r.(flate.Reader); ok {
z.r = rr

View file

@ -74,6 +74,27 @@ func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
return z, nil
}
// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow.
const MinCustomWindowSize = flate.MinCustomWindowSize
// MaxCustomWindowSize is the maximum custom window size that can be sent to NewWriterWindow.
const MaxCustomWindowSize = flate.MaxCustomWindowSize
// NewWriterWindow returns a new Writer compressing data with a custom window size.
// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize.
func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) {
if windowSize < MinCustomWindowSize {
return nil, errors.New("gzip: requested window size less than MinWindowSize")
}
if windowSize > MaxCustomWindowSize {
return nil, errors.New("gzip: requested window size bigger than MaxCustomWindowSize")
}
z := new(Writer)
z.init(w, -windowSize)
return z, nil
}
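The custom window trades compression ratio for memory by bounding match distances. A minimal round-trip sketch of the gzip-level API with a 4 KiB window; the size must fall within [MinCustomWindowSize, MaxCustomWindowSize], and the output remains standard gzip:

// Sketch: compressing with a custom 4 KiB window via the new API.
package main

import (
	"bytes"
	"io"
	"log"

	"github.com/klauspost/compress/gzip"
)

func main() {
	var buf bytes.Buffer
	w, err := gzip.NewWriterWindow(&buf, 4<<10) // 4 KiB window
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write(bytes.Repeat([]byte("abcd"), 1024)); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	// The stream stays standard gzip; any reader can decompress it.
	r, err := gzip.NewReader(&buf)
	if err != nil {
		log.Fatal(err)
	}
	out, err := io.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	_ = out
}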
func (z *Writer) init(w io.Writer, level int) {
compressor := z.compressor
if level != StatelessCompression {

View file

@ -94,10 +94,9 @@ func (b *bitWriter) flushAlign() {
// close will write the alignment bit and write the final byte(s)
// to the output.
func (b *bitWriter) close() error {
func (b *bitWriter) close() {
// End mark
b.addBits16Clean(1, 1)
// flush until next byte.
b.flushAlign()
return nil
}

View file

@ -227,10 +227,10 @@ func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err err
}
func (s *Scratch) compress1X(src []byte) ([]byte, error) {
return s.compress1xDo(s.Out, src)
return s.compress1xDo(s.Out, src), nil
}
func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
func (s *Scratch) compress1xDo(dst, src []byte) []byte {
var bw = bitWriter{out: dst}
// N is length divisible by 4.
@ -260,8 +260,8 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
bw.encTwoSymbols(cTable, tmp[1], tmp[0])
}
}
err := bw.close()
return bw.out, err
bw.close()
return bw.out
}
var sixZeros [6]byte
@ -283,12 +283,8 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) {
}
src = src[len(toDo):]
var err error
idx := len(s.Out)
s.Out, err = s.compress1xDo(s.Out, toDo)
if err != nil {
return nil, err
}
s.Out = s.compress1xDo(s.Out, toDo)
if len(s.Out)-idx > math.MaxUint16 {
// We cannot store the size in the jump table
return nil, ErrIncompressible
@ -315,7 +311,6 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
segmentSize := (len(src) + 3) / 4
var wg sync.WaitGroup
var errs [4]error
wg.Add(4)
for i := 0; i < 4; i++ {
toDo := src
@ -326,15 +321,12 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
// Separate goroutine for each block.
go func(i int) {
s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
wg.Done()
}(i)
}
wg.Wait()
for i := 0; i < 4; i++ {
if errs[i] != nil {
return nil, errs[i]
}
o := s.tmpOut[i]
if len(o) > math.MaxUint16 {
// We cannot store the size in the jump table

View file

@ -106,6 +106,25 @@ func MakeDict(data []byte, searchStart []byte) *Dict {
return &d
}
// MakeDictManual will create a dictionary.
// 'data' must be at least MinDictSize and less than or equal to MaxDictSize.
// A manual first repeat index into data must be provided.
// It must be less than len(data)-8.
func MakeDictManual(data []byte, firstIdx uint16) *Dict {
if len(data) < MinDictSize || int(firstIdx) >= len(data)-8 || len(data) > MaxDictSize {
return nil
}
var d Dict
dict := data
d.dict = dict
if cap(d.dict) < len(d.dict)+16 {
d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...)
}
d.repeat = int(firstIdx)
return &d
}
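MakeDict scans for a good first-repeat position automatically; the manual variant lets the caller pin it. A minimal sketch; the dictionary bytes are arbitrary, and the dict-aware Encode method on *Dict is assumed from the existing s2 dictionary API:

// Sketch: building a dictionary with a pinned first-repeat index.
package main

import (
	"fmt"
	"log"

	"github.com/klauspost/compress/s2"
)

func main() {
	dictData := []byte("common prefix shared by many payloads...")
	d := s2.MakeDictManual(dictData, 0) // first repeat pinned to offset 0
	if d == nil {
		log.Fatal("dict size out of range or first index invalid")
	}
	// d.Encode is assumed from the s2 dict API.
	encoded := d.Encode(nil, []byte("common prefix shared by many payloads, plus a tail"))
	fmt.Println(len(encoded))
}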
// Encode returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.

File diff suppressed because it is too large.

View file

@@ -511,13 +511,14 @@ func IndexStream(r io.Reader) ([]byte, error) {
// JSON returns the index as JSON text.
func (i *Index) JSON() []byte {
type offset struct {
CompressedOffset int64 `json:"compressed"`
UncompressedOffset int64 `json:"uncompressed"`
}
x := struct {
TotalUncompressed int64 `json:"total_uncompressed"` // Total Uncompressed size if known. Will be -1 if unknown.
TotalCompressed int64 `json:"total_compressed"` // Total Compressed size if known. Will be -1 if unknown.
Offsets []struct {
CompressedOffset int64 `json:"compressed"`
UncompressedOffset int64 `json:"uncompressed"`
} `json:"offsets"`
Offsets []offset `json:"offsets"`
EstBlockUncomp int64 `json:"est_block_uncompressed"`
}{
TotalUncompressed: i.TotalUncompressed,
@@ -525,10 +526,7 @@ func (i *Index) JSON() []byte {
EstBlockUncomp: i.estBlockUncomp,
}
for _, v := range i.info {
x.Offsets = append(x.Offsets, struct {
CompressedOffset int64 `json:"compressed"`
UncompressedOffset int64 `json:"uncompressed"`
}{CompressedOffset: v.compressedOffset, UncompressedOffset: v.uncompressedOffset})
x.Offsets = append(x.Offsets, offset{CompressedOffset: v.compressedOffset, UncompressedOffset: v.uncompressedOffset})
}
b, _ := json.MarshalIndent(x, "", " ")
return b
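// The emitted JSON has this shape (field names follow the struct tags
// above; the values here are illustrative only):
//
//	{
//	  "total_uncompressed": -1,
//	  "total_compressed": -1,
//	  "offsets": [{"compressed": 0, "uncompressed": 102400}],
//	  "est_block_uncompressed": 1048576
//	}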

View file

@@ -17,7 +17,6 @@ import (
// for aligning the input.
type bitReader struct {
in []byte
off uint // next byte to read is at in[off - 1]
value uint64 // Maybe use [16]byte, but shifting is awkward.
bitsRead uint8
}
@@ -28,7 +27,6 @@ func (b *bitReader) init(in []byte) error {
return errors.New("corrupt stream: too short")
}
b.in = in
b.off = uint(len(in))
// The highest bit of the last byte indicates where to start
v := in[len(in)-1]
if v == 0 {
@@ -69,21 +67,19 @@ func (b *bitReader) fillFast() {
if b.bitsRead < 32 {
return
}
// 2 bounds checks.
v := b.in[b.off-4:]
v = v[:4]
v := b.in[len(b.in)-4:]
b.in = b.in[:len(b.in)-4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value = (b.value << 32) | uint64(low)
b.bitsRead -= 32
b.off -= 4
}
// fillFastStart() assumes the bitreader is empty and there are at least 8 bytes to read.
func (b *bitReader) fillFastStart() {
// Do single re-slice to avoid bounds checks.
b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
v := b.in[len(b.in)-8:]
b.in = b.in[:len(b.in)-8]
b.value = binary.LittleEndian.Uint64(v)
b.bitsRead = 0
b.off -= 8
}
// fill() will make sure at least 32 bits are available.
@@ -91,25 +87,25 @@ func (b *bitReader) fill() {
if b.bitsRead < 32 {
return
}
if b.off >= 4 {
v := b.in[b.off-4:]
v = v[:4]
if len(b.in) >= 4 {
v := b.in[len(b.in)-4:]
b.in = b.in[:len(b.in)-4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value = (b.value << 32) | uint64(low)
b.bitsRead -= 32
b.off -= 4
return
}
for b.off > 0 {
b.value = (b.value << 8) | uint64(b.in[b.off-1])
b.bitsRead -= 8
b.off--
b.bitsRead -= uint8(8 * len(b.in))
for len(b.in) > 0 {
b.value = (b.value << 8) | uint64(b.in[len(b.in)-1])
b.in = b.in[:len(b.in)-1]
}
}
// finished returns true if all bits have been read from the bit stream.
func (b *bitReader) finished() bool {
return b.off == 0 && b.bitsRead >= 64
return len(b.in) == 0 && b.bitsRead >= 64
}
// overread returns true if more bits have been requested than is on the stream.
@@ -119,7 +115,7 @@ func (b *bitReader) overread() bool {
// remain returns the number of bits remaining.
func (b *bitReader) remain() uint {
return b.off*8 + 64 - uint(b.bitsRead)
return 8*uint(len(b.in)) + 64 - uint(b.bitsRead)
}
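The hunks above replace the explicit off cursor with tail-slicing of b.in: bytes are consumed from the end of the slice, so len(b.in) is both the read position and the remaining count. A standalone sketch of the same pattern (type and values invented):

package main

import (
	"encoding/binary"
	"fmt"
)

// tailReader consumes bytes from the end of the slice by re-slicing,
// so no separate offset field is needed.
type tailReader struct {
	in    []byte
	value uint64
}

func (r *tailReader) fillFastStart() {
	// Take the last 8 bytes and shrink the slice, as in fillFastStart above.
	v := r.in[len(r.in)-8:]
	r.in = r.in[:len(r.in)-8]
	r.value = binary.LittleEndian.Uint64(v)
}

func main() {
	r := tailReader{in: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}}
	r.fillFastStart()
	fmt.Printf("value=%#x remaining=%d\n", r.value, len(r.in)) // remaining=1
}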
// close the bitstream and returns an error if out-of-buffer reads occurred.

View file

@@ -97,12 +97,11 @@ func (b *bitWriter) flushAlign() {
// close will write the alignment bit and write the final byte(s)
// to the output.
func (b *bitWriter) close() error {
func (b *bitWriter) close() {
// End mark
b.addBits16Clean(1, 1)
// flush until next byte.
b.flushAlign()
return nil
}
// reset and continue writing by appending to out.

View file

@@ -361,14 +361,21 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
if len(lits) >= 1024 {
// Use 4 Streams.
out, reUsed, err = huff0.Compress4X(lits, b.litEnc)
} else if len(lits) > 32 {
} else if len(lits) > 16 {
// Use 1 stream
single = true
out, reUsed, err = huff0.Compress1X(lits, b.litEnc)
} else {
err = huff0.ErrIncompressible
}
if err == nil && len(out)+5 > len(lits) {
// If we are close, we may still be worse or equal to raw.
var lh literalsHeader
lh.setSizes(len(out), len(lits), single)
if len(out)+lh.size() >= len(lits) {
err = huff0.ErrIncompressible
}
}
switch err {
case huff0.ErrIncompressible:
if debugEncoder {
@@ -503,7 +510,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
if len(b.literals) >= 1024 && !raw {
// Use 4 Streams.
out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
} else if len(b.literals) > 32 && !raw {
} else if len(b.literals) > 16 && !raw {
// Use 1 stream
single = true
out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
@@ -511,6 +518,17 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
err = huff0.ErrIncompressible
}
if err == nil && len(out)+5 > len(b.literals) {
// If we are close, we may still be worse or equal to raw.
var lh literalsHeader
lh.setSize(len(b.literals))
szRaw := lh.size()
lh.setSizes(len(out), len(b.literals), single)
szComp := lh.size()
if len(out)+szComp >= len(b.literals)+szRaw {
err = huff0.ErrIncompressible
}
}
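// The check above charges each variant its own literals-header cost.
// For example (header sizes assumed for illustration): 300 literals
// compressing to 295 with a 3-byte compressed header vs. a 2-byte raw
// header gives 295+3 >= 300+2 -> false, so the compressed form is kept;
// at 299 bytes, 299+3 >= 300+2 -> true, and the block stays raw.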
switch err {
case huff0.ErrIncompressible:
lh.setType(literalsBlockRaw)
@@ -773,10 +791,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
ml.flush(mlEnc.actualTableLog)
of.flush(ofEnc.actualTableLog)
ll.flush(llEnc.actualTableLog)
err = wr.close()
if err != nil {
return err
}
wr.close()
b.output = wr.out
// Maybe even add a bigger margin.

View file

@@ -1,10 +1,13 @@
package zstd
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math"
"sort"
"github.com/klauspost/compress/huff0"
)
@@ -14,7 +17,6 @@ type dict struct {
litEnc *huff0.Scratch
llDec, ofDec, mlDec sequenceDec
//llEnc, ofEnc, mlEnc []*fseEncoder
offsets [3]int
content []byte
}
@@ -159,3 +161,374 @@ func InspectDictionary(b []byte) (interface {
d, err := loadDict(b)
return d, err
}
type BuildDictOptions struct {
// Dictionary ID.
ID uint32
// Content to use to create dictionary tables.
Contents [][]byte
// History to use for all blocks.
History []byte
// Offsets to use.
Offsets [3]int
// CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier.
// See https://github.com/facebook/zstd/issues/3724
CompatV155 bool
// Use the specified encoder level.
// The dictionary will be built using this level,
// making it tailored for that level and reflecting its speed.
// If not set, SpeedBestCompression will be used.
Level EncoderLevel
// DebugOut will write stats and other details here if set.
DebugOut io.Writer
}
func BuildDict(o BuildDictOptions) ([]byte, error) {
initPredefined()
hist := o.History
contents := o.Contents
debug := o.DebugOut != nil
println := func(args ...interface{}) {
if o.DebugOut != nil {
fmt.Fprintln(o.DebugOut, args...)
}
}
printf := func(s string, args ...interface{}) {
if o.DebugOut != nil {
fmt.Fprintf(o.DebugOut, s, args...)
}
}
print := func(args ...interface{}) {
if o.DebugOut != nil {
fmt.Fprint(o.DebugOut, args...)
}
}
if int64(len(hist)) > dictMaxLength {
return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength))
}
if len(hist) < 8 {
return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8)
}
if len(contents) == 0 {
return nil, errors.New("no content provided")
}
d := dict{
id: o.ID,
litEnc: nil,
llDec: sequenceDec{},
ofDec: sequenceDec{},
mlDec: sequenceDec{},
offsets: o.Offsets,
content: hist,
}
block := blockEnc{lowMem: false}
block.init()
enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}})
if o.Level != 0 {
eOpts := encoderOptions{
level: o.Level,
blockSize: maxMatchLen,
windowSize: maxMatchLen,
dict: &d,
lowMem: false,
}
enc = eOpts.encoder()
} else {
o.Level = SpeedBestCompression
}
var (
remain [256]int
ll [256]int
ml [256]int
of [256]int
)
addValues := func(dst *[256]int, src []byte) {
for _, v := range src {
dst[v]++
}
}
addHist := func(dst *[256]int, src *[256]uint32) {
for i, v := range src {
dst[i] += int(v)
}
}
seqs := 0
nUsed := 0
litTotal := 0
newOffsets := make(map[uint32]int, 1000)
for _, b := range contents {
block.reset(nil)
if len(b) < 8 {
continue
}
nUsed++
enc.Reset(&d, true)
enc.Encode(&block, b)
addValues(&remain, block.literals)
litTotal += len(block.literals)
seqs += len(block.sequences)
block.genCodes()
addHist(&ll, block.coders.llEnc.Histogram())
addHist(&ml, block.coders.mlEnc.Histogram())
addHist(&of, block.coders.ofEnc.Histogram())
for i, seq := range block.sequences {
if i > 3 {
break
}
offset := seq.offset
if offset == 0 {
continue
}
if offset > 3 {
newOffsets[offset-3]++
} else {
newOffsets[uint32(o.Offsets[offset-1])]++
}
}
}
// Find most used offsets.
var sortedOffsets []uint32
for k := range newOffsets {
sortedOffsets = append(sortedOffsets, k)
}
sort.Slice(sortedOffsets, func(i, j int) bool {
a, b := sortedOffsets[i], sortedOffsets[j]
if newOffsets[a] == newOffsets[b] {
// On equal counts, prefer the longer offset.
return a > b
}
return newOffsets[a] > newOffsets[b]
})
if len(sortedOffsets) > 3 {
if debug {
print("Offsets:")
for i, v := range sortedOffsets {
if i > 20 {
break
}
printf("[%d: %d],", v, newOffsets[v])
}
println("")
}
sortedOffsets = sortedOffsets[:3]
}
for i, v := range sortedOffsets {
o.Offsets[i] = int(v)
}
if debug {
println("New repeat offsets", o.Offsets)
}
if nUsed == 0 || seqs == 0 {
return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs)
}
if debug {
println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal)
}
if seqs/nUsed < 512 {
// Use 512 as minimum.
nUsed = seqs / 512
if nUsed == 0 {
nUsed = 1
}
}
copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) {
hist := dst.Histogram()
var maxSym uint8
var maxCount int
var fakeLength int
for i, v := range src {
if v > 0 {
v = v / nUsed
if v == 0 {
v = 1
}
}
if v > maxCount {
maxCount = v
}
if v != 0 {
maxSym = uint8(i)
}
fakeLength += v
hist[i] = uint32(v)
}
dst.HistogramFinished(maxSym, maxCount)
dst.reUsed = false
dst.useRLE = false
err := dst.normalizeCount(fakeLength)
if err != nil {
return nil, err
}
if debug {
println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength)
}
return dst.writeCount(nil)
}
if debug {
print("Literal lengths: ")
}
llTable, err := copyHist(block.coders.llEnc, &ll)
if err != nil {
return nil, err
}
if debug {
print("Match lengths: ")
}
mlTable, err := copyHist(block.coders.mlEnc, &ml)
if err != nil {
return nil, err
}
if debug {
print("Offsets: ")
}
ofTable, err := copyHist(block.coders.ofEnc, &of)
if err != nil {
return nil, err
}
// Literal table
avgSize := litTotal
if avgSize > huff0.BlockSizeMax/2 {
avgSize = huff0.BlockSizeMax / 2
}
huffBuff := make([]byte, 0, avgSize)
// Target size
div := litTotal / avgSize
if div < 1 {
div = 1
}
if debug {
println("Huffman weights:")
}
for i, n := range remain[:] {
if n > 0 {
n = n / div
// Allow all entries to be represented.
if n == 0 {
n = 1
}
huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
if debug {
printf("[%d: %d], ", i, n)
}
}
}
if o.CompatV155 && remain[255]/div == 0 {
huffBuff = append(huffBuff, 255)
}
scratch := &huff0.Scratch{TableLog: 11}
for tries := 0; tries < 255; tries++ {
scratch = &huff0.Scratch{TableLog: 11}
_, _, err = huff0.Compress1X(huffBuff, scratch)
if err == nil {
break
}
if debug {
printf("Try %d: Huffman error: %v\n", tries+1, err)
}
huffBuff = huffBuff[:0]
if tries == 250 {
if debug {
println("Huffman: Bailing out with predefined table")
}
// Bail out... just generate something.
huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...)
for i := 0; i < 128; i++ {
huffBuff = append(huffBuff, byte(i))
}
continue
}
if errors.Is(err, huff0.ErrIncompressible) {
// Try truncating least common.
for i, n := range remain[:] {
if n > 0 {
n = n / (div * (i + 1))
if n > 0 {
huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
}
}
}
if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 {
huffBuff = append(huffBuff, 255)
}
if len(huffBuff) == 0 {
huffBuff = append(huffBuff, 0, 255)
}
}
if errors.Is(err, huff0.ErrUseRLE) {
for i, n := range remain[:] {
n = n / (div * (i + 1))
// Allow all entries to be represented.
if n == 0 {
n = 1
}
huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
}
}
}
var out bytes.Buffer
out.Write([]byte(dictMagic))
out.Write(binary.LittleEndian.AppendUint32(nil, o.ID))
out.Write(scratch.OutTable)
if debug {
println("huff table:", len(scratch.OutTable), "bytes")
println("of table:", len(ofTable), "bytes")
println("ml table:", len(mlTable), "bytes")
println("ll table:", len(llTable), "bytes")
}
out.Write(ofTable)
out.Write(mlTable)
out.Write(llTable)
out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0])))
out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1])))
out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2])))
out.Write(hist)
if debug {
_, err := loadDict(out.Bytes())
if err != nil {
panic(err)
}
i, err := InspectDictionary(out.Bytes())
if err != nil {
panic(err)
}
println("ID:", i.ID())
println("Content size:", i.ContentSize())
println("Encoder:", i.LitEncoder() != nil)
println("Offsets:", i.Offsets())
var totalSize int
for _, b := range contents {
totalSize += len(b)
}
encWith := func(opts ...EOption) int {
enc, err := NewWriter(nil, opts...)
if err != nil {
panic(err)
}
defer enc.Close()
var dst []byte
var totalSize int
for _, b := range contents {
dst = enc.EncodeAll(b, dst[:0])
totalSize += len(dst)
}
return totalSize
}
plain := encWith(WithEncoderLevel(o.Level))
withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes()))
println("Input size:", totalSize)
println("Plain Compressed:", plain)
println("Dict Compressed:", withDict)
println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)")
}
return out.Bytes(), nil
}
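A minimal BuildDict usage sketch (assuming the vendored github.com/klauspost/compress/zstd import path; the corpus, ID, and offsets are invented):

package main

import (
	"bytes"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Invented corpus: many short, similar messages (each >= 8 bytes).
	var samples [][]byte
	for i := 0; i < 64; i++ {
		samples = append(samples, []byte("user=alice action=login result=ok seq=0123456789"))
	}
	history := bytes.Repeat([]byte("user=alice action=login result=ok "), 8)

	dict, err := zstd.BuildDict(zstd.BuildDictOptions{
		ID:       1001,              // arbitrary dictionary ID
		Contents: samples,           // blocks used to gather statistics
		History:  history,           // becomes the dictionary content
		Offsets:  [3]int{8, 16, 24}, // invented starting repeat offsets
		Level:    zstd.SpeedBetterCompression,
		DebugOut: os.Stderr, // optional build statistics
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("built dictionary: %d bytes", len(dict))
}

The returned blob can then be supplied to encoders and decoders via WithEncoderDict and WithDecoderDicts.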

View file

@@ -227,10 +227,7 @@ func (e *Encoder) nextBlock(final bool) error {
DictID: e.o.dict.ID(),
}
dst, err := fh.appendTo(tmp[:0])
if err != nil {
return err
}
dst := fh.appendTo(tmp[:0])
s.headerWritten = true
s.wWg.Wait()
var n2 int
@@ -483,7 +480,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
Checksum: false,
DictID: 0,
}
dst, _ = fh.appendTo(dst)
dst = fh.appendTo(dst)
// Write raw block as last one only.
var blk blockHeader
@@ -518,10 +515,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem {
dst = make([]byte, 0, len(src))
}
dst, err := fh.appendTo(dst)
if err != nil {
panic(err)
}
dst = fh.appendTo(dst)
// If we can do everything in one block, prefer that.
if len(src) <= e.o.blockSize {
@@ -581,6 +575,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
// Add padding with content from crypto/rand.Reader
if e.o.pad > 0 {
add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad))
var err error
dst, err = skippableFrame(dst, add, rand.Reader)
if err != nil {
panic(err)
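The pad option that drives this branch is set when constructing the writer; a minimal sketch (payload invented):

package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer
	// Pad the frame to a multiple of 4 KiB; the padding is emitted as a
	// skippable frame filled from crypto/rand, as in the code above.
	enc, err := zstd.NewWriter(&buf, zstd.WithEncoderPadding(4<<10))
	if err != nil {
		log.Fatal(err)
	}
	if _, err := enc.Write([]byte("example payload")); err != nil {
		log.Fatal(err)
	}
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("frame size: %d bytes", buf.Len()) // expect 4096
}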

View file

@@ -22,7 +22,7 @@ type frameHeader struct {
const maxHeaderSize = 14
func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
func (f frameHeader) appendTo(dst []byte) []byte {
dst = append(dst, frameMagic...)
var fhd uint8
if f.Checksum {
@@ -88,7 +88,7 @@ func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
default:
panic("invalid fcs")
}
return dst, nil
return dst
}
const skippableFrameHeader = 4 + 4

View file

@@ -245,7 +245,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
return io.ErrUnexpectedEOF
}
var ll, mo, ml int
if br.off > 4+((maxOffsetBits+16+16)>>3) {
if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
// inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
@@ -452,18 +452,13 @@ func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol)
// extra bits are stored in reverse order.
br.fill()
if s.maxBits <= 32 {
mo += br.getBits(moB)
ml += br.getBits(mlB)
ll += br.getBits(llB)
} else {
mo += br.getBits(moB)
if s.maxBits > 32 {
br.fill()
}
// matchlength+literal length, max 32 bits
ml += br.getBits(mlB)
ll += br.getBits(llB)
}
mo = s.adjustOffset(mo, ll, moB)
return
}
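// A note on the merged branches above (reasoning derived from the code):
// fill() leaves at least 32 bits buffered, and the ml+ll extra bits never
// exceed 32 in total, so a refill is only needed after the offset bits
// when s.maxBits > 32.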

Some files were not shown because too many files have changed in this diff.