vendor: make vendor-update

This commit is contained in:
Aliaksandr Valialkin 2022-12-10 21:46:16 -08:00
parent b01607e3fb
commit 19f20c0f4e
No known key found for this signature in database
GPG key ID: A72BEC6CD3D0DED1
64 changed files with 1832 additions and 964 deletions

24
go.mod
View file

@@ -5,7 +5,7 @@ go 1.19
require (
cloud.google.com/go/storage v1.28.1
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.6.1
github.com/VictoriaMetrics/fastcache v1.12.0
// Do not use the original github.com/valyala/fasthttp because of issues
@@ -22,19 +22,19 @@ require (
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
github.com/googleapis/gax-go/v2 v2.7.0
github.com/influxdata/influxdb v1.10.0
github.com/influxdata/influxdb v1.11.0
github.com/klauspost/compress v1.15.12
github.com/prometheus/prometheus v0.40.5
github.com/urfave/cli/v2 v2.23.6
github.com/prometheus/prometheus v0.40.6
github.com/urfave/cli/v2 v2.23.7
github.com/valyala/fastjson v1.6.3
github.com/valyala/fastrand v1.1.0
github.com/valyala/fasttemplate v1.2.2
github.com/valyala/gozstd v1.17.0
github.com/valyala/quicktemplate v1.7.0
golang.org/x/net v0.3.0
golang.org/x/oauth2 v0.2.0
golang.org/x/net v0.4.0
golang.org/x/oauth2 v0.3.0
golang.org/x/sys v0.3.0
google.golang.org/api v0.103.0
google.golang.org/api v0.104.0
gopkg.in/yaml.v2 v2.4.0
)
@@ -46,7 +46,7 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
github.com/aws/aws-sdk-go v1.44.153 // indirect
github.com/aws/aws-sdk-go v1.44.157 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.13.4 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20 // indirect
@@ -76,7 +76,7 @@ require (
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
github.com/hashicorp/go-hclog v0.16.2 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
@@ -92,7 +92,7 @@ require (
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/common v0.38.0 // indirect
github.com/prometheus/common/sigv4 v0.1.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/rivo/uniseg v0.4.3 // indirect
@@ -108,13 +108,13 @@ require (
go.opentelemetry.io/otel/trace v1.11.2 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/goleak v1.2.0 // indirect
golang.org/x/exp v0.0.0-20221205204356-47842c84f3db // indirect
golang.org/x/exp v0.0.0-20221208152030-732eee02a75a // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/text v0.5.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20221205194025-8222ab48f5fc // indirect
google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 // indirect
google.golang.org/grpc v1.51.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect

59
go.sum
View file

@@ -48,8 +48,8 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0/go.mod h1:uGG2W01BaETf0Ozp+Q
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1 h1:Oj853U9kG+RLTCQXpjvOnrv0WaZHxgmZz1TlLywgOPY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1 h1:BMTdr+ib5ljLa9MxTJK8x/Ds0MbBb4MfuW5BL0zMJnI=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1/go.mod h1:c6WvOhtmjNUWbLfOG1qxM/q0SPvQNSVJvolm+C52dIU=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.6.1 h1:YvQv9Mz6T8oR5ypQOL6erY0Z5t71ak1uHV4QFokCOZk=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.6.1/go.mod h1:c6WvOhtmjNUWbLfOG1qxM/q0SPvQNSVJvolm+C52dIU=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM=
github.com/Azure/go-autorest/autorest/adal v0.9.21 h1:jjQnVFXPfekaqb8vIsv2G1lxshoW+oGv4MDlhRtnYZk=
@@ -89,8 +89,8 @@ github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu
github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.44.153 h1:KfN5URb9O/Fk48xHrAinrPV2DzPcLa0cd9yo1ax5KGg=
github.com/aws/aws-sdk-go v1.44.153/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go v1.44.157 h1:JVBPpEWC8+yA7CbfAuTl/ZFFlHS3yoqWFqxFyTCISwg=
github.com/aws/aws-sdk-go v1.44.157/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go-v2 v1.17.2 h1:r0yRZInwiPBNpQ4aDy/Ssh3ROWsGtKDwar2JS8Lm+N8=
github.com/aws/aws-sdk-go-v2 v1.17.2/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs=
@@ -180,7 +180,6 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
@@ -271,8 +270,8 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs=
github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
github.com/googleapis/enterprise-certificate-proxy v0.2.1 h1:RY7tHKZcRlk788d5WSo/e83gOyyy742E8GSs771ySpg=
github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ=
@@ -299,8 +298,8 @@ github.com/hashicorp/serf v0.9.7 h1:hkdgbqizGQHuU5IPqYM1JdSMV8nKfpuOnZYXssk9muY=
github.com/hetznercloud/hcloud-go v1.35.3 h1:WCmFAhLRooih2QHAsbCbEdpIHnshQQmrPqsr3rHE1Ow=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/influxdata/influxdb v1.10.0 h1:8xDpt8KO3lzrzf/ss+l8r42AGUZvoITu5824berK7SE=
github.com/influxdata/influxdb v1.10.0/go.mod h1:IVPuoA2pOOxau/NguX7ipW0Jp9Bn+dMWlo0+VOscevU=
github.com/influxdata/influxdb v1.11.0 h1:0X+ZsbcOWc6AEi5MHee9BYqXCKmz8IZsljrRYjmV8Qg=
github.com/influxdata/influxdb v1.11.0/go.mod h1:V93tJcidY0Zh0LtSONZWnXXGDyt20dtVf+Ddp4EnhaA=
github.com/ionos-cloud/sdk-go/v6 v6.1.3 h1:vb6yqdpiqaytvreM0bsn2pXw+1YDvEk2RKSmBAQvgDQ=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
@@ -313,7 +312,6 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
@@ -362,7 +360,6 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
@@ -383,7 +380,6 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
@@ -396,20 +392,18 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common v0.38.0 h1:VTQitp6mXTdUoCmDMugDVOJ1opi6ADftKfp/yeqTR/E=
github.com/prometheus/common v0.38.0/go.mod h1:MBXfmBQZrK5XpbCkjofnXs96LD2QQ7fEq4C0xjC/yec=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/prometheus/prometheus v0.40.5 h1:wmk5yNrQlkQ2OvZucMhUB4k78AVfG34szb1UtopS8Vc=
github.com/prometheus/prometheus v0.40.5/go.mod h1:bxgdmtoSNLmmIVPGmeTJ3OiP67VmuY4yalE4ZP6L/j8=
github.com/prometheus/prometheus v0.40.6 h1:JP2Wbm4HJI9OlWbOzCGRL3zlOXFdSzC0TttI09+EodM=
github.com/prometheus/prometheus v0.40.6/go.mod h1:nO+vI0cJo1ezp2DPGw5NEnTlYHGRpBFrqE4zb9O0g0U=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw=
@@ -435,8 +429,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/urfave/cli/v2 v2.23.6 h1:iWmtKD+prGo1nKUtLO0Wg4z9esfBM4rAV4QRLQiEmJ4=
github.com/urfave/cli/v2 v2.23.6/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
github.com/urfave/cli/v2 v2.23.7 h1:YHDQ46s3VghFHFf1DdF+Sh7H4RqhcM+t0TmZRJx4oJY=
github.com/urfave/cli/v2 v2.23.7/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
@@ -499,8 +493,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20221205204356-47842c84f3db h1:D/cFflL63o2KSLJIwjlcIt8PR064j/xsmdEJL/YvY/o=
golang.org/x/exp v0.0.0-20221205204356-47842c84f3db/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/exp v0.0.0-20221208152030-732eee02a75a h1:4iLhBPcpqFmylhnkbY3W0ONLUYYkDAW9xMFLfxgsvCw=
golang.org/x/exp v0.0.0-20221208152030-732eee02a75a/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -557,21 +551,18 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.3.0 h1:VWL6FNY2bEEmsGVKabSlHu5Irp34xmMRoqb/9lF9lxk=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU=
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.2.0 h1:GtQkldQ9m7yvzCL1V+LrYow3Khe0eJH0w7RbX/VbaIU=
golang.org/x/oauth2 v0.2.0/go.mod h1:Cwn6afJ8jrQwYMxQDTpISoXmXW9I6qF6vDeuuoX3Ibs=
golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8=
golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -627,8 +618,6 @@ golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -722,8 +711,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.103.0 h1:9yuVqlu2JCvcLg9p8S3fcFLZij8EPSyvODIY1rkMizQ=
google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0=
google.golang.org/api v0.104.0 h1:KBfmLRqdZEbwQleFlSLnzpQJwhjpmNOk4cKQIBDZ9mg=
google.golang.org/api v0.104.0/go.mod h1:JCspTXJbBxa5ySXw4UgUqVer7DfVxbvc/CTUFqAED5U=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -761,8 +750,8 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20221205194025-8222ab48f5fc h1:nUKKji0AarrQKh6XpFEpG3p1TNztxhe7C8TcUvDgXqw=
google.golang.org/genproto v0.0.0-20221205194025-8222ab48f5fc/go.mod h1:1dOng4TWOomJrDGhpXjfCD35wQC6jnC7HpRmOFRqEV0=
google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 h1:jmIfw8+gSvXcZSgaFAGyInDXeWzUhvYH57G/5GKMn70=
google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=

View file

@@ -1,5 +1,45 @@
# Release History
## 0.6.1 (2022-12-09)
### Bugs Fixed
* Fix compilation error on Darwin.
## 0.6.0 (2022-12-08)
### Features Added
* Added BlobDeleteType to DeleteOptions to allow access to ['Permanent'](https://learn.microsoft.com/rest/api/storageservices/delete-blob#permanent-delete) DeleteType.
* Added [Set Blob Expiry API](https://learn.microsoft.com/rest/api/storageservices/set-blob-expiry).
* Added method `ServiceClient()` to the `azblob.Client` type, allowing access to the underlying service client.
* Added support for object level immutability policy with versioning (Version Level WORM).
* Added the custom CRC64 polynomial used by storage for transactional hashes, and implemented automatic hashing for transactions.
### Breaking Changes
* Corrected the name for `saoid` and `suoid` SAS parameters in `BlobSignatureValues` struct as per [this](https://learn.microsoft.com/rest/api/storageservices/create-user-delegation-sas#construct-a-user-delegation-sas)
* Updated type of `BlockSize` from int to int64 in `UploadStreamOptions`
* CRC64 transactional hashes are now supplied with a `uint64` rather than a `[]byte` to conform with Golang's `hash/crc64` package
* Field `XMSContentCRC64` has been renamed to `ContentCRC64`
* The `Lease*` constant types and values in the `blob` and `container` packages have been moved to the `lease` package and their names fixed up to avoid stuttering.
* Fields `TransactionalContentCRC64` and `TransactionalContentMD5` have been replaced by `TransactionalValidation`.
* Fields `SourceContentCRC64` and `SourceContentMD5` have been replaced by `SourceContentValidation`.
* Field `TransactionalContentMD5` has been removed from type `AppendBlockFromURLOptions`.
### Bugs Fixed
* Corrected signing of User Delegation SAS. Fixes [#19372](https://github.com/Azure/azure-sdk-for-go/issues/19372) and [#19454](https://github.com/Azure/azure-sdk-for-go/issues/19454)
* Added formatting of start and expiry time in [SetAccessPolicy](https://learn.microsoft.com/rest/api/storageservices/set-container-acl#request-body). Fixes [#18712](https://github.com/Azure/azure-sdk-for-go/issues/18712)
* Uploading block blobs larger than 256MB can fail in some cases with error `net/http: HTTP/1.x transport connection broken`.
* Blob name parameters are URL-encoded before constructing the complete blob URL.
### Other Changes
* Added some missing public surface area in the `container` and `service` packages.
* The `UploadStream()` methods now use anonymous memory mapped files for buffers in order to reduce heap allocations/fragmentation.
* The anonymous memory mapped files are typically backed by the page/swap file, multiple files are not actually created.
## 0.5.1 (2022-10-11)
### Bugs Fixed

View file

@@ -10,6 +10,7 @@ import (
"context"
"io"
"os"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
@@ -103,6 +104,11 @@ func (ab *Client) generated() *generated.AppendBlobClient {
return appendBlob
}
func (ab *Client) innerBlobGenerated() *generated.BlobClient {
b := ab.BlobClient()
return base.InnerClient((*base.Client[generated.BlobClient])(b))
}
// URL returns the URL endpoint used by the Client object.
func (ab *Client) URL() string {
return ab.generated().Endpoint()
@@ -153,6 +159,13 @@ func (ab *Client) AppendBlock(ctx context.Context, body io.ReadSeekCloser, o *Ap
appendOptions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions := o.format()
if o != nil && o.TransactionalValidation != nil {
body, err = o.TransactionalValidation.Apply(body, appendOptions)
if err != nil {
return AppendBlockResponse{}, nil
}
}
resp, err := ab.generated().AppendBlock(ctx, count, body, appendOptions, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions)
return resp, err
@@ -190,6 +203,24 @@ func (ab *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.U
return ab.BlobClient().Undelete(ctx, o)
}
// SetImmutabilityPolicy operation enables users to set the immutability policy on a blob.
// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview
func (ab *Client) SetImmutabilityPolicy(ctx context.Context, expiryTime time.Time, options *blob.SetImmutabilityPolicyOptions) (blob.SetImmutabilityPolicyResponse, error) {
return ab.BlobClient().SetImmutabilityPolicy(ctx, expiryTime, options)
}
// DeleteImmutabilityPolicy operation enables users to delete the immutability policy on a blob.
// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview
func (ab *Client) DeleteImmutabilityPolicy(ctx context.Context, options *blob.DeleteImmutabilityPolicyOptions) (blob.DeleteImmutabilityPolicyResponse, error) {
return ab.BlobClient().DeleteImmutabilityPolicy(ctx, options)
}
// SetLegalHold operation enables users to set legal hold on a blob.
// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview
func (ab *Client) SetLegalHold(ctx context.Context, legalHold bool, options *blob.SetLegalHoldOptions) (blob.SetLegalHoldResponse, error) {
return ab.BlobClient().SetLegalHold(ctx, legalHold, options)
}
// SetTier operation sets the tier on a blob. The operation is allowed on a page
// blob in a premium storage account and on a block blob in a blob storage account (locally
// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and
@@ -200,6 +231,17 @@ func (ab *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.Set
return ab.BlobClient().SetTier(ctx, tier, o)
}
// SetExpiry operation sets an expiry time on an existing blob. This operation is only allowed on Hierarchical Namespace enabled accounts.
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-blob-expiry
func (ab *Client) SetExpiry(ctx context.Context, expiryType ExpiryType, o *SetExpiryOptions) (SetExpiryResponse, error) {
if expiryType == nil {
expiryType = ExpiryTypeNever{}
}
et, opts := expiryType.Format(o)
resp, err := ab.innerBlobGenerated().SetExpiry(ctx, et, opts)
return resp, err
}
// GetProperties returns the blob's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
func (ab *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOptions) (blob.GetPropertiesResponse, error) {

View file

@@ -73,10 +73,9 @@ func (o *CreateOptions) format() (*generated.AppendBlobClientCreateOptions, *gen
// AppendBlockOptions contains the optional parameters for the Client.AppendBlock method.
type AppendBlockOptions struct {
// Specify the transactional crc64 for the body, to be validated by the service.
TransactionalContentCRC64 []byte
// Specify the transactional md5 for the body, to be validated by the service.
TransactionalContentMD5 []byte
// TransactionalValidation specifies the transfer validation type to use.
// The default is nil (no transfer validation).
TransactionalValidation blob.TransferValidationType
AppendPositionAccessConditions *AppendPositionAccessConditions
@@ -93,24 +92,16 @@ func (o *AppendBlockOptions) format() (*generated.AppendBlobClientAppendBlockOpt
return nil, nil, nil, nil, nil, nil
}
options := &generated.AppendBlobClientAppendBlockOptions{
TransactionalContentCRC64: o.TransactionalContentCRC64,
TransactionalContentMD5: o.TransactionalContentMD5,
}
leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
return options, o.AppendPositionAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions, leaseAccessConditions
return &generated.AppendBlobClientAppendBlockOptions{}, o.AppendPositionAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions, leaseAccessConditions
}
// ---------------------------------------------------------------------------------------------------------------------
// AppendBlockFromURLOptions contains the optional parameters for the Client.AppendBlockFromURL method.
type AppendBlockFromURLOptions struct {
// Specify the md5 calculated for the range of bytes that must be read from the copy source.
SourceContentMD5 []byte
// Specify the crc64 calculated for the range of bytes that must be read from the copy source.
SourceContentCRC64 []byte
// Specify the transactional md5 for the body, to be validated by the service.
TransactionalContentMD5 []byte
// SourceContentValidation contains the validation mechanism used on the range of bytes read from the source.
SourceContentValidation blob.SourceContentValidationType
AppendPositionAccessConditions *AppendPositionAccessConditions
@@ -135,9 +126,10 @@ func (o *AppendBlockFromURLOptions) format() (*generated.AppendBlobClientAppendB
options := &generated.AppendBlobClientAppendBlockFromURLOptions{
SourceRange: exported.FormatHTTPRange(o.Range),
SourceContentMD5: o.SourceContentMD5,
SourceContentcrc64: o.SourceContentCRC64,
TransactionalContentMD5: o.TransactionalContentMD5,
}
if o.SourceContentValidation != nil {
o.SourceContentValidation.Apply(options)
}
leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
@@ -164,3 +156,21 @@ func (o *SealOptions) format() (*generated.LeaseAccessConditions,
}
// ---------------------------------------------------------------------------------------------------------------------
// ExpiryType defines values for ExpiryType
type ExpiryType = exported.ExpiryType
// ExpiryTypeAbsolute defines the absolute time for the blob expiry
type ExpiryTypeAbsolute = exported.ExpiryTypeAbsolute
// ExpiryTypeRelativeToNow defines the duration relative to now for the blob expiry
type ExpiryTypeRelativeToNow = exported.ExpiryTypeRelativeToNow
// ExpiryTypeRelativeToCreation defines the duration relative to creation for the blob expiry
type ExpiryTypeRelativeToCreation = exported.ExpiryTypeRelativeToCreation
// ExpiryTypeNever defines that the blob will be set to never expire
type ExpiryTypeNever = exported.ExpiryTypeNever
// SetExpiryOptions contains the optional parameters for the Client.SetExpiry method.
type SetExpiryOptions = exported.SetExpiryOptions

View file

@@ -21,3 +21,6 @@ type AppendBlockFromURLResponse = generated.AppendBlobClientAppendBlockFromURLRe
// SealResponse contains the response from method Client.Seal.
type SealResponse = generated.AppendBlobClientSealResponse
// SetExpiryResponse contains the response from method BlobClient.SetExpiry.
type SetExpiryResponse = generated.BlobClientSetExpiryResponse

View file

@@ -231,6 +231,31 @@ func (b *Client) GetTags(ctx context.Context, options *GetTagsOptions) (GetTagsR
}
// SetImmutabilityPolicy operation enables users to set the immutability policy on a blob. Mode defaults to "Unlocked".
// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview
func (b *Client) SetImmutabilityPolicy(ctx context.Context, expiryTime time.Time, options *SetImmutabilityPolicyOptions) (SetImmutabilityPolicyResponse, error) {
	// BUG FIX: options.format() returns a nil options struct when options is nil,
	// and the assignment below would then dereference a nil pointer and panic.
	// Normalize to an empty options value first.
	if options == nil {
		options = &SetImmutabilityPolicyOptions{}
	}
	blobSetImmutabilityPolicyOptions, modifiedAccessConditions := options.format()
	blobSetImmutabilityPolicyOptions.ImmutabilityPolicyExpiry = &expiryTime
	resp, err := b.generated().SetImmutabilityPolicy(ctx, blobSetImmutabilityPolicyOptions, modifiedAccessConditions)
	return resp, err
}
// DeleteImmutabilityPolicy operation enables users to delete the immutability policy on a blob.
// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview
func (b *Client) DeleteImmutabilityPolicy(ctx context.Context, options *DeleteImmutabilityPolicyOptions) (DeleteImmutabilityPolicyResponse, error) {
	// There are currently no options to convey; pass the formatted (nil) options through.
	return b.generated().DeleteImmutabilityPolicy(ctx, options.format())
}
// SetLegalHold operation enables users to set legal hold on a blob.
// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview
func (b *Client) SetLegalHold(ctx context.Context, legalHold bool, options *SetLegalHoldOptions) (SetLegalHoldResponse, error) {
	// There are currently no options to convey; pass the formatted (nil) options through.
	return b.generated().SetLegalHold(ctx, legalHold, options.format())
}
// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url.
func (b *Client) CopyFromURL(ctx context.Context, copySource string, options *CopyFromURLOptions) (CopyFromURLResponse, error) {
@ -311,8 +336,7 @@ func (b *Client) download(ctx context.Context, writer io.WriterAt, o downloadOpt
TransferSize: count,
ChunkSize: o.BlockSize,
Concurrency: o.Concurrency,
Operation: func(chunkStart int64, count int64, ctx context.Context) error {
Operation: func(ctx context.Context, chunkStart int64, count int64) error {
downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{
Offset: chunkStart + o.Range.Offset,
Count: count,

View file

@ -168,21 +168,6 @@ func PossibleDeleteTypeValues() []DeleteType {
return generated.PossibleDeleteTypeValues()
}
// ExpiryOptions defines values for ExpiryOptions
type ExpiryOptions = generated.ExpiryOptions
const (
ExpiryOptionsAbsolute ExpiryOptions = generated.ExpiryOptionsAbsolute
ExpiryOptionsNeverExpire ExpiryOptions = generated.ExpiryOptionsNeverExpire
ExpiryOptionsRelativeToCreation ExpiryOptions = generated.ExpiryOptionsRelativeToCreation
ExpiryOptionsRelativeToNow ExpiryOptions = generated.ExpiryOptionsRelativeToNow
)
// PossibleExpiryOptionsValues returns the possible values for the ExpiryOptions const type.
func PossibleExpiryOptionsValues() []ExpiryOptions {
return generated.PossibleExpiryOptionsValues()
}
// QueryFormatType - The quick query format type.
type QueryFormatType = generated.QueryFormatType
@ -198,44 +183,47 @@ func PossibleQueryFormatTypeValues() []QueryFormatType {
return generated.PossibleQueryFormatTypeValues()
}
// LeaseDurationType defines values for LeaseDurationType
type LeaseDurationType = generated.LeaseDurationType
// TransferValidationType abstracts the various mechanisms used to verify a transfer.
type TransferValidationType = exported.TransferValidationType
const (
LeaseDurationTypeInfinite LeaseDurationType = generated.LeaseDurationTypeInfinite
LeaseDurationTypeFixed LeaseDurationType = generated.LeaseDurationTypeFixed
)
// TransferValidationTypeCRC64 is a TransferValidationType used to provide a precomputed CRC64.
type TransferValidationTypeCRC64 = exported.TransferValidationTypeCRC64
// PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type.
func PossibleLeaseDurationTypeValues() []LeaseDurationType {
return generated.PossibleLeaseDurationTypeValues()
// TransferValidationTypeComputeCRC64 is a TransferValidationType that indicates a CRC64 should be computed during transfer.
func TransferValidationTypeComputeCRC64() TransferValidationType {
return exported.TransferValidationTypeComputeCRC64()
}
// LeaseStateType defines values for LeaseStateType
type LeaseStateType = generated.LeaseStateType
// TransferValidationTypeMD5 is a TransferValidationType used to provide a precomputed MD5.
type TransferValidationTypeMD5 = exported.TransferValidationTypeMD5
const (
LeaseStateTypeAvailable LeaseStateType = generated.LeaseStateTypeAvailable
LeaseStateTypeLeased LeaseStateType = generated.LeaseStateTypeLeased
LeaseStateTypeExpired LeaseStateType = generated.LeaseStateTypeExpired
LeaseStateTypeBreaking LeaseStateType = generated.LeaseStateTypeBreaking
LeaseStateTypeBroken LeaseStateType = generated.LeaseStateTypeBroken
)
// PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type.
func PossibleLeaseStateTypeValues() []LeaseStateType {
return generated.PossibleLeaseStateTypeValues()
// SourceContentValidationType abstracts the various mechanisms used to validate source content.
// This interface is not publicly implementable.
type SourceContentValidationType interface {
Apply(generated.SourceContentSetter)
notPubliclyImplementable()
}
// LeaseStatusType defines values for LeaseStatusType
type LeaseStatusType = generated.LeaseStatusType
// SourceContentValidationTypeCRC64 is a SourceContentValidationType used to provide a precomputed CRC64.
type SourceContentValidationTypeCRC64 []byte
const (
LeaseStatusTypeLocked LeaseStatusType = generated.LeaseStatusTypeLocked
LeaseStatusTypeUnlocked LeaseStatusType = generated.LeaseStatusTypeUnlocked
)
// PossibleLeaseStatusTypeValues returns the possible values for the LeaseStatusType const type.
func PossibleLeaseStatusTypeValues() []LeaseStatusType {
return generated.PossibleLeaseStatusTypeValues()
// Apply implements the SourceContentValidationType interface for type SourceContentValidationTypeCRC64.
func (s SourceContentValidationTypeCRC64) Apply(src generated.SourceContentSetter) {
src.SetSourceContentCRC64(s)
}
func (SourceContentValidationTypeCRC64) notPubliclyImplementable() {}
var _ SourceContentValidationType = (SourceContentValidationTypeCRC64)(nil)
// SourceContentValidationTypeMD5 is a SourceContentValidationType used to provide a precomputed MD5.
type SourceContentValidationTypeMD5 []byte

// Apply implements the SourceContentValidationType interface for type SourceContentValidationTypeMD5.
// It forwards the precomputed MD5 to the generated-layer request options.
func (s SourceContentValidationTypeMD5) Apply(src generated.SourceContentSetter) {
	src.SetSourceContentMD5(s)
}

// notPubliclyImplementable keeps SourceContentValidationType closed to implementations outside this package.
func (SourceContentValidationTypeMD5) notPubliclyImplementable() {}

// compile-time check that SourceContentValidationTypeMD5 satisfies SourceContentValidationType
var _ SourceContentValidationType = (SourceContentValidationTypeMD5)(nil)

View file

@ -194,6 +194,11 @@ type DeleteOptions struct {
// and all of its snapshots. only: Delete only the blob's snapshots and not the blob itself
DeleteSnapshots *DeleteSnapshotsOptionType
AccessConditions *AccessConditions
// Setting DeleteType to DeleteTypePermanent will permanently delete soft-delete snapshot and/or version blobs.
// WARNING: This is a dangerous operation and should not be used unless you know the implications. Please proceed
// with caution.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob
BlobDeleteType *DeleteType
}
func (o *DeleteOptions) format() (*generated.BlobClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
@ -203,6 +208,7 @@ func (o *DeleteOptions) format() (*generated.BlobClientDeleteOptions, *generated
basics := generated.BlobClientDeleteOptions{
DeleteSnapshots: o.DeleteSnapshots,
DeleteType: o.BlobDeleteType, // None by default
}
if o.AccessConditions == nil {
@ -442,6 +448,54 @@ func (o *GetTagsOptions) format() (*generated.BlobClientGetTagsOptions, *generat
// ---------------------------------------------------------------------------------------------------------------------
// SetImmutabilityPolicyOptions contains the parameter for Client.SetImmutabilityPolicy
type SetImmutabilityPolicyOptions struct {
// Specifies the immutability policy mode to set on the blob. Possible values to set include: "Locked", "Unlocked".
// "Mutable" can only be returned by service, don't set to "Mutable". If mode is not set - it will default to Unlocked.
Mode *ImmutabilityPolicySetting
ModifiedAccessConditions *ModifiedAccessConditions
}
// format converts the options into their generated-layer representation.
// BUG FIX: it now always returns a non-nil options struct — the caller
// (Client.SetImmutabilityPolicy) unconditionally assigns the expiry time into
// the returned struct, so returning nil for a nil receiver caused a panic.
func (o *SetImmutabilityPolicyOptions) format() (*generated.BlobClientSetImmutabilityPolicyOptions, *ModifiedAccessConditions) {
	options := &generated.BlobClientSetImmutabilityPolicyOptions{}
	if o == nil {
		return options, nil
	}
	ac := &exported.BlobAccessConditions{
		ModifiedAccessConditions: o.ModifiedAccessConditions,
	}
	_, modifiedAccessConditions := exported.FormatBlobAccessConditions(ac)
	options.ImmutabilityPolicyMode = o.Mode
	return options, modifiedAccessConditions
}
// ---------------------------------------------------------------------------------------------------------------------
// DeleteImmutabilityPolicyOptions contains the optional parameters for the Client.DeleteImmutabilityPolicy method.
type DeleteImmutabilityPolicyOptions struct {
	// placeholder for future options
}

// format converts the options into their generated-layer representation.
// There are currently no options, so it always returns nil.
func (o *DeleteImmutabilityPolicyOptions) format() *generated.BlobClientDeleteImmutabilityPolicyOptions {
	return nil
}
// ---------------------------------------------------------------------------------------------------------------------
// SetLegalHoldOptions contains the optional parameters for the Client.SetLegalHold method.
type SetLegalHoldOptions struct {
	// placeholder for future options
}

// format converts the options into their generated-layer representation.
// There are currently no options, so it always returns nil.
func (o *SetLegalHoldOptions) format() *generated.BlobClientSetLegalHoldOptions {
	return nil
}
// ---------------------------------------------------------------------------------------------------------------------
// CopyFromURLOptions contains the optional parameters for the Client.CopyFromURL method.
type CopyFromURLOptions struct {
// Optional. Used to set blob tags in various blob operations.

View file

@ -85,6 +85,15 @@ type SetTagsResponse = generated.BlobClientSetTagsResponse
// GetTagsResponse contains the response from method BlobClient.GetTags.
type GetTagsResponse = generated.BlobClientGetTagsResponse
// SetImmutabilityPolicyResponse contains the response from method BlobClient.SetImmutabilityPolicy.
type SetImmutabilityPolicyResponse = generated.BlobClientSetImmutabilityPolicyResponse
// DeleteImmutabilityPolicyResponse contains the response from method BlobClient.DeleteImmutabilityPolicy.
type DeleteImmutabilityPolicyResponse = generated.BlobClientDeleteImmutabilityPolicyResponse
// SetLegalHoldResponse contains the response from method BlobClient.SetLegalHold.
type SetLegalHoldResponse = generated.BlobClientSetLegalHoldResponse
// CopyFromURLResponse contains the response from method BlobClient.CopyFromURL.
type CopyFromURLResponse = generated.BlobClientCopyFromURLResponse

View file

@ -12,225 +12,302 @@ import (
"encoding/base64"
"encoding/binary"
"errors"
"fmt"
"io"
"sync"
"sync/atomic"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
)
// blockWriter provides methods to upload blocks that represent a file to a server and commit them.
// This allows us to provide a local implementation that fakes the server for hermetic testing.
type blockWriter interface {
	// StageBlock uploads one block of data under the given base64 block ID.
	StageBlock(context.Context, string, io.ReadSeekCloser, *StageBlockOptions) (StageBlockResponse, error)
	// Upload writes the entire payload in a single operation.
	Upload(context.Context, io.ReadSeekCloser, *UploadOptions) (UploadResponse, error)
	// CommitBlockList commits the previously staged blocks, in the given ID order, to form the blob.
	CommitBlockList(context.Context, []string, *CommitBlockListOptions) (CommitBlockListResponse, error)
}
// copyFromReader copies a source io.Reader to blob storage using concurrent uploads.
// TODO(someone): The existing model provides a buffer size and buffer limit as limiting factors. The buffer size is probably
// useless other than needing to be above some number, as the network stack is going to hack up the buffer over some size. The
// max buffers is providing a cap on how much memory we use (by multiplying it times the buffer size) and how many go routines can upload
// at a time. I think having a single max memory dial would be more efficient. We can choose an internal buffer size that works
// well, 4 MiB or 8 MiB, and auto-scale to as many goroutines within the memory limit. This gives a single dial to tweak and we can
// choose a max value for the memory setting based on internal transfers within Azure (which will give us the maximum throughput model).
// We can even provide a utility to dial this number in for customer networks to optimize their copies.
func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o UploadStreamOptions) (CommitBlockListResponse, error) {
if err := o.format(); err != nil {
return CommitBlockListResponse{}, err
}
// bufferManager provides an abstraction for the management of buffers.
// this is mostly for testing purposes, but does allow for different implementations without changing the algorithm.
// T is constrained to byte-slice types so buffers can be sub-sliced and indexed directly.
type bufferManager[T ~[]byte] interface {
	// Acquire returns the channel that contains the pool of buffers.
	Acquire() <-chan T

	// Release releases the buffer back to the pool for reuse/cleanup.
	Release(T)

	// Grow grows the number of buffers, up to the predefined max.
	// It returns the total number of buffers or an error.
	// No error is returned if the number of buffers has reached max.
	// This is called only from the reading goroutine.
	Grow() (int, error)

	// Free cleans up all buffers.
	Free()
}
// copyFromReader copies a source io.Reader to blob storage using concurrent uploads.
func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst blockWriter, options UploadStreamOptions, getBufferManager func(maxBuffers int, bufferSize int64) bufferManager[T]) (CommitBlockListResponse, error) {
options.setDefaults()
wg := sync.WaitGroup{} // Used to know when all outgoing blocks have finished processing
errCh := make(chan error, 1) // contains the first error encountered during processing
buffers := getBufferManager(options.Concurrency, options.BlockSize)
defer buffers.Free()
// this controls the lifetime of the uploading goroutines.
// if an error is encountered, cancel() is called which will terminate all uploads.
// NOTE: the ordering is important here. cancel MUST execute before
// cleaning up the buffers so that any uploading goroutines exit first,
// releasing their buffers back to the pool for cleanup.
ctx, cancel := context.WithCancel(ctx)
defer cancel()
var err error
generatedUuid, err := uuid.New()
// all blocks have IDs that start with a random UUID
blockIDPrefix, err := uuid.New()
if err != nil {
return CommitBlockListResponse{}, err
}
cp := &copier{
ctx: ctx,
cancel: cancel,
reader: from,
to: to,
id: newID(generatedUuid),
o: o,
errCh: make(chan error, 1),
tracker := blockTracker{
blockIDPrefix: blockIDPrefix,
options: options,
}
// Send all our chunks until we get an error.
for {
if err = cp.sendChunk(); err != nil {
// This goroutine grabs a buffer, reads from the stream into the buffer,
// then creates a goroutine to upload/stage the block.
for blockNum := uint32(0); true; blockNum++ {
var buffer T
select {
case buffer = <-buffers.Acquire():
// got a buffer
default:
// no buffer available; allocate a new buffer if possible
if _, err := buffers.Grow(); err != nil {
return CommitBlockListResponse{}, err
}
// either grab the newly allocated buffer or wait for one to become available
buffer = <-buffers.Acquire()
}
var n int
n, err = io.ReadFull(src, buffer)
if n > 0 {
// some data was read, upload it
wg.Add(1) // We're posting a buffer to be sent
// NOTE: we must pass blockNum as an arg to our goroutine else
// it's captured by reference and can change underneath us!
go func(blockNum uint32) {
// Upload the outgoing block, matching the number of bytes read
err := tracker.uploadBlock(ctx, dst, blockNum, buffer[:n])
if err != nil {
select {
case errCh <- err:
// error was set
default:
// some other error is already set
}
cancel()
}
buffers.Release(buffer) // The goroutine reading from the stream can reuse this buffer now
// signal that the block has been staged.
// we MUST do this after attempting to write to errCh
// to avoid it racing with the reading goroutine.
wg.Done()
}(blockNum)
} else {
// nothing was read so the buffer is empty, send it back for reuse/clean-up.
buffers.Release(buffer)
}
if err != nil { // The reader is done, no more outgoing buffers
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
// these are expected errors, we don't surface those
err = nil
} else {
// some other error happened, terminate any outstanding uploads
cancel()
}
break
}
}
// If the error is not EOF, then we have a problem.
if err != nil && !errors.Is(err, io.EOF) {
return CommitBlockListResponse{}, err
}
// Close out our upload.
if err := cp.close(); err != nil {
return CommitBlockListResponse{}, err
}
wg.Wait() // Wait for all outgoing blocks to complete
return cp.result, nil
}
// copier streams a file via chunks in parallel from a reader representing a file.
// Do not use directly, instead use copyFromReader().
type copier struct {
// ctx holds the context of a copier. This is normally a faux pas to store a Context in a struct. In this case,
// the copier has the lifetime of a function call, so it's fine.
ctx context.Context
cancel context.CancelFunc
// reader is the source to be written to storage.
reader io.Reader
// to is the location we are writing our chunks to.
to blockWriter
// o contains our options for uploading.
o UploadStreamOptions
// id provides the ids for each chunk.
id *id
//// num is the current chunk we are on.
//num int32
//// ch is used to pass the next chunk of data from our reader to one of the writers.
//ch chan copierChunk
// errCh is used to hold the first error from our concurrent writers.
errCh chan error
// wg provides a count of how many writers we are waiting to finish.
wg sync.WaitGroup
// result holds the final result from blob storage after we have submitted all chunks.
result CommitBlockListResponse
}
// copierChunk contains buffer
type copierChunk struct {
buffer []byte
id string
length int
}
// getErr returns an error by priority. First, if a function set an error, it returns that error. Next, if the Context has an error
// it returns that error. Otherwise, it is nil. getErr supports only returning an error once per copier.
func (c *copier) getErr() error {
select {
case err := <-c.errCh:
return err
default:
}
return c.ctx.Err()
}
// sendChunk reads data from out internal reader, creates a chunk, and sends it to be written via a channel.
// sendChunk returns io.EOF when the reader returns an io.EOF or io.ErrUnexpectedEOF.
func (c *copier) sendChunk() error {
if err := c.getErr(); err != nil {
return err
}
buffer := c.o.transferManager.Get()
if len(buffer) == 0 {
return fmt.Errorf("transferManager returned a 0 size buffer, this is a bug in the manager")
}
n, err := io.ReadFull(c.reader, buffer)
if n > 0 {
// Some data was read, schedule the Write.
id := c.id.next()
c.wg.Add(1)
c.o.transferManager.Run(
func() {
defer c.wg.Done()
c.write(copierChunk{buffer: buffer, id: id, length: n})
},
)
} else {
// Return the unused buffer to the manager.
c.o.transferManager.Put(buffer)
}
if err == nil {
return nil
} else if err == io.EOF || err == io.ErrUnexpectedEOF {
return io.EOF
}
if cerr := c.getErr(); cerr != nil {
return cerr
}
return err
}
// write uploads a chunk to blob storage.
func (c *copier) write(chunk copierChunk) {
defer c.o.transferManager.Put(chunk.buffer)
if err := c.ctx.Err(); err != nil {
return
}
stageBlockOptions := c.o.getStageBlockOptions()
_, err := c.to.StageBlock(c.ctx, chunk.id, shared.NopCloser(bytes.NewReader(chunk.buffer[:chunk.length])), stageBlockOptions)
if err != nil {
// there was an error reading from src, favor this error over any error during staging
return CommitBlockListResponse{}, err
}
select {
case c.errCh <- err:
// failed to stage block, cancel the copy
case err = <-errCh:
// there was an error during staging
return CommitBlockListResponse{}, err
default:
// don't block the goroutine if there's a pending error
}
// no error was encountered
}
// If no error, after all blocks uploaded, commit them to the blob & return the result
return tracker.commitBlocks(ctx, dst)
}
// close commits our blocks to blob storage and closes our writer.
func (c *copier) close() error {
c.wg.Wait()
// used to manage the uploading and committing of blocks
type blockTracker struct {
blockIDPrefix uuid.UUID // UUID used with all blockIDs
maxBlockNum uint32 // defaults to 0
firstBlock []byte // Used only if maxBlockNum is 0
options UploadStreamOptions
}
if err := c.getErr(); err != nil {
return err
func (bt *blockTracker) uploadBlock(ctx context.Context, to blockWriter, num uint32, buffer []byte) error {
if num == 0 {
bt.firstBlock = buffer
// If whole payload fits in 1 block, don't stage it; End will upload it with 1 I/O operation
// If the payload is exactly the same size as the buffer, there may be more content coming in.
if len(buffer) < int(bt.options.BlockSize) {
return nil
}
} else {
// Else, upload a staged block...
atomicMorphUint32(&bt.maxBlockNum, func(startVal uint32) (val uint32, morphResult uint32) {
// Atomically remember (in t.numBlocks) the maximum block num we've ever seen
if startVal < num {
return num, 0
}
return startVal, 0
})
}
var err error
commitBlockListOptions := c.o.getCommitBlockListOptions()
c.result, err = c.to.CommitBlockList(c.ctx, c.id.issued(), commitBlockListOptions)
blockID := newUUIDBlockID(bt.blockIDPrefix).WithBlockNumber(num).ToBase64()
_, err := to.StageBlock(ctx, blockID, streaming.NopCloser(bytes.NewReader(buffer)), bt.options.getStageBlockOptions())
return err
}
// id allows the creation of unique IDs based on UUID4 + an int32. This auto-increments.
type id struct {
u [64]byte
num uint32
all []string
// commitBlocks finalizes the upload. If only block #0 was seen and it was
// smaller than one full block, the entire payload is sent with a single Upload
// call; otherwise the staged blocks are committed via CommitBlockList.
// NOTE(review): bt.maxBlockNum is read here without atomics — the caller waits
// for all staging goroutines before invoking this; confirm at the call site.
func (bt *blockTracker) commitBlocks(ctx context.Context, to blockWriter) (CommitBlockListResponse, error) {
	// If the first block had the exact same size as the buffer
	// we would have staged it as a block thinking that there might be more data coming
	if bt.maxBlockNum == 0 && len(bt.firstBlock) < int(bt.options.BlockSize) {
		// If whole payload fits in 1 block (block #0), upload it with 1 I/O operation
		up, err := to.Upload(ctx, streaming.NopCloser(bytes.NewReader(bt.firstBlock)), bt.options.getUploadOptions())
		if err != nil {
			return CommitBlockListResponse{}, err
		}

		// convert UploadResponse to CommitBlockListResponse
		return CommitBlockListResponse{
			ClientRequestID:     up.ClientRequestID,
			ContentMD5:          up.ContentMD5,
			Date:                up.Date,
			ETag:                up.ETag,
			EncryptionKeySHA256: up.EncryptionKeySHA256,
			EncryptionScope:     up.EncryptionScope,
			IsServerEncrypted:   up.IsServerEncrypted,
			LastModified:        up.LastModified,
			RequestID:           up.RequestID,
			Version:             up.Version,
			VersionID:           up.VersionID,
			//ContentCRC64: up.ContentCRC64, doesn't exist on UploadResponse
		}, nil
	}

	// Multiple blocks staged, commit them all now
	// Block IDs are the shared UUID prefix plus the big-endian block number (see uuidBlockID).
	blockID := newUUIDBlockID(bt.blockIDPrefix)
	blockIDs := make([]string, bt.maxBlockNum+1)
	for bn := uint32(0); bn < bt.maxBlockNum+1; bn++ {
		blockIDs[bn] = blockID.WithBlockNumber(bn).ToBase64()
	}
	return to.CommitBlockList(ctx, blockIDs, bt.options.getCommitBlockListOptions())
}
// newID constructs a new id.
func newID(uu uuid.UUID) *id {
u := [64]byte{}
copy(u[:], uu[:])
return &id{u: u}
// atomicMorpherUint32 identifies a callback passed to and invoked by the
// atomicMorphUint32 function. It receives the current value and returns both
// the replacement value and the result atomicMorphUint32 hands back to its caller.
type atomicMorpherUint32 func(startVal uint32) (val uint32, morphResult uint32)

// atomicMorphUint32 atomically morphs target into the new value (and result)
// produced by the morpher callback, retrying the compare-and-swap until no
// concurrent writer interferes.
func atomicMorphUint32(target *uint32, morpher atomicMorpherUint32) uint32 {
	for {
		current := atomic.LoadUint32(target)
		replacement, result := morpher(current)
		if atomic.CompareAndSwapUint32(target, current, replacement) {
			return result
		}
	}
}
// next returns the next ID.
func (id *id) next() string {
defer atomic.AddUint32(&id.num, 1)
type blockID [64]byte
binary.BigEndian.PutUint32(id.u[len(uuid.UUID{}):], atomic.LoadUint32(&id.num))
str := base64.StdEncoding.EncodeToString(id.u[:])
id.all = append(id.all, str)
return str
// ToBase64 returns the standard base64 encoding of the 64-byte block ID.
func (b blockID) ToBase64() string {
	return base64.StdEncoding.EncodeToString(b[:])
}
// issued returns all ids that have been issued. This returned value shares the internal slice, so it is not safe to modify the return.
// The value is only valid until the next time next() is called.
func (id *id) issued() []string {
return id.all
// uuidBlockID is a blockID whose leading bytes hold a UUID and whose
// following four bytes hold a big-endian block number.
type uuidBlockID blockID

// newUUIDBlockID constructs a uuidBlockID from u; the block number is left at zero.
func newUUIDBlockID(u uuid.UUID) uuidBlockID {
	var id uuidBlockID
	copy(id[:len(u)], u[:])
	return id
}

// WithBlockNumber returns a copy of ubi with blockNumber encoded big-endian
// immediately after the UUID bytes. The value receiver means the caller's ID
// is never modified.
func (ubi uuidBlockID) WithBlockNumber(blockNumber uint32) uuidBlockID {
	binary.BigEndian.PutUint32(ubi[len(uuid.UUID{}):], blockNumber)
	return ubi
}

// ToBase64 returns the base64 encoding of the underlying block ID.
func (ubi uuidBlockID) ToBase64() string {
	return blockID(ubi).ToBase64()
}
// mmbPool implements the bufferManager interface.
// it uses anonymous memory mapped files for buffers.
// don't use this type directly, use newMMBPool() instead.
type mmbPool struct {
	buffers chan mmb // buffers available for reuse
	count   int      // number of buffers allocated so far
	max     int      // upper bound on count
	size    int64    // size in bytes of each buffer
}

// newMMBPool creates a bufferManager backed by anonymous memory-mapped buffers.
func newMMBPool(maxBuffers int, bufferSize int64) bufferManager[mmb] {
	return &mmbPool{
		buffers: make(chan mmb, maxBuffers),
		max:     maxBuffers,
		size:    bufferSize,
	}
}

// Acquire returns the channel from which available buffers are received.
func (pool *mmbPool) Acquire() <-chan mmb {
	return pool.buffers
}

// Grow allocates one more buffer when below the maximum and returns the
// total number of buffers allocated so far.
func (pool *mmbPool) Grow() (int, error) {
	if pool.count >= pool.max {
		return pool.count, nil
	}
	buffer, err := newMMB(pool.size)
	if err != nil {
		return 0, err
	}
	pool.buffers <- buffer
	pool.count++
	return pool.count, nil
}

// Release returns a buffer to the pool for reuse or cleanup.
func (pool *mmbPool) Release(buffer mmb) {
	pool.buffers <- buffer
}

// Free unmaps every allocated buffer and resets the pool's count to zero.
func (pool *mmbPool) Free() {
	for ; pool.count > 0; pool.count-- {
		buffer := <-pool.buffers
		buffer.delete()
	}
}

View file

@ -14,6 +14,7 @@ import (
"io"
"os"
"sync"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
@ -104,6 +105,11 @@ func (bb *Client) generated() *generated.BlockBlobClient {
return blockBlob
}
// innerBlobGenerated returns the generated blob client that backs this block blob client.
func (bb *Client) innerBlobGenerated() *generated.BlobClient {
	blobClient := bb.BlobClient()
	return base.InnerClient((*base.Client[generated.BlobClient])(blobClient))
}
// URL returns the URL endpoint used by the Client object.
func (bb *Client) URL() string {
return bb.generated().Endpoint()
@ -169,6 +175,13 @@ func (bb *Client) StageBlock(ctx context.Context, base64BlockID string, body io.
opts, leaseAccessConditions, cpkInfo, cpkScopeInfo := options.format()
if options != nil && options.TransactionalValidation != nil {
body, err = options.TransactionalValidation.Apply(body, opts)
if err != nil {
return StageBlockResponse{}, nil
}
}
resp, err := bb.generated().StageBlock(ctx, base64BlockID, count, body, opts, leaseAccessConditions, cpkInfo, cpkScopeInfo)
return resp, err
}
@ -218,6 +231,9 @@ func (bb *Client) CommitBlockList(ctx context.Context, base64BlockIDs []string,
Timeout: options.Timeout,
TransactionalContentCRC64: options.TransactionalContentCRC64,
TransactionalContentMD5: options.TransactionalContentMD5,
LegalHold: options.LegalHold,
ImmutabilityPolicyMode: options.ImmutabilityPolicyMode,
ImmutabilityPolicyExpiry: options.ImmutabilityPolicyExpiryTime,
}
headers = options.HTTPHeaders
@ -255,6 +271,24 @@ func (bb *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.U
return bb.BlobClient().Undelete(ctx, o)
}
// SetImmutabilityPolicy operation enables users to set the immutability policy on a blob.
// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview
// It delegates to the embedded blob.Client.
func (bb *Client) SetImmutabilityPolicy(ctx context.Context, expiryTime time.Time, options *blob.SetImmutabilityPolicyOptions) (blob.SetImmutabilityPolicyResponse, error) {
	return bb.BlobClient().SetImmutabilityPolicy(ctx, expiryTime, options)
}

// DeleteImmutabilityPolicy operation enables users to delete the immutability policy on a blob.
// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview
// It delegates to the embedded blob.Client.
func (bb *Client) DeleteImmutabilityPolicy(ctx context.Context, options *blob.DeleteImmutabilityPolicyOptions) (blob.DeleteImmutabilityPolicyResponse, error) {
	return bb.BlobClient().DeleteImmutabilityPolicy(ctx, options)
}

// SetLegalHold operation enables users to set legal hold on a blob.
// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview
// It delegates to the embedded blob.Client.
func (bb *Client) SetLegalHold(ctx context.Context, legalHold bool, options *blob.SetLegalHoldOptions) (blob.SetLegalHoldResponse, error) {
	return bb.BlobClient().SetLegalHold(ctx, legalHold, options)
}
// SetTier operation sets the tier on a blob. The operation is allowed on a page
// blob in a premium storage account and on a block blob in a blob storage account (locally
// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and
@ -265,6 +299,17 @@ func (bb *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.Set
return bb.BlobClient().SetTier(ctx, tier, o)
}
// SetExpiry operation sets an expiry time on an existing blob. This operation is only allowed on Hierarchical Namespace enabled accounts.
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-blob-expiry
func (bb *Client) SetExpiry(ctx context.Context, expiryType ExpiryType, o *SetExpiryOptions) (SetExpiryResponse, error) {
	// A nil expiry type means the blob never expires.
	if expiryType == nil {
		expiryType = ExpiryTypeNever{}
	}
	et, opts := expiryType.Format(o)
	return bb.innerBlobGenerated().SetExpiry(ctx, et, opts)
}
// GetProperties returns the blob's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
func (bb *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOptions) (blob.GetPropertiesResponse, error) {
@ -324,7 +369,8 @@ func (bb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.Co
// Concurrent Upload Functions -----------------------------------------------------------------------------------------
// uploadFromReader uploads a buffer in blocks to a block blob.
func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, readerSize int64, o *uploadFromReaderOptions) (uploadFromReaderResponse, error) {
func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, actualSize int64, o *uploadFromReaderOptions) (uploadFromReaderResponse, error) {
readerSize := actualSize
if o.BlockSize == 0 {
// If bufferSize > (MaxStageBlockBytes * MaxBlocks), then error
if readerSize > MaxStageBlockBytes*MaxBlocks {
@ -374,11 +420,17 @@ func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, read
TransferSize: readerSize,
ChunkSize: o.BlockSize,
Concurrency: o.Concurrency,
Operation: func(offset int64, count int64, ctx context.Context) error {
Operation: func(ctx context.Context, offset int64, chunkSize int64) error {
// This function is called once per block.
// It is passed this block's offset within the buffer and its count of bytes
// Prepare to read the proper block/section of the buffer
var body io.ReadSeeker = io.NewSectionReader(reader, offset, count)
if chunkSize < o.BlockSize {
// this is the last block. its actual size might be less
// than the calculated size due to rounding up of the payload
// size to fit in a whole number of blocks.
chunkSize = (actualSize - offset)
}
var body io.ReadSeeker = io.NewSectionReader(reader, offset, chunkSize)
blockNum := offset / o.BlockSize
if o.Progress != nil {
blockProgress := int64(0)
@ -440,20 +492,11 @@ func (bb *Client) UploadFile(ctx context.Context, file *os.File, o *UploadFileOp
// UploadStream copies the file held in io.Reader to the Blob at blockBlobClient.
// A Context deadline or cancellation will cause this to error.
func (bb *Client) UploadStream(ctx context.Context, body io.Reader, o *UploadStreamOptions) (UploadStreamResponse, error) {
if err := o.format(); err != nil {
return CommitBlockListResponse{}, err
}
if o == nil {
o = &UploadStreamOptions{}
}
// If we used the default manager, we need to close it.
if o.transferMangerNotSet {
defer o.transferManager.Close()
}
result, err := copyFromReader(ctx, body, bb, *o)
result, err := copyFromReader(ctx, body, bb, *o, newMMBPool)
if err != nil {
return CommitBlockListResponse{}, err
}

View file

@ -8,7 +8,6 @@ package blockblob
import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
// nolint
const (
// CountToEnd specifies the end of the file
CountToEnd = 0

View file

@ -0,0 +1,38 @@
//go:build go1.18 && (linux || darwin || freebsd || openbsd || netbsd || solaris)
// +build go1.18
// +build linux darwin freebsd openbsd netbsd solaris
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blockblob
import (
"fmt"
"os"
"syscall"
)
// mmb is a memory mapped buffer
type mmb []byte
// newMMB creates a new memory mapped buffer with the specified size.
// The mapping is anonymous (fd -1 with MAP_ANON) and private to this
// process, so it is not backed by any file.
func newMMB(size int64) (mmb, error) {
	// Request a read/write, process-private, anonymous mapping.
	prot, flags := syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE
	addr, err := syscall.Mmap(-1, 0, int(size), prot, flags)
	if err != nil {
		return nil, os.NewSyscallError("Mmap", err)
	}
	return mmb(addr), nil
}
// delete cleans up the memory mapped buffer by unmapping it.
// The receiver is set to nil before the error check so the buffer can
// never be reused, even if Munmap fails.
func (m *mmb) delete() {
	err := syscall.Munmap(*m)
	*m = nil
	if err != nil {
		// if we get here, there is likely memory corruption.
		// please open an issue https://github.com/Azure/azure-sdk-for-go/issues
		panic(fmt.Sprintf("Munmap error: %v", err))
	}
}

View file

@ -0,0 +1,54 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blockblob
import (
"fmt"
"os"
"reflect"
"syscall"
"unsafe"
)
// mmb is a memory mapped buffer
type mmb []byte
// newMMB creates a new memory mapped buffer with the specified size.
// On Windows this creates a pagefile-backed file mapping (CreateFileMapping
// with INVALID_HANDLE_VALUE) and maps a writable view of it into the process.
func newMMB(size int64) (mmb, error) {
	const InvalidHandleValue = ^uintptr(0) // -1

	prot, access := uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE)
	// The Win32 API takes the mapping size as separate high/low 32-bit halves.
	hMMF, err := syscall.CreateFileMapping(syscall.Handle(InvalidHandleValue), nil, prot, uint32(size>>32), uint32(size&0xffffffff), nil)
	if err != nil {
		return nil, os.NewSyscallError("CreateFileMapping", err)
	}
	// The mapping handle can be closed once the view is created; per the Win32
	// docs the view keeps the section alive until UnmapViewOfFile is called.
	defer syscall.CloseHandle(hMMF)
	addr, err := syscall.MapViewOfFile(hMMF, access, 0, 0, uintptr(size))
	if err != nil {
		return nil, os.NewSyscallError("MapViewOfFile", err)
	}
	// Manufacture a []byte slice header pointing at the mapped view.
	m := mmb{}
	h := (*reflect.SliceHeader)(unsafe.Pointer(&m))
	h.Data = addr
	h.Len = int(size)
	h.Cap = h.Len
	return m, nil
}
// delete cleans up the memory mapped buffer by unmapping the view.
// The receiver is reset to an empty mmb before the error check so the
// buffer can never be reused, even if UnmapViewOfFile fails.
// NOTE(review): taking the address of element 0 panics on an empty buffer —
// callers presumably never create zero-length mappings; confirm upstream.
func (m *mmb) delete() {
	addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0])))
	*m = mmb{}
	err := syscall.UnmapViewOfFile(addr)
	if err != nil {
		// if we get here, there is likely memory corruption.
		// please open an issue https://github.com/Azure/azure-sdk-for-go/issues
		panic(fmt.Sprintf("UnmapViewOfFile error: %v", err))
	}
}

View file

@ -7,7 +7,7 @@
package blockblob
import (
"fmt"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
@ -43,6 +43,9 @@ type UploadOptions struct {
CpkInfo *blob.CpkInfo
CpkScopeInfo *blob.CpkScopeInfo
AccessConditions *blob.AccessConditions
LegalHold *bool
ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting
ImmutabilityPolicyExpiryTime *time.Time
}
func (o *UploadOptions) format() (*generated.BlockBlobClientUploadOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions,
@ -56,6 +59,9 @@ func (o *UploadOptions) format() (*generated.BlockBlobClientUploadOptions, *gene
Metadata: o.Metadata,
Tier: o.Tier,
TransactionalContentMD5: o.TransactionalContentMD5,
LegalHold: o.LegalHold,
ImmutabilityPolicyMode: o.ImmutabilityPolicyMode,
ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiryTime,
}
leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
@ -72,11 +78,9 @@ type StageBlockOptions struct {
LeaseAccessConditions *blob.LeaseAccessConditions
// Specify the transactional crc64 for the body, to be validated by the service.
TransactionalContentCRC64 []byte
// Specify the transactional md5 for the body, to be validated by the service.
TransactionalContentMD5 []byte
// TransactionalValidation specifies the transfer validation type to use.
// The default is nil (no transfer validation).
TransactionalValidation blob.TransferValidationType
}
// StageBlockOptions contains the optional parameters for the Client.StageBlock method.
@ -85,10 +89,7 @@ func (o *StageBlockOptions) format() (*generated.BlockBlobClientStageBlockOption
return nil, nil, nil, nil
}
return &generated.BlockBlobClientStageBlockOptions{
TransactionalContentCRC64: o.TransactionalContentCRC64,
TransactionalContentMD5: o.TransactionalContentMD5,
}, o.LeaseAccessConditions, o.CpkInfo, o.CpkScopeInfo
return &generated.BlockBlobClientStageBlockOptions{}, o.LeaseAccessConditions, o.CpkInfo, o.CpkScopeInfo
}
// ---------------------------------------------------------------------------------------------------------------------
@ -101,10 +102,9 @@ type StageBlockFromURLOptions struct {
LeaseAccessConditions *blob.LeaseAccessConditions
SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions
// Specify the md5 calculated for the range of bytes that must be read from the copy source.
SourceContentMD5 []byte
// Specify the crc64 calculated for the range of bytes that must be read from the copy source.
SourceContentCRC64 []byte
// SourceContentValidation contains the validation mechanism used on the range of bytes read from the source.
SourceContentValidation blob.SourceContentValidationType
// Range specifies a range of bytes. The default value is all bytes.
Range blob.HTTPRange
@ -121,11 +121,13 @@ func (o *StageBlockFromURLOptions) format() (*generated.BlockBlobClientStageBloc
options := &generated.BlockBlobClientStageBlockFromURLOptions{
CopySourceAuthorization: o.CopySourceAuthorization,
SourceContentMD5: o.SourceContentMD5,
SourceContentcrc64: o.SourceContentCRC64,
SourceRange: exported.FormatHTTPRange(o.Range),
}
if o.SourceContentValidation != nil {
o.SourceContentValidation.Apply(options)
}
return options, o.CpkInfo, o.CpkScopeInfo, o.LeaseAccessConditions, o.SourceModifiedAccessConditions
}
@ -144,6 +146,9 @@ type CommitBlockListOptions struct {
CpkInfo *blob.CpkInfo
CpkScopeInfo *blob.CpkScopeInfo
AccessConditions *blob.AccessConditions
LegalHold *bool
ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting
ImmutabilityPolicyExpiryTime *time.Time
}
// ---------------------------------------------------------------------------------------------------------------------
@ -196,10 +201,12 @@ type uploadFromReaderOptions struct {
// Concurrency indicates the maximum number of blocks to upload in parallel (0=default)
Concurrency uint16
TransactionalValidation blob.TransferValidationType
// Optional header, Specifies the transactional crc64 for the body, to be validated by the service.
TransactionalContentCRC64 *[]byte
TransactionalContentCRC64 uint64
// Specify the transactional md5 for the body, to be validated by the service.
TransactionalContentMD5 *[]byte
TransactionalContentMD5 []byte
}
// UploadBufferOptions provides set of configurations for UploadBuffer operation
@ -214,6 +221,8 @@ func (o *uploadFromReaderOptions) getStageBlockOptions() *StageBlockOptions {
CpkInfo: o.CpkInfo,
CpkScopeInfo: o.CpkScopeInfo,
LeaseAccessConditions: leaseAccessConditions,
TransactionalValidation: o.TransactionalValidation,
}
}
@ -244,18 +253,15 @@ func (o *uploadFromReaderOptions) getCommitBlockListOptions() *CommitBlockListOp
// UploadStreamOptions provides set of configurations for UploadStream operation
type UploadStreamOptions struct {
// transferManager provides a transferManager that controls buffer allocation/reuse and
// concurrency. This overrides BlockSize and MaxConcurrency if set.
transferManager shared.TransferManager
transferMangerNotSet bool
// BlockSize defines the size of the buffer used during upload. The default and mimimum value is 1 MiB.
BlockSize int
BlockSize int64
// Concurrency defines the number of concurrent uploads to be performed to upload the file.
// Concurrency defines the max number of concurrent uploads to be performed to upload the file.
// Each concurrent upload will create a buffer of size BlockSize. The default value is one.
Concurrency int
TransactionalValidation blob.TransferValidationType
HTTPHeaders *blob.HTTPHeaders
Metadata map[string]string
AccessConditions *blob.AccessConditions
@ -265,11 +271,7 @@ type UploadStreamOptions struct {
CpkScopeInfo *blob.CpkScopeInfo
}
func (u *UploadStreamOptions) format() error {
if u == nil || u.transferManager != nil {
return nil
}
func (u *UploadStreamOptions) setDefaults() {
if u.Concurrency == 0 {
u.Concurrency = 1
}
@ -277,19 +279,16 @@ func (u *UploadStreamOptions) format() error {
if u.BlockSize < _1MiB {
u.BlockSize = _1MiB
}
var err error
u.transferManager, err = shared.NewStaticBuffer(u.BlockSize, u.Concurrency)
if err != nil {
return fmt.Errorf("bug: default transfer manager could not be created: %s", err)
}
u.transferMangerNotSet = true
return nil
}
func (u *UploadStreamOptions) getStageBlockOptions() *StageBlockOptions {
if u == nil {
return nil
}
leaseAccessConditions, _ := exported.FormatBlobAccessConditions(u.AccessConditions)
return &StageBlockOptions{
TransactionalValidation: u.TransactionalValidation,
CpkInfo: u.CpkInfo,
CpkScopeInfo: u.CpkScopeInfo,
LeaseAccessConditions: leaseAccessConditions,
@ -297,7 +296,11 @@ func (u *UploadStreamOptions) getStageBlockOptions() *StageBlockOptions {
}
func (u *UploadStreamOptions) getCommitBlockListOptions() *CommitBlockListOptions {
options := &CommitBlockListOptions{
if u == nil {
return nil
}
return &CommitBlockListOptions{
Tags: u.Tags,
Metadata: u.Metadata,
Tier: u.AccessTier,
@ -306,6 +309,40 @@ func (u *UploadStreamOptions) getCommitBlockListOptions() *CommitBlockListOption
CpkScopeInfo: u.CpkScopeInfo,
AccessConditions: u.AccessConditions,
}
return options
}
// getUploadOptions translates the stream-upload options into the equivalent
// single-shot UploadOptions. A nil receiver yields nil.
func (u *UploadStreamOptions) getUploadOptions() *UploadOptions {
	if u == nil {
		return nil
	}
	opts := UploadOptions{
		HTTPHeaders:      u.HTTPHeaders,
		Metadata:         u.Metadata,
		Tags:             u.Tags,
		Tier:             u.AccessTier,
		CpkInfo:          u.CpkInfo,
		CpkScopeInfo:     u.CpkScopeInfo,
		AccessConditions: u.AccessConditions,
	}
	return &opts
}
// ---------------------------------------------------------------------------------------------------------------------
// ExpiryType defines values for ExpiryType
type ExpiryType = exported.ExpiryType
// ExpiryTypeAbsolute defines the absolute time for the blob expiry
type ExpiryTypeAbsolute = exported.ExpiryTypeAbsolute
// ExpiryTypeRelativeToNow defines the duration relative to now for the blob expiry
type ExpiryTypeRelativeToNow = exported.ExpiryTypeRelativeToNow
// ExpiryTypeRelativeToCreation defines the duration relative to creation for the blob expiry
type ExpiryTypeRelativeToCreation = exported.ExpiryTypeRelativeToCreation
// ExpiryTypeNever defines that the blob will be set to never expire
type ExpiryTypeNever = exported.ExpiryTypeNever
// SetExpiryOptions contains the optional parameters for the Client.SetExpiry method.
type SetExpiryOptions = exported.SetExpiryOptions

View file

@ -97,7 +97,7 @@ func toUploadReaderAtResponseFromCommitBlockListResponse(resp CommitBlockListRes
RequestID: resp.RequestID,
Version: resp.Version,
VersionID: resp.VersionID,
ContentCRC64: resp.XMSContentCRC64,
ContentCRC64: resp.ContentCRC64,
}
}
@ -109,3 +109,6 @@ type UploadBufferResponse = uploadFromReaderResponse
// UploadStreamResponse contains the response from method Client.CommitBlockList.
type UploadStreamResponse = CommitBlockListResponse
// SetExpiryResponse contains the response from method BlobClient.SetExpiry.
type SetExpiryResponse = generated.BlobClientSetExpiryResponse

View file

@ -101,6 +101,11 @@ func (c *Client) URL() string {
return c.svc.URL()
}
// ServiceClient returns the embedded service client for this client.
// The returned *service.Client is the same instance this Client wraps,
// not a copy.
func (c *Client) ServiceClient() *service.Client {
	return c.svc
}
// CreateContainer is a lifecycle method to creates a new container under the specified account.
// If the container with the same name already exists, a ResourceExistsError will be raised.
// This method returns a client with which to interact with the newly created container.

View file

@ -10,6 +10,7 @@ import (
"context"
"errors"
"net/http"
"net/url"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
@ -106,41 +107,38 @@ func (c *Client) URL() string {
return c.generated().Endpoint()
}
// NewBlobClient creates a new BlobClient object by concatenating blobName to the end of
// Client's URL. The new BlobClient uses the same request policy pipeline as the Client.
// To change the pipeline, create the BlobClient and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewBlobClient instead of calling this object's
// NewBlobClient method.
// NewBlobClient creates a new blob.Client object by concatenating blobName to the end of
// Client's URL. The blob name will be URL-encoded.
// The new blob.Client uses the same request policy pipeline as this Client.
func (c *Client) NewBlobClient(blobName string) *blob.Client {
	// Escape the name first so it forms a single, safe path segment.
	escaped := url.PathEscape(blobName)
	return (*blob.Client)(base.NewBlobClient(runtime.JoinPaths(c.URL(), escaped), c.generated().Pipeline(), c.sharedKey()))
}
// NewAppendBlobClient creates a new AppendBlobURL object by concatenating blobName to the end of
// Client's URL. The new AppendBlobURL uses the same request policy pipeline as the Client.
// To change the pipeline, create the AppendBlobURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewAppendBlobClient instead of calling this object's
// NewAppendBlobClient method.
// NewAppendBlobClient creates a new appendblob.Client object by concatenating blobName to the end of
// this Client's URL. The blob name will be URL-encoded.
// The new appendblob.Client uses the same request policy pipeline as this Client.
func (c *Client) NewAppendBlobClient(blobName string) *appendblob.Client {
	// Escape the name first so it forms a single, safe path segment.
	escaped := url.PathEscape(blobName)
	return (*appendblob.Client)(base.NewAppendBlobClient(runtime.JoinPaths(c.URL(), escaped), c.generated().Pipeline(), c.sharedKey()))
}
// NewBlockBlobClient creates a new BlockBlobClient object by concatenating blobName to the end of
// Client's URL. The new BlockBlobClient uses the same request policy pipeline as the Client.
// To change the pipeline, create the BlockBlobClient and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewBlockBlobClient instead of calling this object's
// NewBlockBlobClient method.
// NewBlockBlobClient creates a new blockblob.Client object by concatenating blobName to the end of
// this Client's URL. The blob name will be URL-encoded.
// The new blockblob.Client uses the same request policy pipeline as this Client.
func (c *Client) NewBlockBlobClient(blobName string) *blockblob.Client {
	// Escape the name first so it forms a single, safe path segment.
	escaped := url.PathEscape(blobName)
	return (*blockblob.Client)(base.NewBlockBlobClient(runtime.JoinPaths(c.URL(), escaped), c.generated().Pipeline(), c.sharedKey()))
}
// NewPageBlobClient creates a new PageBlobURL object by concatenating blobName to the end of Client's URL. The new PageBlobURL uses the same request policy pipeline as the Client.
// To change the pipeline, create the PageBlobURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewPageBlobClient instead of calling this object's
// NewPageBlobClient method.
// NewPageBlobClient creates a new pageblob.Client object by concatenating blobName to the end of
// this Client's URL. The blob name will be URL-encoded.
// The new pageblob.Client uses the same request policy pipeline as this Client.
func (c *Client) NewPageBlobClient(blobName string) *pageblob.Client {
	// Escape the name first so it forms a single, safe path segment.
	escaped := url.PathEscape(blobName)
	return (*pageblob.Client)(base.NewPageBlobClient(runtime.JoinPaths(c.URL(), escaped), c.generated().Pipeline(), c.sharedKey()))
}
@ -221,6 +219,12 @@ func (c *Client) GetAccessPolicy(ctx context.Context, o *GetAccessPolicyOptions)
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl.
func (c *Client) SetAccessPolicy(ctx context.Context, containerACL []*SignedIdentifier, o *SetAccessPolicyOptions) (SetAccessPolicyResponse, error) {
accessPolicy, mac, lac := o.format()
for _, c := range containerACL {
err := formatTime(c)
if err != nil {
return SetAccessPolicyResponse{}, err
}
}
resp, err := c.generated().SetAccessPolicy(ctx, containerACL, accessPolicy, mac, lac)
return resp, err
}

View file

@ -8,6 +8,32 @@ package container
import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
// AccessTier defines values for blob access tiers.
type AccessTier = generated.AccessTier
const (
AccessTierArchive AccessTier = generated.AccessTierArchive
AccessTierCool AccessTier = generated.AccessTierCool
AccessTierHot AccessTier = generated.AccessTierHot
AccessTierP10 AccessTier = generated.AccessTierP10
AccessTierP15 AccessTier = generated.AccessTierP15
AccessTierP20 AccessTier = generated.AccessTierP20
AccessTierP30 AccessTier = generated.AccessTierP30
AccessTierP4 AccessTier = generated.AccessTierP4
AccessTierP40 AccessTier = generated.AccessTierP40
AccessTierP50 AccessTier = generated.AccessTierP50
AccessTierP6 AccessTier = generated.AccessTierP6
AccessTierP60 AccessTier = generated.AccessTierP60
AccessTierP70 AccessTier = generated.AccessTierP70
AccessTierP80 AccessTier = generated.AccessTierP80
AccessTierPremium AccessTier = generated.AccessTierPremium
)
// PossibleAccessTierValues returns the possible values for the AccessTier const type.
// It forwards directly to the generated package's list.
func PossibleAccessTierValues() []AccessTier {
	return generated.PossibleAccessTierValues()
}
// PublicAccessType defines values for AccessType - private (default) or blob or container
type PublicAccessType = generated.PublicAccessType
@ -67,48 +93,6 @@ func PossibleBlobTypeValues() []BlobType {
return generated.PossibleBlobTypeValues()
}
// LeaseStatusType defines values for LeaseStatusType
type LeaseStatusType = generated.LeaseStatusType
const (
LeaseStatusTypeLocked LeaseStatusType = generated.LeaseStatusTypeLocked
LeaseStatusTypeUnlocked LeaseStatusType = generated.LeaseStatusTypeUnlocked
)
// PossibleLeaseStatusTypeValues returns the possible values for the LeaseStatusType const type.
func PossibleLeaseStatusTypeValues() []LeaseStatusType {
return generated.PossibleLeaseStatusTypeValues()
}
// LeaseDurationType defines values for LeaseDurationType
type LeaseDurationType = generated.LeaseDurationType
const (
LeaseDurationTypeInfinite LeaseDurationType = generated.LeaseDurationTypeInfinite
LeaseDurationTypeFixed LeaseDurationType = generated.LeaseDurationTypeFixed
)
// PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type.
func PossibleLeaseDurationTypeValues() []LeaseDurationType {
return generated.PossibleLeaseDurationTypeValues()
}
// LeaseStateType defines values for LeaseStateType
type LeaseStateType = generated.LeaseStateType
const (
LeaseStateTypeAvailable LeaseStateType = generated.LeaseStateTypeAvailable
LeaseStateTypeLeased LeaseStateType = generated.LeaseStateTypeLeased
LeaseStateTypeExpired LeaseStateType = generated.LeaseStateTypeExpired
LeaseStateTypeBreaking LeaseStateType = generated.LeaseStateTypeBreaking
LeaseStateTypeBroken LeaseStateType = generated.LeaseStateTypeBroken
)
// PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type.
func PossibleLeaseStateTypeValues() []LeaseStateType {
return generated.PossibleLeaseStateTypeValues()
}
// ArchiveStatus defines values for ArchiveStatus
type ArchiveStatus = generated.ArchiveStatus

View file

@ -8,6 +8,7 @@ package container
import (
"reflect"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
@ -28,10 +29,16 @@ func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredentia
type CpkScopeInfo = generated.ContainerCpkScopeInfo
// BlobProperties - Properties of a blob
type BlobProperties = generated.BlobPropertiesInternal
type BlobProperties = generated.BlobProperties
// BlobItem - An Azure Storage blob
type BlobItem = generated.BlobItemInternal
type BlobItem = generated.BlobItem
// BlobPrefix is a blob's prefix when hierarchically listing blobs.
type BlobPrefix = generated.BlobPrefix
// BlobTag - a key/value pair on a blob
type BlobTag = generated.BlobTag
// AccessConditions identifies container-specific access conditions which you optionally set.
type AccessConditions = exported.ContainerAccessConditions
@ -261,3 +268,26 @@ func (o *SetAccessPolicyOptions) format() (*generated.ContainerClientSetAccessPo
Access: o.Access,
}, lac, mac
}
// formatTime normalizes the Start and Expiry times of a signed identifier's
// access policy by round-tripping each through RFC3339 in UTC. Because the
// time.RFC3339 layout carries no fractional seconds, this truncates values
// to whole-second precision. Fields that are nil are left untouched, and a
// field is only overwritten when its round-trip succeeds.
func formatTime(c *SignedIdentifier) error {
	if c.AccessPolicy == nil {
		return nil
	}
	roundTrip := func(t time.Time) (time.Time, error) {
		return time.Parse(time.RFC3339, t.UTC().Format(time.RFC3339))
	}
	if start := c.AccessPolicy.Start; start != nil {
		st, err := roundTrip(*start)
		if err != nil {
			return err
		}
		c.AccessPolicy.Start = &st
	}
	if expiry := c.AccessPolicy.Expiry; expiry != nil {
		et, err := roundTrip(*expiry)
		if err != nil {
			return err
		}
		c.AccessPolicy.Expiry = &et
	}
	return nil
}

View file

@ -0,0 +1,71 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package exported
import (
"net/http"
"strconv"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
// ExpiryType defines values for ExpiryType
type ExpiryType interface {
Format(o *SetExpiryOptions) (generated.ExpiryOptions, *generated.BlobClientSetExpiryOptions)
notPubliclyImplementable()
}
// ExpiryTypeAbsolute defines the absolute time for the blob expiry
type ExpiryTypeAbsolute time.Time
// ExpiryTypeRelativeToNow defines the duration relative to now for the blob expiry
type ExpiryTypeRelativeToNow time.Duration
// ExpiryTypeRelativeToCreation defines the duration relative to creation for the blob expiry
type ExpiryTypeRelativeToCreation time.Duration
// ExpiryTypeNever defines that the blob will be set to never expire
type ExpiryTypeNever struct {
// empty struct since NeverExpire expiry type does not require expiry time
}
// SetExpiryOptions contains the optional parameters for the Client.SetExpiry method.
type SetExpiryOptions struct {
// placeholder for future options
}
// Format for ExpiryTypeAbsolute selects the Absolute expiry option and
// encodes the expiry as an HTTP-date string in UTC.
func (e ExpiryTypeAbsolute) Format(o *SetExpiryOptions) (generated.ExpiryOptions, *generated.BlobClientSetExpiryOptions) {
	return generated.ExpiryOptionsAbsolute, &generated.BlobClientSetExpiryOptions{
		ExpiresOn: to.Ptr(time.Time(e).UTC().Format(http.TimeFormat)),
	}
}

// notPubliclyImplementable prevents types outside this package from
// satisfying the ExpiryType interface.
func (e ExpiryTypeAbsolute) notPubliclyImplementable() {}

// Format for ExpiryTypeRelativeToNow selects the RelativeToNow expiry option
// and encodes the duration as a whole number of milliseconds.
func (e ExpiryTypeRelativeToNow) Format(o *SetExpiryOptions) (generated.ExpiryOptions, *generated.BlobClientSetExpiryOptions) {
	return generated.ExpiryOptionsRelativeToNow, &generated.BlobClientSetExpiryOptions{
		ExpiresOn: to.Ptr(strconv.FormatInt(time.Duration(e).Milliseconds(), 10)),
	}
}

func (e ExpiryTypeRelativeToNow) notPubliclyImplementable() {}

// Format for ExpiryTypeRelativeToCreation selects the RelativeToCreation
// expiry option and encodes the duration as a whole number of milliseconds.
func (e ExpiryTypeRelativeToCreation) Format(o *SetExpiryOptions) (generated.ExpiryOptions, *generated.BlobClientSetExpiryOptions) {
	return generated.ExpiryOptionsRelativeToCreation, &generated.BlobClientSetExpiryOptions{
		ExpiresOn: to.Ptr(strconv.FormatInt(time.Duration(e).Milliseconds(), 10)),
	}
}

func (e ExpiryTypeRelativeToCreation) notPubliclyImplementable() {}

// Format for ExpiryTypeNever selects the NeverExpire option; no additional
// options are required.
func (e ExpiryTypeNever) Format(o *SetExpiryOptions) (generated.ExpiryOptions, *generated.BlobClientSetExpiryOptions) {
	return generated.ExpiryOptionsNeverExpire, nil
}

func (e ExpiryTypeNever) notPubliclyImplementable() {}

View file

@ -0,0 +1,67 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package exported
import (
"bytes"
"encoding/binary"
"hash/crc64"
"io"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
)
// TransferValidationType abstracts the various mechanisms used to verify a transfer.
type TransferValidationType interface {
Apply(io.ReadSeekCloser, generated.TransactionalContentSetter) (io.ReadSeekCloser, error)
notPubliclyImplementable()
}
// TransferValidationTypeCRC64 is a TransferValidationType used to provide a precomputed CRC64.
type TransferValidationTypeCRC64 uint64

// Apply encodes the precomputed CRC64 as 8 little-endian bytes, sets it on
// the request options via SetCRC64, and returns the body unchanged.
func (c TransferValidationTypeCRC64) Apply(rsc io.ReadSeekCloser, cfg generated.TransactionalContentSetter) (io.ReadSeekCloser, error) {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, uint64(c))
	cfg.SetCRC64(buf)
	return rsc, nil
}

// notPubliclyImplementable prevents external implementations of TransferValidationType.
func (TransferValidationTypeCRC64) notPubliclyImplementable() {}

// TransferValidationTypeComputeCRC64 is a TransferValidationType that indicates a CRC64 should be computed during transfer.
// NOTE: the entire body is read into memory to compute the checksum, then
// replaced with an in-memory reader over the same bytes.
func TransferValidationTypeComputeCRC64() TransferValidationType {
	return transferValidationTypeFn(func(rsc io.ReadSeekCloser, cfg generated.TransactionalContentSetter) (io.ReadSeekCloser, error) {
		buf, err := io.ReadAll(rsc)
		if err != nil {
			return nil, err
		}
		crc := crc64.Checksum(buf, shared.CRC64Table)
		return TransferValidationTypeCRC64(crc).Apply(streaming.NopCloser(bytes.NewReader(buf)), cfg)
	})
}

// TransferValidationTypeMD5 is a TransferValidationType used to provide a precomputed MD5.
type TransferValidationTypeMD5 []byte

// Apply sets the precomputed MD5 on the request options via SetMD5 and
// returns the body unchanged.
func (c TransferValidationTypeMD5) Apply(rsc io.ReadSeekCloser, cfg generated.TransactionalContentSetter) (io.ReadSeekCloser, error) {
	cfg.SetMD5(c)
	return rsc, nil
}

func (TransferValidationTypeMD5) notPubliclyImplementable() {}

// transferValidationTypeFn adapts a plain function to the
// TransferValidationType interface.
type transferValidationTypeFn func(io.ReadSeekCloser, generated.TransactionalContentSetter) (io.ReadSeekCloser, error)

func (t transferValidationTypeFn) Apply(rsc io.ReadSeekCloser, cfg generated.TransactionalContentSetter) (io.ReadSeekCloser, error) {
	return t(rsc, cfg)
}

func (transferValidationTypeFn) notPubliclyImplementable() {}

View file

@ -8,5 +8,5 @@ package exported
const (
ModuleName = "azblob"
ModuleVersion = "v0.5.1"
ModuleVersion = "v0.6.1"
)

View file

@ -9,7 +9,7 @@ version: "^3.0.0"
license-header: MICROSOFT_MIT_NO_VERSION
input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/e515b6251fdc21015282d2e84b85beec7c091763/specification/storage/data-plane/Microsoft.BlobStorage/preview/2020-10-02/blob.json"
credential-scope: "https://storage.azure.com/.default"
output-folder: .
output-folder: ../generated
file-prefix: "zz_"
openapi-type: "data-plane"
verbose: true
@ -299,6 +299,25 @@ directive:
where: $
transform: >-
return $.
replace(/StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed\t+\StorageErrorCode\s+=\s+\"IncrementalCopyOfEralierVersionSnapshotNotAllowed"\n, /StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed\t+\StorageErrorCode\s+=\s+\"IncrementalCopyOfEarlierVersionSnapshotNotAllowed"\
replace(/StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed/g, /StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed/g)
replace(/IncrementalCopyOfEralierVersionSnapshotNotAllowed/g, "IncrementalCopyOfEarlierVersionSnapshotNotAllowed");
```
### Fix up x-ms-content-crc64 header response name
``` yaml
directive:
- from: swagger-document
where: $.x-ms-paths.*.*.responses.*.headers.x-ms-content-crc64
transform: >
$["x-ms-client-name"] = "ContentCRC64"
```
``` yaml
directive:
- rename-model:
from: BlobItemInternal
to: BlobItem
- rename-model:
from: BlobPropertiesInternal
to: BlobProperties
```

View file

@ -0,0 +1,10 @@
//go:build go1.18
// +build go1.18
//go:generate autorest ./autorest.md
//go:generate gofmt -w .
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package generated

View file

@ -0,0 +1,65 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package generated
// TransactionalContentSetter is implemented by the generated request-options
// types that can carry a transactional content checksum (CRC64 or MD5) for
// the request body.
type TransactionalContentSetter interface {
	SetCRC64([]byte)
	SetMD5([]byte)
}

// SetCRC64 stores the transactional CRC64 on the append-block options.
func (a *AppendBlobClientAppendBlockOptions) SetCRC64(v []byte) {
	a.TransactionalContentCRC64 = v
}

// SetMD5 stores the transactional MD5 on the append-block options.
func (a *AppendBlobClientAppendBlockOptions) SetMD5(v []byte) {
	a.TransactionalContentMD5 = v
}

// SetCRC64 stores the transactional CRC64 on the stage-block options.
func (b *BlockBlobClientStageBlockOptions) SetCRC64(v []byte) {
	b.TransactionalContentCRC64 = v
}

// SetMD5 stores the transactional MD5 on the stage-block options.
func (b *BlockBlobClientStageBlockOptions) SetMD5(v []byte) {
	b.TransactionalContentMD5 = v
}

// SetCRC64 stores the transactional CRC64 on the upload-pages options.
func (p *PageBlobClientUploadPagesOptions) SetCRC64(v []byte) {
	p.TransactionalContentCRC64 = v
}

// SetMD5 stores the transactional MD5 on the upload-pages options.
func (p *PageBlobClientUploadPagesOptions) SetMD5(v []byte) {
	p.TransactionalContentMD5 = v
}

// SourceContentSetter is implemented by the generated request-options types
// that can carry a checksum (CRC64 or MD5) for content copied from a source URL.
type SourceContentSetter interface {
	SetSourceContentCRC64(v []byte)
	SetSourceContentMD5(v []byte)
}

// SetSourceContentCRC64 stores the source CRC64 on the append-block-from-URL options.
func (a *AppendBlobClientAppendBlockFromURLOptions) SetSourceContentCRC64(v []byte) {
	a.SourceContentcrc64 = v
}

// SetSourceContentMD5 stores the source MD5 on the append-block-from-URL options.
func (a *AppendBlobClientAppendBlockFromURLOptions) SetSourceContentMD5(v []byte) {
	a.SourceContentMD5 = v
}

// SetSourceContentCRC64 stores the source CRC64 on the stage-block-from-URL options.
func (b *BlockBlobClientStageBlockFromURLOptions) SetSourceContentCRC64(v []byte) {
	b.SourceContentcrc64 = v
}

// SetSourceContentMD5 stores the source MD5 on the stage-block-from-URL options.
func (b *BlockBlobClientStageBlockFromURLOptions) SetSourceContentMD5(v []byte) {
	b.SourceContentMD5 = v
}

// SetSourceContentCRC64 stores the source CRC64 on the upload-pages-from-URL options.
func (p *PageBlobClientUploadPagesFromURLOptions) SetSourceContentCRC64(v []byte) {
	p.SourceContentcrc64 = v
}

// SetSourceContentMD5 stores the source MD5 on the upload-pages-from-URL options.
func (p *PageBlobClientUploadPagesFromURLOptions) SetSourceContentMD5(v []byte) {
	p.SourceContentMD5 = v
}

View file

@ -152,11 +152,11 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) (
result.ContentMD5 = contentMD5
}
if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
contentCRC64, err := base64.StdEncoding.DecodeString(val)
if err != nil {
return AppendBlobClientAppendBlockResponse{}, err
}
result.XMSContentCRC64 = xMSContentCRC64
result.ContentCRC64 = contentCRC64
}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val
@ -339,11 +339,11 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp
result.ContentMD5 = contentMD5
}
if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
contentCRC64, err := base64.StdEncoding.DecodeString(val)
if err != nil {
return AppendBlobClientAppendBlockFromURLResponse{}, err
}
result.XMSContentCRC64 = xMSContentCRC64
result.ContentCRC64 = contentCRC64
}
if val := resp.Header.Get("x-ms-request-id"); val != "" {
result.RequestID = &val

View file

@ -551,11 +551,11 @@ func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobCl
result.ContentMD5 = contentMD5
}
if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
contentCRC64, err := base64.StdEncoding.DecodeString(val)
if err != nil {
return BlobClientCopyFromURLResponse{}, err
}
result.XMSContentCRC64 = xMSContentCRC64
result.ContentCRC64 = contentCRC64
}
return result, nil
}

View file

@ -186,11 +186,11 @@ func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response
result.ContentMD5 = contentMD5
}
if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
contentCRC64, err := base64.StdEncoding.DecodeString(val)
if err != nil {
return BlockBlobClientCommitBlockListResponse{}, err
}
result.XMSContentCRC64 = xMSContentCRC64
result.ContentCRC64 = contentCRC64
}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val
@ -619,11 +619,11 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl
result.Date = &date
}
if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
contentCRC64, err := base64.StdEncoding.DecodeString(val)
if err != nil {
return BlockBlobClientStageBlockResponse{}, err
}
result.XMSContentCRC64 = xMSContentCRC64
result.ContentCRC64 = contentCRC64
}
if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
isServerEncrypted, err := strconv.ParseBool(val)
@ -745,11 +745,11 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon
result.ContentMD5 = contentMD5
}
if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
contentCRC64, err := base64.StdEncoding.DecodeString(val)
if err != nil {
return BlockBlobClientStageBlockFromURLResponse{}, err
}
result.XMSContentCRC64 = xMSContentCRC64
result.ContentCRC64 = contentCRC64
}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val

View file

@ -503,7 +503,7 @@ type BlobClientUndeleteOptions struct {
type BlobFlatListSegment struct {
// REQUIRED
BlobItems []*BlobItemInternal `xml:"Blob"`
BlobItems []*BlobItem `xml:"Blob"`
}
// BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method.
@ -527,12 +527,12 @@ type BlobHTTPHeaders struct {
type BlobHierarchyListSegment struct {
// REQUIRED
BlobItems []*BlobItemInternal `xml:"Blob"`
BlobItems []*BlobItem `xml:"Blob"`
BlobPrefixes []*BlobPrefix `xml:"BlobPrefix"`
}
// BlobItemInternal - An Azure Storage blob
type BlobItemInternal struct {
// BlobItem - An Azure Storage blob
type BlobItem struct {
// REQUIRED
Deleted *bool `xml:"Deleted"`
@ -540,7 +540,7 @@ type BlobItemInternal struct {
Name *string `xml:"Name"`
// REQUIRED; Properties of a blob
Properties *BlobPropertiesInternal `xml:"Properties"`
Properties *BlobProperties `xml:"Properties"`
// REQUIRED
Snapshot *string `xml:"Snapshot"`
@ -563,8 +563,8 @@ type BlobPrefix struct {
Name *string `xml:"Name"`
}
// BlobPropertiesInternal - Properties of a blob
type BlobPropertiesInternal struct {
// BlobProperties - Properties of a blob
type BlobProperties struct {
// REQUIRED
ETag *azcore.ETag `xml:"Etag"`

View file

@ -72,7 +72,7 @@ func (b BlobFlatListSegment) MarshalXML(e *xml.Encoder, start xml.StartElement)
type alias BlobFlatListSegment
aux := &struct {
*alias
BlobItems *[]*BlobItemInternal `xml:"Blob"`
BlobItems *[]*BlobItem `xml:"Blob"`
}{
alias: (*alias)(&b),
}
@ -87,7 +87,7 @@ func (b BlobHierarchyListSegment) MarshalXML(e *xml.Encoder, start xml.StartElem
type alias BlobHierarchyListSegment
aux := &struct {
*alias
BlobItems *[]*BlobItemInternal `xml:"Blob"`
BlobItems *[]*BlobItem `xml:"Blob"`
BlobPrefixes *[]*BlobPrefix `xml:"BlobPrefix"`
}{
alias: (*alias)(&b),
@ -101,9 +101,9 @@ func (b BlobHierarchyListSegment) MarshalXML(e *xml.Encoder, start xml.StartElem
return e.EncodeElement(aux, start)
}
// UnmarshalXML implements the xml.Unmarshaller interface for type BlobItemInternal.
func (b *BlobItemInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
type alias BlobItemInternal
// UnmarshalXML implements the xml.Unmarshaller interface for type BlobItem.
func (b *BlobItem) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
type alias BlobItem
aux := &struct {
*alias
Metadata additionalProperties `xml:"Metadata"`
@ -119,9 +119,9 @@ func (b *BlobItemInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement)
return nil
}
// MarshalXML implements the xml.Marshaller interface for type BlobPropertiesInternal.
func (b BlobPropertiesInternal) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type alias BlobPropertiesInternal
// MarshalXML implements the xml.Marshaller interface for type BlobProperties.
func (b BlobProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type alias BlobProperties
aux := &struct {
*alias
AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"`
@ -151,9 +151,9 @@ func (b BlobPropertiesInternal) MarshalXML(e *xml.Encoder, start xml.StartElemen
return e.EncodeElement(aux, start)
}
// UnmarshalXML implements the xml.Unmarshaller interface for type BlobPropertiesInternal.
func (b *BlobPropertiesInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
type alias BlobPropertiesInternal
// UnmarshalXML implements the xml.Unmarshaller interface for type BlobProperties.
func (b *BlobProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
type alias BlobProperties
aux := &struct {
*alias
AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"`

View file

@ -150,11 +150,11 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag
result.ContentMD5 = contentMD5
}
if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
contentCRC64, err := base64.StdEncoding.DecodeString(val)
if err != nil {
return PageBlobClientClearPagesResponse{}, err
}
result.XMSContentCRC64 = xMSContentCRC64
result.ContentCRC64 = contentCRC64
}
if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
@ -1058,11 +1058,11 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa
result.ContentMD5 = contentMD5
}
if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
contentCRC64, err := base64.StdEncoding.DecodeString(val)
if err != nil {
return PageBlobClientUploadPagesResponse{}, err
}
result.XMSContentCRC64 = xMSContentCRC64
result.ContentCRC64 = contentCRC64
}
if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
@ -1244,11 +1244,11 @@ func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Respon
result.ContentMD5 = contentMD5
}
if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
contentCRC64, err := base64.StdEncoding.DecodeString(val)
if err != nil {
return PageBlobClientUploadPagesFromURLResponse{}, err
}
result.XMSContentCRC64 = xMSContentCRC64
result.ContentCRC64 = contentCRC64
}
if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)

View file

@ -23,6 +23,9 @@ type AppendBlobClientAppendBlockFromURLResponse struct {
// BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response.
BlobCommittedBlockCount *int32
// ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
ContentCRC64 []byte
// ContentMD5 contains the information returned from the Content-MD5 header response.
ContentMD5 []byte
@ -49,9 +52,6 @@ type AppendBlobClientAppendBlockFromURLResponse struct {
// Version contains the information returned from the x-ms-version header response.
Version *string
// XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
XMSContentCRC64 []byte
}
// AppendBlobClientAppendBlockResponse contains the response from method AppendBlobClient.AppendBlock.
@ -65,6 +65,9 @@ type AppendBlobClientAppendBlockResponse struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
// ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
ContentCRC64 []byte
// ContentMD5 contains the information returned from the Content-MD5 header response.
ContentMD5 []byte
@ -91,9 +94,6 @@ type AppendBlobClientAppendBlockResponse struct {
// Version contains the information returned from the x-ms-version header response.
Version *string
// XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
XMSContentCRC64 []byte
}
// AppendBlobClientCreateResponse contains the response from method AppendBlobClient.Create.
@ -248,6 +248,9 @@ type BlobClientCopyFromURLResponse struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
// ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
ContentCRC64 []byte
// ContentMD5 contains the information returned from the Content-MD5 header response.
ContentMD5 []byte
@ -274,9 +277,6 @@ type BlobClientCopyFromURLResponse struct {
// VersionID contains the information returned from the x-ms-version-id header response.
VersionID *string
// XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
XMSContentCRC64 []byte
}
// BlobClientCreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot.
@ -1008,6 +1008,9 @@ type BlockBlobClientCommitBlockListResponse struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
// ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
ContentCRC64 []byte
// ContentMD5 contains the information returned from the Content-MD5 header response.
ContentMD5 []byte
@ -1037,9 +1040,6 @@ type BlockBlobClientCommitBlockListResponse struct {
// VersionID contains the information returned from the x-ms-version-id header response.
VersionID *string
// XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
XMSContentCRC64 []byte
}
// BlockBlobClientGetBlockListResponse contains the response from method BlockBlobClient.GetBlockList.
@ -1111,6 +1111,9 @@ type BlockBlobClientStageBlockFromURLResponse struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
// ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
ContentCRC64 []byte
// ContentMD5 contains the information returned from the Content-MD5 header response.
ContentMD5 []byte
@ -1131,9 +1134,6 @@ type BlockBlobClientStageBlockFromURLResponse struct {
// Version contains the information returned from the x-ms-version header response.
Version *string
// XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
XMSContentCRC64 []byte
}
// BlockBlobClientStageBlockResponse contains the response from method BlockBlobClient.StageBlock.
@ -1141,6 +1141,9 @@ type BlockBlobClientStageBlockResponse struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
// ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
ContentCRC64 []byte
// ContentMD5 contains the information returned from the Content-MD5 header response.
ContentMD5 []byte
@ -1161,9 +1164,6 @@ type BlockBlobClientStageBlockResponse struct {
// Version contains the information returned from the x-ms-version header response.
Version *string
// XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
XMSContentCRC64 []byte
}
// BlockBlobClientUploadResponse contains the response from method BlockBlobClient.Upload.
@ -1588,6 +1588,9 @@ type PageBlobClientClearPagesResponse struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
// ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
ContentCRC64 []byte
// ContentMD5 contains the information returned from the Content-MD5 header response.
ContentMD5 []byte
@ -1605,9 +1608,6 @@ type PageBlobClientClearPagesResponse struct {
// Version contains the information returned from the x-ms-version header response.
Version *string
// XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
XMSContentCRC64 []byte
}
// PageBlobClientCopyIncrementalResponse contains the response from method PageBlobClient.CopyIncremental.
@ -1776,6 +1776,9 @@ type PageBlobClientUploadPagesFromURLResponse struct {
// BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
BlobSequenceNumber *int64
// ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
ContentCRC64 []byte
// ContentMD5 contains the information returned from the Content-MD5 header response.
ContentMD5 []byte
@ -1802,9 +1805,6 @@ type PageBlobClientUploadPagesFromURLResponse struct {
// Version contains the information returned from the x-ms-version header response.
Version *string
// XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
XMSContentCRC64 []byte
}
// PageBlobClientUploadPagesResponse contains the response from method PageBlobClient.UploadPages.
@ -1815,6 +1815,9 @@ type PageBlobClientUploadPagesResponse struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
// ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
ContentCRC64 []byte
// ContentMD5 contains the information returned from the Content-MD5 header response.
ContentMD5 []byte
@ -1841,9 +1844,6 @@ type PageBlobClientUploadPagesResponse struct {
// Version contains the information returned from the x-ms-version header response.
Version *string
// XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
XMSContentCRC64 []byte
}
// ServiceClientFilterBlobsResponse contains the response from method ServiceClient.FilterBlobs.

View file

@ -16,7 +16,7 @@ type BatchTransferOptions struct {
TransferSize int64
ChunkSize int64
Concurrency uint16
Operation func(offset int64, chunkSize int64, ctx context.Context) error
Operation func(ctx context.Context, offset int64, chunkSize int64) error
OperationName string
}
@ -57,9 +57,8 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error {
curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total
}
offset := int64(chunkNum) * o.ChunkSize
operationChannel <- func() error {
return o.Operation(offset, curChunkSize, ctx)
return o.Operation(ctx, offset, curChunkSize)
}
}
close(operationChannel)

View file

@ -9,6 +9,7 @@ package shared
import (
"errors"
"fmt"
"hash/crc64"
"io"
"net"
"net/url"
@ -39,6 +40,10 @@ const (
HeaderRange = "Range"
)
const crc64Polynomial uint64 = 0x9A6C9329AC4BC9B5
var CRC64Table = crc64.MakeTable(crc64Polynomial)
// CopyOptions returns a zero-value T if opts is nil.
// If opts is not nil, a copy is made and its address returned.
func CopyOptions[T any](opts *T) *T {

View file

@ -1,156 +0,0 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package shared
import (
"fmt"
"sync"
)
const _1MiB = 1024 * 1024
// TransferManager provides a buffer and thread pool manager for certain transfer options.
// It is undefined behavior if code outside this package calls any of these methods.
type TransferManager interface {
	// Get provides a buffer that will be used to read data into and write out to the stream.
	// It is guaranteed by this package to not read or write beyond the size of the slice.
	Get() []byte

	// Put may or may not put the buffer into underlying storage, depending on settings.
	// The buffer must not be touched after this has been called.
	Put(b []byte) // nolint

	// Run will use a goroutine pool entry to run a function. This blocks until a pool
	// goroutine becomes available.
	Run(func())

	// Close shuts down all internal goroutines. This must be called when the TransferManager
	// will no longer be used. Not closing it will cause a goroutine leak.
	Close()
}
// ---------------------------------------------------------------------------------------------------------------------
// staticBuffer is a TransferManager that recycles a fixed number of
// equally-sized buffers through a channel and runs work on a fixed-size
// goroutine pool.
type staticBuffer struct {
	buffers    chan []byte // circular store of reusable buffers
	size       int         // length of every buffer handed out by Get
	threadpool chan func() // queue feeding the worker goroutines
}
// NewStaticBuffer creates a TransferManager that will use a channel as a circular buffer
// that can hold "max" buffers of "size". The goroutine pool is also sized at max. This
// can be shared between calls if you wish to control maximum memory and concurrency with
// multiple concurrent calls.
func NewStaticBuffer(size, max int) (TransferManager, error) {
	// Reject nonsensical sizing up front.
	if size < 1 || max < 1 {
		return nil, fmt.Errorf("cannot be called with size or max set to < 1")
	}
	if size < _1MiB {
		return nil, fmt.Errorf("cannot have size < 1MiB")
	}

	sb := staticBuffer{
		buffers:    make(chan []byte, max),
		size:       size,
		threadpool: make(chan func(), max),
	}
	// Start one worker per slot and pre-fill the pool with ready buffers.
	for i := 0; i < max; i++ {
		go func() {
			for f := range sb.threadpool {
				f()
			}
		}()
		sb.buffers <- make([]byte, size)
	}
	return sb, nil
}
// Get implements TransferManager.Get().
// It blocks until a pooled buffer becomes available, then hands it out;
// the caller is expected to return it via Put.
func (s staticBuffer) Get() []byte {
	return <-s.buffers
}
// Put implements TransferManager.Put().
// Best-effort return of b to the pool: if the pool is already full the
// buffer is dropped and left for the garbage collector.
func (s staticBuffer) Put(b []byte) { // nolint
	select {
	case s.buffers <- b:
	default: // This shouldn't happen, but just in case they call Put() with their own buffer.
	}
}
// Run implements TransferManager.Run().
// Blocks until a pool goroutine is free to pick f off the queue.
func (s staticBuffer) Run(f func()) {
	s.threadpool <- f
}
// Close implements TransferManager.Close().
// Closing threadpool terminates the worker goroutines started by
// NewStaticBuffer; the TransferManager must not be used afterwards.
func (s staticBuffer) Close() {
	close(s.threadpool)
	close(s.buffers)
}
// ---------------------------------------------------------------------------------------------------------------------
// syncPool is a TransferManager backed by a sync.Pool, so the number of
// live buffers is uncapped while the worker count stays bounded.
type syncPool struct {
	threadpool chan func() // queue feeding the worker goroutines
	pool       sync.Pool   // allocates and recycles the byte buffers
}
// NewSyncPool creates a TransferManager that will use a sync.Pool
// that can hold a non-capped number of buffers constrained by concurrency. This
// can be shared between calls if you wish to share memory and concurrency.
func NewSyncPool(size, concurrency int) (TransferManager, error) {
	if size < 1 || concurrency < 1 {
		// Fixed message: it previously said "size or max", copy-pasted from
		// NewStaticBuffer, but this function's second parameter is concurrency.
		return nil, fmt.Errorf("cannot be called with size or concurrency set to < 1")
	}
	if size < _1MiB {
		return nil, fmt.Errorf("cannot have size < 1MiB")
	}

	threadpool := make(chan func(), concurrency)
	// One worker per unit of concurrency; each drains the queue until Close.
	for i := 0; i < concurrency; i++ {
		go func() {
			for f := range threadpool {
				f()
			}
		}()
	}

	return &syncPool{
		threadpool: threadpool,
		pool: sync.Pool{
			// New allocates a fresh buffer of the fixed size when the pool is empty.
			New: func() interface{} {
				return make([]byte, size)
			},
		},
	}, nil
}
// Get implements TransferManager.Get().
// Returns a pooled []byte of the size fixed at construction, allocating a
// new one via pool.New when the pool is empty.
func (s *syncPool) Get() []byte {
	return s.pool.Get().([]byte)
}
// Put implements TransferManager.Put().
// Returns b to the sync.Pool; the caller must not touch it afterwards.
// nolint
func (s *syncPool) Put(b []byte) {
	s.pool.Put(b)
}
// Run implements TransferManager.Run().
// Blocks until a pool goroutine is free to pick f off the queue.
func (s *syncPool) Run(f func()) {
	s.threadpool <- f
}
// Close implements TransferManager.Close().
// Terminates the worker goroutines; buffers held by the sync.Pool are
// simply left for the garbage collector.
func (s *syncPool) Close() {
	close(s.threadpool)
}

View file

@ -11,7 +11,6 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
)
@ -52,10 +51,10 @@ type DownloadBufferOptions = blob.DownloadBufferOptions
type DownloadFileOptions = blob.DownloadFileOptions
// CpkInfo contains a group of parameters for client provided encryption key.
type CpkInfo = generated.CpkInfo
type CpkInfo = blob.CpkInfo
// CpkScopeInfo contains a group of parameters for the ContainerClient.Create method.
type CpkScopeInfo = generated.ContainerCpkScopeInfo
type CpkScopeInfo = container.CpkScopeInfo
// AccessConditions identifies blob-specific access conditions which you optionally set.
type AccessConditions = exported.BlobAccessConditions

View file

@ -12,6 +12,7 @@ import (
"net/http"
"net/url"
"os"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
@ -158,6 +159,13 @@ func (pb *Client) UploadPages(ctx context.Context, body io.ReadSeekCloser, optio
uploadPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format()
if options != nil && options.TransactionalValidation != nil {
body, err = options.TransactionalValidation.Apply(body, uploadPagesOptions)
if err != nil {
return UploadPagesResponse{}, err
}
}
resp, err := pb.generated().UploadPages(ctx, count, body, uploadPagesOptions, leaseAccessConditions,
cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions)
@ -317,6 +325,24 @@ func (pb *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.U
return pb.BlobClient().Undelete(ctx, o)
}
// SetImmutabilityPolicy operation enables users to set the immutability policy on a blob.
// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview
func (pb *Client) SetImmutabilityPolicy(ctx context.Context, expiryTime time.Time, options *blob.SetImmutabilityPolicyOptions) (blob.SetImmutabilityPolicyResponse, error) {
return pb.BlobClient().SetImmutabilityPolicy(ctx, expiryTime, options)
}
// DeleteImmutabilityPolicy operation enables users to delete the immutability policy on a blob.
// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview
func (pb *Client) DeleteImmutabilityPolicy(ctx context.Context, options *blob.DeleteImmutabilityPolicyOptions) (blob.DeleteImmutabilityPolicyResponse, error) {
return pb.BlobClient().DeleteImmutabilityPolicy(ctx, options)
}
// SetLegalHold operation enables users to set legal hold on a blob.
// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview
func (pb *Client) SetLegalHold(ctx context.Context, legalHold bool, options *blob.SetLegalHoldOptions) (blob.SetLegalHoldResponse, error) {
return pb.BlobClient().SetLegalHold(ctx, legalHold, options)
}
// SetTier operation sets the tier on a blob. The operation is allowed on a page
// blob in a premium storage account and on a block blob in a blob storage account (locally
// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and

View file

@ -89,9 +89,9 @@ type UploadPagesOptions struct {
// Range specifies a range of bytes. The default value is all bytes.
Range blob.HTTPRange
TransactionalContentCRC64 []byte
// Specify the transactional md5 for the body, to be validated by the service.
TransactionalContentMD5 []byte
// TransactionalValidation specifies the transfer validation type to use.
// The default is nil (no transfer validation).
TransactionalValidation blob.TransferValidationType
CpkInfo *blob.CpkInfo
CpkScopeInfo *blob.CpkScopeInfo
@ -106,8 +106,6 @@ func (o *UploadPagesOptions) format() (*generated.PageBlobClientUploadPagesOptio
}
options := &generated.PageBlobClientUploadPagesOptions{
TransactionalContentCRC64: o.TransactionalContentCRC64,
TransactionalContentMD5: o.TransactionalContentMD5,
Range: exported.FormatHTTPRange(o.Range),
}
@ -121,10 +119,9 @@ func (o *UploadPagesOptions) format() (*generated.PageBlobClientUploadPagesOptio
type UploadPagesFromURLOptions struct {
// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
CopySourceAuthorization *string
// Specify the md5 calculated for the range of bytes that must be read from the copy source.
SourceContentMD5 []byte
// Specify the crc64 calculated for the range of bytes that must be read from the copy source.
SourceContentCRC64 []byte
// SourceContentValidation contains the validation mechanism used on the range of bytes read from the source.
SourceContentValidation blob.SourceContentValidationType
CpkInfo *blob.CpkInfo
@ -144,11 +141,13 @@ func (o *UploadPagesFromURLOptions) format() (*generated.PageBlobClientUploadPag
}
options := &generated.PageBlobClientUploadPagesFromURLOptions{
SourceContentMD5: o.SourceContentMD5,
SourceContentcrc64: o.SourceContentCRC64,
CopySourceAuthorization: o.CopySourceAuthorization,
}
if o.SourceContentValidation != nil {
o.SourceContentValidation.Apply(options)
}
leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
return options, o.CpkInfo, o.CpkScopeInfo, leaseAccessConditions, o.SequenceNumberAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions
}

View file

@ -90,75 +90,10 @@ func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKey
return p, nil
}
// SignWithUserDelegation uses an account's UserDelegationKey to sign this signature values to produce the proper SAS query parameters.
func (v AccountSignatureValues) SignWithUserDelegation(userDelegationCredential *UserDelegationCredential) (QueryParameters, error) {
// https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS
if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" {
return QueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType")
}
if v.Version == "" {
v.Version = Version
}
perms, err := parseAccountPermissions(v.Permissions)
if err != nil {
return QueryParameters{}, err
}
v.Permissions = perms.String()
startTime, expiryTime, _ := formatTimesForSigning(v.StartTime, v.ExpiryTime, time.Time{})
stringToSign := strings.Join([]string{
exported.GetAccountName(userDelegationCredential),
v.Permissions,
v.Services,
v.ResourceTypes,
startTime,
expiryTime,
v.IPRange.String(),
string(v.Protocol),
v.Version,
""}, // That is right, the account SAS requires a terminating extra newline
"\n")
signature, err := exported.ComputeUDCHMACSHA256(userDelegationCredential, stringToSign)
if err != nil {
return QueryParameters{}, err
}
p := QueryParameters{
// Common SAS parameters
version: v.Version,
protocol: v.Protocol,
startTime: v.StartTime,
expiryTime: v.ExpiryTime,
permissions: v.Permissions,
ipRange: v.IPRange,
// Account-specific SAS parameters
services: v.Services,
resourceTypes: v.ResourceTypes,
// Calculated SAS signature
signature: signature,
}
udk := exported.GetUDKParams(userDelegationCredential)
//User delegation SAS specific parameters
p.signedOID = *udk.SignedOID
p.signedTID = *udk.SignedTID
p.signedStart = *udk.SignedStart
p.signedExpiry = *udk.SignedExpiry
p.signedService = *udk.SignedService
p.signedVersion = *udk.SignedVersion
return p, nil
}
// AccountPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field.
type AccountPermissions struct {
Read, Write, Delete, DeletePreviousVersion, List, Add, Create, Update, Process, Tag, FilterByTags bool
Read, Write, Delete, DeletePreviousVersion, List, Add, Create, Update, Process, Tag, FilterByTags, PermanentDelete bool
}
// String produces the SAS permissions string for an Azure Storage account.
@ -177,6 +112,9 @@ func (p *AccountPermissions) String() string {
if p.DeletePreviousVersion {
buffer.WriteRune('x')
}
if p.PermanentDelete {
buffer.WriteRune('y')
}
if p.List {
buffer.WriteRune('l')
}
@ -212,6 +150,8 @@ func parseAccountPermissions(s string) (AccountPermissions, error) {
p.Write = true
case 'd':
p.Delete = true
case 'y':
p.PermanentDelete = true
case 'l':
p.List = true
case 'a':

View file

@ -140,22 +140,22 @@ type QueryParameters struct {
signedExpiry time.Time `param:"ske"`
signedVersion string `param:"skv"`
signedDirectoryDepth string `param:"sdd"`
preauthorizedAgentObjectID string `param:"saoid"`
agentObjectID string `param:"suoid"`
authorizedObjectID string `param:"saoid"`
unauthorizedObjectID string `param:"suoid"`
correlationID string `param:"scid"`
// private member used for startTime and expiryTime formatting.
stTimeFormat string
seTimeFormat string
}
// PreauthorizedAgentObjectID returns preauthorizedAgentObjectID
func (p *QueryParameters) PreauthorizedAgentObjectID() string {
return p.preauthorizedAgentObjectID
// AuthorizedObjectID returns authorizedObjectID
func (p *QueryParameters) AuthorizedObjectID() string {
return p.authorizedObjectID
}
// AgentObjectID returns agentObjectID
func (p *QueryParameters) AgentObjectID() string {
return p.agentObjectID
// UnauthorizedObjectID returns unauthorizedObjectID
func (p *QueryParameters) UnauthorizedObjectID() string {
return p.unauthorizedObjectID
}
// SignedCorrelationID returns signedCorrelationID
@ -346,11 +346,11 @@ func (p *QueryParameters) Encode() string {
if p.signedDirectoryDepth != "" {
v.Add("sdd", p.signedDirectoryDepth)
}
if p.preauthorizedAgentObjectID != "" {
v.Add("saoid", p.preauthorizedAgentObjectID)
if p.authorizedObjectID != "" {
v.Add("saoid", p.authorizedObjectID)
}
if p.agentObjectID != "" {
v.Add("suoid", p.agentObjectID)
if p.unauthorizedObjectID != "" {
v.Add("suoid", p.unauthorizedObjectID)
}
if p.correlationID != "" {
v.Add("scid", p.correlationID)
@ -424,9 +424,9 @@ func NewQueryParameters(values url.Values, deleteSASParametersFromValues bool) Q
case "sdd":
p.signedDirectoryDepth = val
case "saoid":
p.preauthorizedAgentObjectID = val
p.authorizedObjectID = val
case "suoid":
p.agentObjectID = val
p.unauthorizedObjectID = val
case "scid":
p.correlationID = val
default:

View file

@ -16,7 +16,8 @@ import (
)
// BlobSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas
// For more information on creating service sas, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas
// For more information on creating user delegation sas, see https://docs.microsoft.com/rest/api/storageservices/create-user-delegation-sas
type BlobSignatureValues struct {
Version string `param:"sv"` // If not specified, this defaults to Version
Protocol Protocol `param:"spr"` // See the Protocol* constants
@ -35,9 +36,9 @@ type BlobSignatureValues struct {
ContentLanguage string // rscl
ContentType string // rsct
BlobVersion string // sr=bv
PreauthorizedAgentObjectId string
AgentObjectId string
CorrelationId string
AuthorizedObjectID string // saoid
UnauthorizedObjectID string // suoid
CorrelationID string // scid
}
func getDirectoryDepth(path string) string {
@ -124,9 +125,9 @@ func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCre
contentType: v.ContentType,
snapshotTime: v.SnapshotTime,
signedDirectoryDepth: getDirectoryDepth(v.Directory),
preauthorizedAgentObjectID: v.PreauthorizedAgentObjectId,
agentObjectID: v.AgentObjectId,
correlationID: v.CorrelationId,
authorizedObjectID: v.AuthorizedObjectID,
unauthorizedObjectID: v.UnauthorizedObjectID,
correlationID: v.CorrelationID,
// Calculated SAS signature
signature: signature,
}
@ -169,27 +170,21 @@ func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *Us
udk := exported.GetUDKParams(userDelegationCredential)
udkStart, udkExpiry, _ := formatTimesForSigning(*udk.SignedStart, *udk.SignedExpiry, time.Time{})
//I don't like this answer to combining the functions
//But because signedIdentifier and the user delegation key strings share a place, this is an _OK_ way to do it.
signedIdentifier := strings.Join([]string{
stringToSign := strings.Join([]string{
v.Permissions,
startTime,
expiryTime,
getCanonicalName(exported.GetAccountName(userDelegationCredential), v.ContainerName, v.BlobName, v.Directory),
*udk.SignedOID,
*udk.SignedTID,
udkStart,
udkExpiry,
*udk.SignedService,
*udk.SignedVersion,
v.PreauthorizedAgentObjectId,
v.AgentObjectId,
v.CorrelationId,
}, "\n")
// String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
stringToSign := strings.Join([]string{
v.Permissions,
startTime,
expiryTime,
getCanonicalName(exported.GetAccountName(userDelegationCredential), v.ContainerName, v.BlobName, v.Directory),
signedIdentifier,
v.AuthorizedObjectID,
v.UnauthorizedObjectID,
v.CorrelationID,
v.IPRange.String(),
string(v.Protocol),
v.Version,
@ -226,9 +221,9 @@ func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *Us
contentType: v.ContentType,
snapshotTime: v.SnapshotTime,
signedDirectoryDepth: getDirectoryDepth(v.Directory),
preauthorizedAgentObjectID: v.PreauthorizedAgentObjectId,
agentObjectID: v.AgentObjectId,
correlationID: v.CorrelationId,
authorizedObjectID: v.AuthorizedObjectID,
unauthorizedObjectID: v.UnauthorizedObjectID,
correlationID: v.CorrelationID,
// Calculated SAS signature
signature: signature,
}
@ -261,8 +256,8 @@ func getCanonicalName(account string, containerName string, blobName string, dir
// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
// All permissions descriptions can be found here: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-directory-container-or-blob
type ContainerPermissions struct {
Read, Add, Create, Write, Delete, DeletePreviousVersion, List, Tag bool
Execute, ModifyOwnership, ModifyPermissions bool // Hierarchical Namespace only
Read, Add, Create, Write, Delete, DeletePreviousVersion, List, FilterByTags bool
Execute, ModifyOwnership, ModifyPermissions, SetImmutabilityPolicy bool // Hierarchical Namespace only
}
// String produces the SAS permissions string for an Azure Storage container.
@ -290,8 +285,8 @@ func (p *ContainerPermissions) String() string {
if p.List {
b.WriteRune('l')
}
if p.Tag {
b.WriteRune('t')
if p.FilterByTags {
b.WriteRune('f')
}
if p.Execute {
b.WriteRune('e')
@ -302,6 +297,9 @@ func (p *ContainerPermissions) String() string {
if p.ModifyPermissions {
b.WriteRune('p')
}
if p.SetImmutabilityPolicy {
b.WriteRune('i')
}
return b.String()
}
@ -325,7 +323,7 @@ func (p *ContainerPermissions) String() string {
case 'l':
p.List = true
case 't':
p.Tag = true
p.FilterByTags = true
case 'e':
p.Execute = true
case 'o':
@ -342,7 +340,7 @@ func (p *ContainerPermissions) String() string {
// BlobPermissions type simplifies creating the permissions string for an Azure Storage blob SAS.
// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
type BlobPermissions struct {
Read, Add, Create, Write, Delete, DeletePreviousVersion, Tag, List, Move, Execute, Ownership, Permissions bool
Read, Add, Create, Write, Delete, DeletePreviousVersion, Tag, List, Move, Execute, Ownership, Permissions, PermanentDelete bool
}
// String produces the SAS permissions string for an Azure Storage blob.
@ -367,12 +365,15 @@ func (p *BlobPermissions) String() string {
if p.DeletePreviousVersion {
b.WriteRune('x')
}
if p.Tag {
b.WriteRune('t')
if p.PermanentDelete {
b.WriteRune('y')
}
if p.List {
b.WriteRune('l')
}
if p.Tag {
b.WriteRune('t')
}
if p.Move {
b.WriteRune('m')
}
@ -407,6 +408,8 @@ func parseBlobPermissions(s string) (BlobPermissions, error) {
p.DeletePreviousVersion = true
case 't':
p.Tag = true
case 'y':
p.PermanentDelete = true
case 'l':
p.List = true
case 'm':

View file

@ -120,11 +120,8 @@ func (s *Client) URL() string {
return s.generated().Endpoint()
}
// NewContainerClient creates a new ContainerClient object by concatenating containerName to the end of
// Client's URL. The new ContainerClient uses the same request policy pipeline as the Client.
// To change the pipeline, create the ContainerClient and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewContainerClient instead of calling this object's
// NewContainerClient method.
// NewContainerClient creates a new container.Client object by concatenating containerName to the end of
// this Client's URL. The new container.Client uses the same request policy pipeline as the Client.
func (s *Client) NewContainerClient(containerName string) *container.Client {
containerURL := runtime.JoinPaths(s.generated().Endpoint(), containerName)
return (*container.Client)(base.NewContainerClient(containerURL, s.generated().Pipeline(), s.sharedKey()))

View file

@ -42,6 +42,15 @@ func (o *GetUserDelegationCredentialOptions) format() *generated.ServiceClientGe
// AccessConditions identifies container-specific access conditions which you optionally set.
type AccessConditions = exported.ContainerAccessConditions
// BlobTag - a key/value pair on a blob
type BlobTag = generated.BlobTag
// ContainerItem - An Azure Storage container returned from method Client.ListContainersSegment.
type ContainerItem = generated.ContainerItem
// ContainerProperties - Properties of a container
type ContainerProperties = generated.ContainerProperties
// CpkInfo contains a group of parameters for the BlobClient.Download method.
type CpkInfo = generated.CpkInfo
@ -63,6 +72,12 @@ type RestoreContainerOptions = container.RestoreOptions
// domain) to call APIs in another domain
type CorsRule = generated.CorsRule
// FilterBlobItem - Blob info returned from method Client.FilterBlobs
type FilterBlobItem = generated.FilterBlobItem
// GeoReplication - Geo-Replication information for the Secondary Storage Service
type GeoReplication = generated.GeoReplication
// RetentionPolicy - the retention policy which determines how long the associated data should persist
type RetentionPolicy = generated.RetentionPolicy

View file

@ -20,12 +20,13 @@
}
},
"variables": {
"mgmtApiVersion": "2019-06-01",
"mgmtApiVersion": "2022-09-01",
"authorizationApiVersion": "2018-09-01-preview",
"blobDataContributorRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/ba92f5b4-2d11-453d-a403-e96b0029c9fe')]",
"contributorRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/b24988ac-6180-42a0-ab88-20f7382dd24c')]",
"blobDataOwnerRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/b7e6dc6d-f1e8-4753-8033-0f276bb0955b')]",
"primaryAccountName": "[concat(parameters('baseName'), 'prim')]",
"immutableAccountName": "[concat(parameters('baseName'), 'imm')]",
"primaryEncryptionScopeName": "encryptionScope",
"primaryEncryptionScope": "[concat(parameters('baseName'), 'prim', concat('/', variables('primaryEncryptionScopeName')))]",
"secondaryAccountName": "[concat(parameters('baseName'), 'sec')]",
@ -120,6 +121,45 @@
"[variables('primaryAccountName')]"
]
},
{
"type": "Microsoft.Storage/storageAccounts",
"apiVersion": "[variables('mgmtApiVersion')]",
"name": "[variables('immutableAccountName')]",
"location": "[variables('location')]",
"sku": {
"name": "Standard_RAGRS",
"tier": "Standard"
},
"kind": "StorageV2",
"properties": {
"networkAcls": "[variables('networkAcls')]",
"supportsHttpsTrafficOnly": true,
"encryption": "[variables('encryption')]",
"accessTier": "Hot",
"immutableStorageWithVersioning": {
"enabled": true
}
}
},
{
"type": "Microsoft.Storage/storageAccounts/blobServices",
"apiVersion": "[variables('mgmtApiVersion')]",
"name": "[concat(variables('immutableAccountName'), '/default')]",
"properties": {
"isVersioningEnabled": true,
"lastAccessTimeTrackingPolicy": {
"enable": true,
"name": "AccessTimeTracking",
"trackingGranularityInDays": 1,
"blobType": [
"blockBlob"
]
}
},
"dependsOn": [
"[variables('immutableAccountName')]"
]
},
{
"type": "Microsoft.Storage/storageAccounts/encryptionScopes",
"apiVersion": "[variables('mgmtApiVersion')]",
@ -220,7 +260,9 @@
"apiVersion": "[variables('mgmtApiVersion')]",
"name": "[concat(variables('softDeleteAccountName'), '/default')]",
"properties": {
"isVersioningEnabled": true,
"deleteRetentionPolicy": {
"allowPermanentDelete": true,
"enabled": true,
"days": 1
},
@ -424,11 +466,11 @@
"type": "string",
"value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('premiumAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]"
},
"DATALAKE_STORAGE_ACCOUNT_NAME": {
"DATALAKE_AZURE_STORAGE_ACCOUNT_NAME": {
"type": "string",
"value": "[variables('dataLakeAccountName')]"
},
"DATALAKE_STORAGE_ACCOUNT_KEY": {
"DATALAKE_AZURE_STORAGE_ACCOUNT_KEY": {
"type": "string",
"value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).keys[0].value]"
},
@ -448,6 +490,30 @@
"type": "string",
"value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]"
},
"IMMUTABLE_AZURE_STORAGE_ACCOUNT_NAME": {
"type": "string",
"value": "[variables('immutableAccountName')]"
},
"IMMUTABLE_AZURE_STORAGE_ACCOUNT_KEY": {
"type": "string",
"value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).keys[0].value]"
},
"IMMUTABLE_AZURE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": {
"type": "string",
"value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]"
},
"IMMUTABLE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": {
"type": "string",
"value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]"
},
"IMMUTABLE_AZURE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": {
"type": "string",
"value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]"
},
"IMMUTABLE_AZURE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": {
"type": "string",
"value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]"
},
"SOFT_DELETE_AZURE_STORAGE_ACCOUNT_NAME": {
"type": "string",
"value": "[variables('softDeleteAccountName')]"

View file

@ -663,6 +663,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "me-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@ -5629,6 +5632,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@ -5729,6 +5735,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@ -7555,6 +7564,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@ -20003,7 +20015,9 @@ var awsPartition = partition{
},
"resource-explorer-2": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{},
defaultKey{}: endpoint{
DNSSuffix: "api.aws",
},
defaultKey{
Variant: fipsVariant,
}: endpoint{
@ -27661,9 +27675,21 @@ var awscnPartition = partition{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
endpointKey{
Region: "cn-north-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "athena.cn-north-1.api.amazonwebservices.com.cn",
},
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
endpointKey{
Region: "cn-northwest-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "athena.cn-northwest-1.api.amazonwebservices.com.cn",
},
},
},
"autoscaling": service{
@ -28612,7 +28638,9 @@ var awscnPartition = partition{
},
"resource-explorer-2": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{},
defaultKey{}: endpoint{
DNSSuffix: "api.amazonwebservices.com.cn",
},
defaultKey{
Variant: fipsVariant,
}: endpoint{
@ -29779,6 +29807,12 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
endpointKey{
Region: "us-gov-east-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "athena.us-gov-east-1.api.aws",
},
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
@ -29788,6 +29822,12 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
endpointKey{
Region: "us-gov-west-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "athena.us-gov-west-1.api.aws",
},
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
@ -32809,7 +32849,9 @@ var awsusgovPartition = partition{
},
"resource-explorer-2": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{},
defaultKey{}: endpoint{
DNSSuffix: "api.aws",
},
defaultKey{
Variant: fipsVariant,
}: endpoint{

View file

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.44.153"
const SDKVersion = "1.44.157"

View file

@ -15,6 +15,8 @@ import (
"encoding/gob"
"fmt"
"io"
"io/ioutil"
"log"
"net/rpc"
"os"
"os/exec"
@ -42,6 +44,17 @@ func (c *Connection) Close() error {
return werr
}
// enableECPLogging reports whether enterprise-certificate-proxy logging is
// enabled, i.e. whether the ENABLE_ENTERPRISE_CERTIFICATE_LOGS environment
// variable is set to a non-empty value. When logging is not enabled, the
// standard logger's output is discarded as a side effect.
func enableECPLogging() bool {
	if os.Getenv("ENABLE_ENTERPRISE_CERTIFICATE_LOGS") == "" {
		// Silence the default logger unless logging was explicitly requested.
		log.SetOutput(ioutil.Discard)
		return false
	}
	return true
}
func init() {
gob.Register(crypto.SHA256)
gob.Register(&rsa.PSSOptions{})
@ -72,9 +85,9 @@ func (k *Key) Close() error {
if err := k.cmd.Process.Kill(); err != nil {
return fmt.Errorf("failed to kill signer process: %w", err)
}
if err := k.cmd.Wait(); err.Error() != "signal: killed" {
return fmt.Errorf("signer process was not killed: %w", err)
}
// Wait for cmd to exit and release resources. Since the process is forcefully killed, this
// will return a non-nil error (varies by OS), which we will ignore.
k.cmd.Wait()
// The Pipes connecting the RPC client should have been closed when the signer subprocess was killed.
// Calling `k.client.Close()` before `k.cmd.Process.Kill()` or `k.cmd.Wait()` _will_ cause a segfault.
if err := k.client.Close(); err.Error() != "close |0: file already closed" {
@ -105,6 +118,7 @@ func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed [
//
// The config file also specifies which certificate the signer should use.
func Cred(configFilePath string) (*Key, error) {
enableECPLogging()
if configFilePath == "" {
configFilePath = util.GetDefaultConfigFilePath()
}

View file

@ -21,10 +21,11 @@ import (
"crypto/x509"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
"sync"
"time"
@ -80,7 +81,7 @@ func (tv *TLSVersion) UnmarshalYAML(unmarshal func(interface{}) error) error {
}
func (tv *TLSVersion) MarshalYAML() (interface{}, error) {
if tv != nil || *tv == 0 {
if tv == nil || *tv == 0 {
return []byte("null"), nil
}
for s, v := range TLSVersions {
@ -106,7 +107,7 @@ func (tv *TLSVersion) UnmarshalJSON(data []byte) error {
// MarshalJSON implements the json.Marshaler interface for TLSVersion.
func (tv *TLSVersion) MarshalJSON() ([]byte, error) {
if tv != nil || *tv == 0 {
if tv == nil || *tv == 0 {
return []byte("null"), nil
}
for s, v := range TLSVersions {
@ -117,6 +118,19 @@ func (tv *TLSVersion) MarshalJSON() ([]byte, error) {
return nil, fmt.Errorf("unknown TLS version: %d", tv)
}
// String implements the fmt.Stringer interface for TLSVersion.
// It returns the symbolic name (e.g. "TLS12") for a known version, the empty
// string for a nil or zero version, and the numeric value otherwise.
func (tv *TLSVersion) String() string {
	if tv == nil || *tv == 0 {
		return ""
	}
	for s, v := range TLSVersions {
		if *tv == v {
			return s
		}
	}
	// Must dereference: fmt's integer verbs applied to a pointer format the
	// pointer's address, not the pointed-to value.
	return fmt.Sprintf("%d", *tv)
}
// BasicAuth contains basic HTTP authentication credentials.
type BasicAuth struct {
Username string `yaml:"username" json:"username"`
@ -235,6 +249,30 @@ func (a *OAuth2) SetDirectory(dir string) {
a.TLSConfig.SetDirectory(dir)
}
// LoadHTTPConfig parses the YAML input s into a HTTPClientConfig.
// Decoding is strict: unknown fields in the input cause an error.
func LoadHTTPConfig(s string) (*HTTPClientConfig, error) {
	cfg := &HTTPClientConfig{}
	if err := yaml.UnmarshalStrict([]byte(s), cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}
// LoadHTTPConfigFile parses the given YAML file into a HTTPClientConfig.
// It returns the parsed config together with the raw file contents.
// Relative file paths referenced by the config (e.g. TLS cert/key/CA files)
// are resolved against the directory containing filename.
func LoadHTTPConfigFile(filename string) (*HTTPClientConfig, []byte, error) {
	content, err := os.ReadFile(filename)
	if err != nil {
		return nil, nil, err
	}
	cfg, err := LoadHTTPConfig(string(content))
	if err != nil {
		return nil, nil, err
	}
	// Anchor relative paths at the config file's own directory. The previous
	// code applied filepath.Dir twice, which anchored them one level above
	// the config file instead.
	cfg.SetDirectory(filepath.Dir(filename))
	return cfg, content, nil
}
// HTTPClientConfig configures an HTTP client.
type HTTPClientConfig struct {
// The HTTP basic authentication credentials for the targets.
@ -527,7 +565,7 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HT
return newRT(tlsConfig)
}
return NewTLSRoundTripper(tlsConfig, cfg.TLSConfig.CAFile, newRT)
return NewTLSRoundTripper(tlsConfig, cfg.TLSConfig.CAFile, cfg.TLSConfig.CertFile, cfg.TLSConfig.KeyFile, newRT)
}
type authorizationCredentialsRoundTripper struct {
@ -571,7 +609,7 @@ func NewAuthorizationCredentialsFileRoundTripper(authType, authCredentialsFile s
func (rt *authorizationCredentialsFileRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
if len(req.Header.Get("Authorization")) == 0 {
b, err := ioutil.ReadFile(rt.authCredentialsFile)
b, err := os.ReadFile(rt.authCredentialsFile)
if err != nil {
return nil, fmt.Errorf("unable to read authorization credentials file %s: %s", rt.authCredentialsFile, err)
}
@ -609,7 +647,7 @@ func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, e
}
req = cloneRequest(req)
if rt.passwordFile != "" {
bs, err := ioutil.ReadFile(rt.passwordFile)
bs, err := os.ReadFile(rt.passwordFile)
if err != nil {
return nil, fmt.Errorf("unable to read basic auth password file %s: %s", rt.passwordFile, err)
}
@ -651,7 +689,7 @@ func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, erro
)
if rt.config.ClientSecretFile != "" {
data, err := ioutil.ReadFile(rt.config.ClientSecretFile)
data, err := os.ReadFile(rt.config.ClientSecretFile)
if err != nil {
return nil, fmt.Errorf("unable to read oauth2 client secret file %s: %s", rt.config.ClientSecretFile, err)
}
@ -696,7 +734,7 @@ func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, erro
if len(rt.config.TLSConfig.CAFile) == 0 {
t, _ = tlsTransport(tlsConfig)
} else {
t, err = NewTLSRoundTripper(tlsConfig, rt.config.TLSConfig.CAFile, tlsTransport)
t, err = NewTLSRoundTripper(tlsConfig, rt.config.TLSConfig.CAFile, rt.config.TLSConfig.CertFile, rt.config.TLSConfig.KeyFile, tlsTransport)
if err != nil {
return nil, err
}
@ -766,6 +804,13 @@ func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) {
tlsConfig := &tls.Config{
InsecureSkipVerify: cfg.InsecureSkipVerify,
MinVersion: uint16(cfg.MinVersion),
MaxVersion: uint16(cfg.MaxVersion),
}
if cfg.MaxVersion != 0 && cfg.MinVersion != 0 {
if cfg.MaxVersion < cfg.MinVersion {
return nil, fmt.Errorf("tls_config.max_version must be greater than or equal to tls_config.min_version if both are specified")
}
}
// If a CA cert is provided then let's read it in so we can validate the
@ -813,6 +858,8 @@ type TLSConfig struct {
InsecureSkipVerify bool `yaml:"insecure_skip_verify" json:"insecure_skip_verify"`
// Minimum TLS version.
MinVersion TLSVersion `yaml:"min_version,omitempty" json:"min_version,omitempty"`
// Maximum TLS version.
MaxVersion TLSVersion `yaml:"max_version,omitempty" json:"max_version,omitempty"`
}
// SetDirectory joins any relative file paths with dir.
@ -825,18 +872,45 @@ func (c *TLSConfig) SetDirectory(dir string) {
c.KeyFile = JoinDir(dir, c.KeyFile)
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
// The local alias type carries TLSConfig's fields but none of its methods,
// so decoding into it uses the default field-by-field logic instead of
// recursively re-entering this method.
func (c *TLSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
	type plain TLSConfig
	return unmarshal((*plain)(c))
}
// readCertAndKey loads the client certificate and key files from disk and
// returns their raw contents (cert first, key second). The first read error
// aborts the pair.
func readCertAndKey(certFile, keyFile string) ([]byte, []byte, error) {
	var contents [2][]byte
	for i, name := range []string{certFile, keyFile} {
		data, err := os.ReadFile(name)
		if err != nil {
			return nil, nil, err
		}
		contents[i] = data
	}
	return contents[0], contents[1], nil
}
// getClientCertificate reads the pair of client cert and key from disk and returns a tls.Certificate.
func (c *TLSConfig) getClientCertificate(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
cert, err := tls.LoadX509KeyPair(c.CertFile, c.KeyFile)
// getClientCertificate reads the configured client cert and key from disk on
// each invocation and returns the parsed pair for use in a TLS handshake.
func (c *TLSConfig) getClientCertificate(_ *tls.CertificateRequestInfo) (*tls.Certificate, error) {
	certBytes, keyBytes, err := readCertAndKey(c.CertFile, c.KeyFile)
	if err != nil {
		return nil, fmt.Errorf("unable to read specified client cert (%s) & key (%s): %s", c.CertFile, c.KeyFile, err)
	}
	pair, err := tls.X509KeyPair(certBytes, keyBytes)
	if err != nil {
		return nil, fmt.Errorf("unable to use specified client cert (%s) & key (%s): %s", c.CertFile, c.KeyFile, err)
	}
	return &pair, nil
}
// readCAFile reads the CA cert file from disk.
func readCAFile(f string) ([]byte, error) {
data, err := ioutil.ReadFile(f)
data, err := os.ReadFile(f)
if err != nil {
return nil, fmt.Errorf("unable to load specified CA cert %s: %s", f, err)
}
@ -857,22 +931,29 @@ func updateRootCA(cfg *tls.Config, b []byte) bool {
// configuration whenever the content of the CA file changes.
type tlsRoundTripper struct {
caFile string
certFile string
keyFile string
// newRT returns a new RoundTripper.
newRT func(*tls.Config) (http.RoundTripper, error)
mtx sync.RWMutex
rt http.RoundTripper
hashCAFile []byte
hashCertFile []byte
hashKeyFile []byte
tlsConfig *tls.Config
}
func NewTLSRoundTripper(
cfg *tls.Config,
caFile string,
caFile, certFile, keyFile string,
newRT func(*tls.Config) (http.RoundTripper, error),
) (http.RoundTripper, error) {
t := &tlsRoundTripper{
caFile: caFile,
certFile: certFile,
keyFile: keyFile,
newRT: newRT,
tlsConfig: cfg,
}
@ -882,7 +963,7 @@ func NewTLSRoundTripper(
return nil, err
}
t.rt = rt
_, t.hashCAFile, err = t.getCAWithHash()
_, t.hashCAFile, t.hashCertFile, t.hashKeyFile, err = t.getTLSFilesWithHash()
if err != nil {
return nil, err
}
@ -890,25 +971,36 @@ func NewTLSRoundTripper(
return t, nil
}
func (t *tlsRoundTripper) getCAWithHash() ([]byte, []byte, error) {
b, err := readCAFile(t.caFile)
func (t *tlsRoundTripper) getTLSFilesWithHash() ([]byte, []byte, []byte, []byte, error) {
b1, err := readCAFile(t.caFile)
if err != nil {
return nil, nil, err
return nil, nil, nil, nil, err
}
h := sha256.Sum256(b)
return b, h[:], nil
h1 := sha256.Sum256(b1)
var h2, h3 [32]byte
if t.certFile != "" {
b2, b3, err := readCertAndKey(t.certFile, t.keyFile)
if err != nil {
return nil, nil, nil, nil, err
}
h2, h3 = sha256.Sum256(b2), sha256.Sum256(b3)
}
return b1, h1[:], h2[:], h3[:], nil
}
// RoundTrip implements the http.RoundTrip interface.
func (t *tlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
b, h, err := t.getCAWithHash()
caData, caHash, certHash, keyHash, err := t.getTLSFilesWithHash()
if err != nil {
return nil, err
}
t.mtx.RLock()
equal := bytes.Equal(h[:], t.hashCAFile)
equal := bytes.Equal(caHash[:], t.hashCAFile) &&
bytes.Equal(certHash[:], t.hashCertFile) &&
bytes.Equal(keyHash[:], t.hashKeyFile)
rt := t.rt
t.mtx.RUnlock()
if equal {
@ -917,8 +1009,10 @@ func (t *tlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
}
// Create a new RoundTripper.
// The cert and key files are read separately by the client
// using GetClientCertificate.
tlsConfig := t.tlsConfig.Clone()
if !updateRootCA(tlsConfig, b) {
if !updateRootCA(tlsConfig, caData) {
return nil, fmt.Errorf("unable to use specified CA cert %s", t.caFile)
}
rt, err = t.newRT(tlsConfig)
@ -929,7 +1023,9 @@ func (t *tlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
t.mtx.Lock()
t.rt = rt
t.hashCAFile = h[:]
t.hashCAFile = caHash[:]
t.hashCertFile = certHash[:]
t.hashKeyFile = keyHash[:]
t.mtx.Unlock()
return rt.RoundTrip(req)

View file

@ -17,7 +17,6 @@ import (
"bufio"
"fmt"
"io"
"io/ioutil"
"math"
"strconv"
"strings"
@ -44,7 +43,7 @@ const (
var (
bufPool = sync.Pool{
New: func() interface{} {
return bufio.NewWriter(ioutil.Discard)
return bufio.NewWriter(io.Discard)
},
}
numBufPool = sync.Pool{

View file

@ -46,7 +46,7 @@ func NewCollector(program string) prometheus.Collector {
),
ConstLabels: prometheus.Labels{
"version": Version,
"revision": Revision,
"revision": getRevision(),
"branch": Branch,
"goversion": GoVersion,
},
@ -69,7 +69,7 @@ func Print(program string) string {
m := map[string]string{
"program": program,
"version": Version,
"revision": Revision,
"revision": getRevision(),
"branch": Branch,
"buildUser": BuildUser,
"buildDate": BuildDate,
@ -87,7 +87,7 @@ func Print(program string) string {
// Info returns version, branch and revision information.
func Info() string {
return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, Revision)
return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, getRevision())
}
// BuildContext returns goVersion, buildUser and buildDate information.

View file

@ -0,0 +1,21 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !go1.18
// +build !go1.18
package version
// getRevision returns the Revision value injected at build time.
// This is the fallback used under the !go1.18 build tag, where the
// runtime/debug VCS build settings are not available.
func getRevision() string {
	return Revision
}

View file

@ -0,0 +1,58 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build go1.18
// +build go1.18
package version
import "runtime/debug"
var computedRevision string
// getRevision returns the build-time Revision when one was injected;
// otherwise it falls back to the revision derived from the binary's
// embedded VCS build information.
func getRevision() string {
	if Revision == "" {
		return computedRevision
	}
	return Revision
}
// init derives the VCS revision once at program startup and stores it in
// computedRevision, so later lookups do not re-read the build info.
func init() {
	computedRevision = computeRevision()
}
// computeRevision derives the source revision from the binary's embedded
// build information. It returns "unknown" when no build info is available,
// and appends "-modified" when the VCS working tree was dirty at build time.
func computeRevision() string {
	const fallback = "unknown"
	buildInfo, ok := debug.ReadBuildInfo()
	if !ok {
		return fallback
	}
	rev := fallback
	modified := false
	for _, setting := range buildInfo.Settings {
		switch setting.Key {
		case "vcs.revision":
			rev = setting.Value
		case "vcs.modified":
			if setting.Value == "true" {
				modified = true
			}
		}
	}
	if modified {
		return rev + "-modified"
	}
	return rev
}

View file

@ -136,6 +136,10 @@ func (c *Command) setup(ctx *Context) {
newCmds = append(newCmds, scmd)
}
c.Subcommands = newCmds
if c.BashComplete == nil {
c.BashComplete = DefaultCompleteWithFlags(c)
}
}
func (c *Command) Run(cCtx *Context, arguments ...string) (err error) {
@ -148,13 +152,9 @@ func (c *Command) Run(cCtx *Context, arguments ...string) (err error) {
set, err := c.parseFlags(&a, cCtx.shellComplete)
cCtx.flagSet = set
if c.isRoot {
if checkCompletions(cCtx) {
return nil
}
} else if checkCommandCompletions(cCtx, c.Name) {
return nil
}
if err != nil {
if c.OnUsageError != nil {

View file

@ -227,7 +227,7 @@ func DefaultCompleteWithFlags(cmd *Command) func(cCtx *Context) {
return
}
printCommandSuggestions(cCtx.App.Commands, cCtx.App.Writer)
printCommandSuggestions(cCtx.Command.Subcommands, cCtx.App.Writer)
}
}
@ -308,15 +308,15 @@ func printVersion(cCtx *Context) {
// ShowCompletions prints the lists of commands within a given context
func ShowCompletions(cCtx *Context) {
a := cCtx.App
if a != nil && a.BashComplete != nil {
a.BashComplete(cCtx)
c := cCtx.Command
if c != nil && c.BashComplete != nil {
c.BashComplete(cCtx)
}
}
// ShowCommandCompletions prints the custom completions for a given command
func ShowCommandCompletions(ctx *Context, command string) {
c := ctx.App.Command(command)
c := ctx.Command.Command(command)
if c != nil {
if c.BashComplete != nil {
c.BashComplete(ctx)
@ -453,7 +453,7 @@ func checkCompletions(cCtx *Context) bool {
if args := cCtx.Args(); args.Present() {
name := args.First()
if cmd := cCtx.App.Command(name); cmd != nil {
if cmd := cCtx.Command.Command(name); cmd != nil {
// let the command handle the completion
return false
}
@ -463,15 +463,6 @@ func checkCompletions(cCtx *Context) bool {
return true
}
// checkCommandCompletions emits shell completions for the named command and
// reports whether completion output was produced. It is a no-op (returning
// false) unless the context is in shell-completion mode.
func checkCommandCompletions(c *Context, name string) bool {
	if !c.shellComplete {
		return false
	}
	ShowCommandCompletions(c, name)
	return true
}
// subtract returns the difference a - b.
func subtract(a, b int) int {
	diff := a - b
	return diff
}

View file

@ -30,7 +30,7 @@ func SortFunc[E any](x []E, less func(a, b E) bool) {
pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less)
}
// SortStable sorts the slice x while keeping the original order of equal
// SortStableFunc sorts the slice x while keeping the original order of equal
// elements, using less to compare elements.
func SortStableFunc[E any](x []E, less func(a, b E) bool) {
stableLessFunc(x, len(x), less)

View file

@ -588,6 +588,7 @@ type serverConn struct {
maxFrameSize int32
peerMaxHeaderListSize uint32 // zero means unknown (default)
canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
canonHeaderKeysSize int // canonHeader keys size in bytes
writingFrame bool // started writing a frame (on serve goroutine or separate)
writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh
needsFrameFlush bool // last frame write wasn't a flush
@ -766,6 +767,13 @@ func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
}
}
// maxCachedCanonicalHeadersKeysSize is an arbitrarily-chosen limit on the size
// of the entries in the canonHeader cache.
// This should be larger than the size of unique, uncommon header keys likely to
// be sent by the peer, while not so high as to permit unreasonable memory usage
// if the peer sends an unbounded number of unique header keys.
const maxCachedCanonicalHeadersKeysSize = 2048
func (sc *serverConn) canonicalHeader(v string) string {
sc.serveG.check()
buildCommonHeaderMapsOnce()
@ -781,14 +789,10 @@ func (sc *serverConn) canonicalHeader(v string) string {
sc.canonHeader = make(map[string]string)
}
cv = http.CanonicalHeaderKey(v)
// maxCachedCanonicalHeaders is an arbitrarily-chosen limit on the number of
// entries in the canonHeader cache. This should be larger than the number
// of unique, uncommon header keys likely to be sent by the peer, while not
// so high as to permit unreasonable memory usage if the peer sends an unbounded
// number of unique header keys.
const maxCachedCanonicalHeaders = 32
if len(sc.canonHeader) < maxCachedCanonicalHeaders {
size := 100 + len(v)*2 // 100 bytes of map overhead + key + value
if sc.canonHeaderKeysSize+size <= maxCachedCanonicalHeadersKeysSize {
sc.canonHeader[v] = cv
sc.canonHeaderKeysSize += size
}
return cv
}

View file

@ -62,6 +62,13 @@ const (
// The AWS authorization header name for the auto-generated date.
awsDateHeader = "x-amz-date"
// Supported AWS configuration environment variables.
awsAccessKeyId = "AWS_ACCESS_KEY_ID"
awsDefaultRegion = "AWS_DEFAULT_REGION"
awsRegion = "AWS_REGION"
awsSecretAccessKey = "AWS_SECRET_ACCESS_KEY"
awsSessionToken = "AWS_SESSION_TOKEN"
awsTimeFormatLong = "20060102T150405Z"
awsTimeFormatShort = "20060102"
)
@ -267,6 +274,49 @@ type awsRequest struct {
Headers []awsRequestHeader `json:"headers"`
}
func (cs awsCredentialSource) validateMetadataServers() error {
if err := cs.validateMetadataServer(cs.RegionURL, "region_url"); err != nil {
return err
}
if err := cs.validateMetadataServer(cs.CredVerificationURL, "url"); err != nil {
return err
}
return cs.validateMetadataServer(cs.IMDSv2SessionTokenURL, "imdsv2_session_token_url")
}
var validHostnames []string = []string{"169.254.169.254", "fd00:ec2::254"}
func (cs awsCredentialSource) isValidMetadataServer(metadataUrl string) bool {
if metadataUrl == "" {
// Zero value means use default, which is valid.
return true
}
u, err := url.Parse(metadataUrl)
if err != nil {
// Unparseable URL means invalid
return false
}
for _, validHostname := range validHostnames {
if u.Hostname() == validHostname {
// If it's one of the valid hostnames, everything is good
return true
}
}
// hostname not found in our allowlist, so not valid
return false
}
func (cs awsCredentialSource) validateMetadataServer(metadataUrl, urlName string) error {
if !cs.isValidMetadataServer(metadataUrl) {
return fmt.Errorf("oauth2/google: invalid hostname %s for %s", metadataUrl, urlName)
}
return nil
}
func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, error) {
if cs.client == nil {
cs.client = oauth2.NewClient(cs.ctx, nil)
@ -274,17 +324,34 @@ func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, erro
return cs.client.Do(req.WithContext(cs.ctx))
}
func canRetrieveRegionFromEnvironment() bool {
// The AWS region can be provided through AWS_REGION or AWS_DEFAULT_REGION. Only one is
// required.
return getenv(awsRegion) != "" || getenv(awsDefaultRegion) != ""
}
func canRetrieveSecurityCredentialFromEnvironment() bool {
// Check if both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are available.
return getenv(awsAccessKeyId) != "" && getenv(awsSecretAccessKey) != ""
}
func shouldUseMetadataServer() bool {
return !canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment()
}
func (cs awsCredentialSource) subjectToken() (string, error) {
if cs.requestSigner == nil {
headers := make(map[string]string)
if shouldUseMetadataServer() {
awsSessionToken, err := cs.getAWSSessionToken()
if err != nil {
return "", err
}
headers := make(map[string]string)
if awsSessionToken != "" {
headers[awsIMDSv2SessionTokenHeader] = awsSessionToken
}
}
awsSecurityCredentials, err := cs.getSecurityCredentials(headers)
if err != nil {
@ -389,11 +456,11 @@ func (cs *awsCredentialSource) getAWSSessionToken() (string, error) {
}
func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, error) {
if envAwsRegion := getenv("AWS_REGION"); envAwsRegion != "" {
if canRetrieveRegionFromEnvironment() {
if envAwsRegion := getenv(awsRegion); envAwsRegion != "" {
return envAwsRegion, nil
}
if envAwsRegion := getenv("AWS_DEFAULT_REGION"); envAwsRegion != "" {
return envAwsRegion, nil
return getenv("AWS_DEFAULT_REGION"), nil
}
if cs.RegionURL == "" {
@ -434,15 +501,13 @@ func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, err
}
func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string) (result awsSecurityCredentials, err error) {
if accessKeyID := getenv("AWS_ACCESS_KEY_ID"); accessKeyID != "" {
if secretAccessKey := getenv("AWS_SECRET_ACCESS_KEY"); secretAccessKey != "" {
if canRetrieveSecurityCredentialFromEnvironment() {
return awsSecurityCredentials{
AccessKeyID: accessKeyID,
SecretAccessKey: secretAccessKey,
SecurityToken: getenv("AWS_SESSION_TOKEN"),
AccessKeyID: getenv(awsAccessKeyId),
SecretAccessKey: getenv(awsSecretAccessKey),
SecurityToken: getenv(awsSessionToken),
}, nil
}
}
roleName, err := cs.getMetadataRoleName(headers)
if err != nil {

View file

@ -213,6 +213,10 @@ func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) {
awsCredSource.IMDSv2SessionTokenURL = c.CredentialSource.IMDSv2SessionTokenURL
}
if err := awsCredSource.validateMetadataServers(); err != nil {
return nil, err
}
return awsCredSource, nil
}
} else if c.CredentialSource.File != "" {

View file

@ -5,4 +5,4 @@
package internal
// Version is the current tagged release of the library.
const Version = "0.103.0"
const Version = "0.104.0"

View file

@ -96,7 +96,9 @@ func (w withScopes) Apply(o *internal.DialSettings) {
copy(o.Scopes, w)
}
// WithUserAgent returns a ClientOption that sets the User-Agent.
// WithUserAgent returns a ClientOption that sets the User-Agent. This option
// is incompatible with the [WithHTTPClient] option. If you wish to provide a
// custom client you will need to add this header via RoundTripper middleware.
func WithUserAgent(ua string) ClientOption {
return withUA(ua)
}

View file

@ -25,6 +25,7 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
grpcgoogle "google.golang.org/grpc/credentials/google"
grpcinsecure "google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/credentials/oauth"
// Install grpclb, which is required for direct path.
@ -126,10 +127,26 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C
if err != nil {
return nil, err
}
var grpcOpts []grpc.DialOption
var transportCreds credentials.TransportCredentials
if insecure {
grpcOpts = []grpc.DialOption{grpc.WithInsecure()}
} else if !o.NoAuth {
transportCreds = grpcinsecure.NewCredentials()
} else {
transportCreds = credentials.NewTLS(&tls.Config{
GetClientCertificate: clientCertSource,
})
}
// Initialize gRPC dial options with transport-level security options.
grpcOpts := []grpc.DialOption{
grpc.WithTransportCredentials(transportCreds),
}
// Authentication can only be sent when communicating over a secure connection.
//
// TODO: Should we be more lenient in the future and allow sending credentials
// when dialing an insecure connection?
if !o.NoAuth && !insecure {
if o.APIKey != "" {
log.Print("API keys are not supported for gRPC APIs. Remove the WithAPIKey option from your client-creating call.")
}
@ -142,8 +159,17 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C
o.QuotaProject = internal.QuotaProjectFromCreds(creds)
}
grpcOpts = append(grpcOpts,
grpc.WithPerRPCCredentials(grpcTokenSource{
TokenSource: oauth.TokenSource{creds.TokenSource},
quotaProject: o.QuotaProject,
requestReason: o.RequestReason,
}),
)
// Attempt Direct Path:
if isDirectPathEnabled(endpoint, o) && isTokenSourceDirectPathCompatible(creds.TokenSource, o) && metadata.OnGCE() {
// Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates.
grpcOpts = []grpc.DialOption{
grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{oauth.TokenSource{creds.TokenSource}}))}
if timeoutDialerOption != nil {
@ -153,9 +179,9 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C
if strings.EqualFold(os.Getenv(enableDirectPathXds), "true") {
// google-c2p resolver target must not have a port number
if addr, _, err := net.SplitHostPort(endpoint); err == nil {
endpoint = "google-c2p-experimental:///" + addr
endpoint = "google-c2p:///" + addr
} else {
endpoint = "google-c2p-experimental:///" + endpoint
endpoint = "google-c2p:///" + endpoint
}
} else {
if !strings.HasPrefix(endpoint, "dns:///") {
@ -169,18 +195,6 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C
grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`))
}
// TODO(cbro): add support for system parameters (quota project, request reason) via chained interceptor.
} else {
tlsConfig := &tls.Config{
GetClientCertificate: clientCertSource,
}
grpcOpts = []grpc.DialOption{
grpc.WithPerRPCCredentials(grpcTokenSource{
TokenSource: oauth.TokenSource{creds.TokenSource},
quotaProject: o.QuotaProject,
requestReason: o.RequestReason,
}),
grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)),
}
}
}

30
vendor/modules.txt vendored
View file

@ -45,7 +45,7 @@ github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo
github.com/Azure/azure-sdk-for-go/sdk/internal/log
github.com/Azure/azure-sdk-for-go/sdk/internal/temporal
github.com/Azure/azure-sdk-for-go/sdk/internal/uuid
# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1
# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.6.1
## explicit; go 1.18
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob
@ -80,7 +80,7 @@ github.com/VividCortex/ewma
# github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137
## explicit; go 1.15
github.com/alecthomas/units
# github.com/aws/aws-sdk-go v1.44.153
# github.com/aws/aws-sdk-go v1.44.157
## explicit; go 1.11
github.com/aws/aws-sdk-go/aws
github.com/aws/aws-sdk-go/aws/awserr
@ -308,8 +308,8 @@ github.com/google/go-cmp/cmp/internal/value
# github.com/google/uuid v1.3.0
## explicit
github.com/google/uuid
# github.com/googleapis/enterprise-certificate-proxy v0.2.0
## explicit; go 1.18
# github.com/googleapis/enterprise-certificate-proxy v0.2.1
## explicit; go 1.19
github.com/googleapis/enterprise-certificate-proxy/client
github.com/googleapis/enterprise-certificate-proxy/client/util
# github.com/googleapis/gax-go/v2 v2.7.0
@ -326,8 +326,8 @@ github.com/grafana/regexp/syntax
## explicit; go 1.13
# github.com/hashicorp/go-immutable-radix v1.3.1
## explicit
# github.com/influxdata/influxdb v1.10.0
## explicit; go 1.18
# github.com/influxdata/influxdb v1.11.0
## explicit; go 1.19
github.com/influxdata/influxdb/client/v2
github.com/influxdata/influxdb/models
github.com/influxdata/influxdb/pkg/escape
@ -384,8 +384,8 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint
# github.com/prometheus/client_model v0.3.0
## explicit; go 1.9
github.com/prometheus/client_model/go
# github.com/prometheus/common v0.37.0
## explicit; go 1.16
# github.com/prometheus/common v0.38.0
## explicit; go 1.17
github.com/prometheus/common/config
github.com/prometheus/common/expfmt
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
@ -399,7 +399,7 @@ github.com/prometheus/common/sigv4
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
# github.com/prometheus/prometheus v0.40.5
# github.com/prometheus/prometheus v0.40.6
## explicit; go 1.18
github.com/prometheus/prometheus/config
github.com/prometheus/prometheus/discovery
@ -444,7 +444,7 @@ github.com/russross/blackfriday/v2
## explicit; go 1.13
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
# github.com/urfave/cli/v2 v2.23.6
# github.com/urfave/cli/v2 v2.23.7
## explicit; go 1.18
github.com/urfave/cli/v2
# github.com/valyala/bytebufferpool v1.0.0
@ -528,11 +528,11 @@ go.uber.org/atomic
## explicit; go 1.18
go.uber.org/goleak
go.uber.org/goleak/internal/stack
# golang.org/x/exp v0.0.0-20221205204356-47842c84f3db
# golang.org/x/exp v0.0.0-20221208152030-732eee02a75a
## explicit; go 1.18
golang.org/x/exp/constraints
golang.org/x/exp/slices
# golang.org/x/net v0.3.0
# golang.org/x/net v0.4.0
## explicit; go 1.17
golang.org/x/net/context
golang.org/x/net/context/ctxhttp
@ -544,7 +544,7 @@ golang.org/x/net/internal/socks
golang.org/x/net/internal/timeseries
golang.org/x/net/proxy
golang.org/x/net/trace
# golang.org/x/oauth2 v0.2.0
# golang.org/x/oauth2 v0.3.0
## explicit; go 1.17
golang.org/x/oauth2
golang.org/x/oauth2/authhandler
@ -575,7 +575,7 @@ golang.org/x/time/rate
## explicit; go 1.17
golang.org/x/xerrors
golang.org/x/xerrors/internal
# google.golang.org/api v0.103.0
# google.golang.org/api v0.104.0
## explicit; go 1.19
google.golang.org/api/googleapi
google.golang.org/api/googleapi/transport
@ -608,7 +608,7 @@ google.golang.org/appengine/internal/socket
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/socket
google.golang.org/appengine/urlfetch
# google.golang.org/genproto v0.0.0-20221205194025-8222ab48f5fc
# google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37
## explicit; go 1.19
google.golang.org/genproto/googleapis/api
google.golang.org/genproto/googleapis/api/annotations