lib/backup: add support of Azure Blob Storage (#460)

* lib/backup: add support of Azure Blob Storage

* lib/backup: add enterprise support of Azure Blob Storage
Zakhar Bessarab 2022-10-06 00:10:00 +03:00 committed by Aliaksandr Valialkin
parent f596e49881
commit 262ce77e2d
122 changed files with 27206 additions and 2 deletions


@@ -6,6 +6,7 @@ Supported storage systems for backups:
 * [GCS](https://cloud.google.com/storage/). Example: `gs://<bucket>/<path/to/backup>`
 * [S3](https://aws.amazon.com/s3/). Example: `s3://<bucket>/<path/to/backup>`
+* [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs/). Example: `azblob://<bucket>/<path/to/backup>`
 * Any S3-compatible storage such as [MinIO](https://github.com/minio/minio), [Ceph](https://docs.ceph.com/en/pacific/radosgw/s3/) or [Swift](https://platform.swiftstack.com/docs/admin/middleware/s3_middleware.html). See [these docs](#advanced-usage) for details.
 * Local filesystem. Example: `fs://</absolute/path/to/backup>`. Note that `vmbackup` prevents from storing the backup into the directory pointed by `-storageDataPath` command-line flag, since this directory should be managed solely by VictoriaMetrics or `vmstorage`.

go.mod

@@ -4,6 +4,7 @@ go 1.19
 require (
 	cloud.google.com/go/storage v1.27.0
+	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1
 	github.com/VictoriaMetrics/fastcache v1.12.0
 	// Do not use the original github.com/valyala/fasthttp because of issues
@@ -39,6 +40,8 @@ require (
 	cloud.google.com/go v0.104.0 // indirect
 	cloud.google.com/go/compute v1.10.0 // indirect
 	cloud.google.com/go/iam v0.5.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.8 // indirect
 	github.com/aws/aws-sdk-go-v2/credentials v1.12.21 // indirect

go.sum

@@ -66,7 +66,15 @@ cloud.google.com/go/storage v1.27.0 h1:YOO045NZI9RKfCj1c5A/ZtuuENUc8OAW+gHdGnDgy
 cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
 collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/Azure/azure-sdk-for-go v48.2.0+incompatible h1:+t2P1j1r5N6lYgPiiz7ZbEVZFkWjVe9WhHbMm0gg8hw=
 github.com/Azure/azure-sdk-for-go v48.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0 h1:sVPhtT2qjO86rTUaWMr4WoES4TkjGnzcioXcnHV9s5k=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.0.0 h1:Yoicul8bnVdQrhDMTHxdEckRGX01XvwXDHUT9zYZ3k0=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 h1:jp0dGvZ7ZK0mgqnTSClMxa5xuRL7NZgHameVYF6BurY=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 h1:QSdcrd/UFJv6Bp/CfoVf2SrENpFn9P6Yh8yb+xNhYMM=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1/go.mod h1:eZ4g6GUvXiGulfIbbhh1Xr4XwUYaYaWMqzGD/284wCA=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
 github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
 github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
@@ -88,6 +96,7 @@ github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6L
 github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
 github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0 h1:WVsrXCnHlDDX8ls+tootqRE87/hL9S/g4ewig9RsD/c=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
@@ -241,6 +250,7 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
 github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
 github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/digitalocean/godo v1.52.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
+github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c=
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
@@ -412,6 +422,7 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
 github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
 github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c=
 github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
 github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -632,6 +643,7 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
@@ -749,6 +761,7 @@ github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHu
 github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
 github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
 github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -861,8 +874,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
 github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
 github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@@ -954,6 +967,7 @@ golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
+golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88 h1:Tgea0cVUD0ivh5ADBX4WwuI12DUd2to3nCYe2eayMIw=
 golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=


@@ -7,6 +7,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/azremote"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/common"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/fsremote"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/gcsremote"
@@ -183,7 +184,7 @@ func NewRemoteFS(path string) (common.RemoteFS, error) {
 	}
 	n := strings.Index(path, "://")
 	if n < 0 {
-		return nil, fmt.Errorf("Missing scheme in path %q. Supported schemes: `gs://`, `s3://`, `fs://`", path)
+		return nil, fmt.Errorf("Missing scheme in path %q. Supported schemes: `gs://`, `s3://`, `azblob://`, `fs://`", path)
 	}
 	scheme := path[:n]
 	dir := path[n+len("://"):]
@@ -212,6 +213,21 @@ func NewRemoteFS(path string) (common.RemoteFS, error) {
 			return nil, fmt.Errorf("cannot initialize connection to gcs: %w", err)
 		}
 		return fs, nil
+	case "azblob":
+		n := strings.Index(dir, "/")
+		if n < 0 {
+			return nil, fmt.Errorf("missing directory on the AZBlob container %q", dir)
+		}
+		bucket := dir[:n]
+		dir = dir[n:]
+		fs := &azremote.FS{
+			Container: bucket,
+			Dir:       dir,
+		}
+		if err := fs.Init(); err != nil {
+			return nil, fmt.Errorf("cannot initialize connection to AZBlob: %w", err)
+		}
+		return fs, nil
 	case "s3":
 		n := strings.Index(dir, "/")
 		if n < 0 {
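For context, a minimal sketch of how a caller might obtain the new remote FS through this factory, assuming `NewRemoteFS` is exported from the `lib/backup/actions` package as shown above and Azure credentials are present in the environment; the container name and path are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/actions"
)

func main() {
	// "backups" is the container; the rest is the directory inside it.
	remoteFS, err := actions.NewRemoteFS("azblob://backups/victoria-metrics/daily")
	if err != nil {
		log.Fatalf("cannot create remote fs: %s", err)
	}
	// Prints: AZBlob{container: "backups", dir: "victoria-metrics/daily/"}
	fmt.Println(remoteFS)
}
```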


@@ -0,0 +1,405 @@
package azremote
import (
"context"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/fscommon"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)
const (
envStorageAcctName = "AZURE_STORAGE_ACCOUNT_NAME"
envStorageAccKey = "AZURE_STORAGE_ACCOUNT_KEY"
envStorageAccCs = "AZURE_STORAGE_ACCOUNT_CONNECTION_STRING"
)
// FS represents filesystem for backups in Azure Blob Storage.
//
// Init must be called before calling other FS methods.
type FS struct {
// Azure Blob Storage container to use.
Container string
// Directory in the container to write to.
Dir string
client *azblob.ContainerClient
}
// Init initializes fs.
//
// The returned fs must be stopped when no longer needed by calling MustStop.
func (fs *FS) Init() error {
if fs.client != nil {
logger.Panicf("BUG: fs.Init has been already called")
}
for strings.HasPrefix(fs.Dir, "/") {
fs.Dir = fs.Dir[1:]
}
if !strings.HasSuffix(fs.Dir, "/") {
fs.Dir += "/"
}
var sc *azblob.ServiceClient
var err error
if cs, ok := os.LookupEnv(envStorageAccCs); ok {
sc, err = azblob.NewServiceClientFromConnectionString(cs, nil)
if err != nil {
return fmt.Errorf("failed to create AZBlob service client from connection string: %w", err)
}
}
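// NOTE: if both a connection string and a shared key are configured,
// the shared-key client created below takes precedence, since it
// overwrites sc.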
accountName, ok1 := os.LookupEnv(envStorageAcctName)
accountKey, ok2 := os.LookupEnv(envStorageAccKey)
if ok1 && ok2 {
creds, err := azblob.NewSharedKeyCredential(accountName, accountKey)
if err != nil {
return fmt.Errorf("failed to create AZBlob credentials from account name and key: %w", err)
}
serviceURL := fmt.Sprintf("https://%s.blob.core.windows.net/", accountName)
sc, err = azblob.NewServiceClientWithSharedKey(serviceURL, creds, nil)
if err != nil {
return fmt.Errorf("failed to create AZBlob service client from account name and key: %w", err)
}
}
if sc == nil {
return fmt.Errorf(`failed to detect any credentials type for AZBlob. Ensure a connection string is set at %q, or a shared key at %q and %q`, envStorageAccCs, envStorageAcctName, envStorageAccKey)
}
containerClient, err := sc.NewContainerClient(fs.Container)
if err != nil {
return fmt.Errorf("failed to create AZBlob container client: %w", err)
}
fs.client = containerClient
return nil
}
// MustStop stops fs.
func (fs *FS) MustStop() {
fs.client = nil
}
// String returns human-readable description for fs.
func (fs *FS) String() string {
return fmt.Sprintf("AZBlob{container: %q, dir: %q}", fs.Container, fs.Dir)
}
// ListParts returns all the parts for fs.
func (fs *FS) ListParts() ([]common.Part, error) {
dir := fs.Dir
ctx := context.Background()
opts := &azblob.ContainerListBlobsFlatOptions{
Prefix: &dir,
}
pager := fs.client.ListBlobsFlat(opts)
var parts []common.Part
for pager.NextPage(ctx) {
resp := pager.PageResponse()
for _, v := range resp.Segment.BlobItems {
file := *v.Name
if !strings.HasPrefix(file, dir) {
return nil, fmt.Errorf("unexpected prefix for AZBlob key %q; want %q", file, dir)
}
if fscommon.IgnorePath(file) {
continue
}
var p common.Part
if !p.ParseFromRemotePath(file[len(dir):]) {
logger.Infof("skipping unknown object %q", file)
continue
}
p.ActualSize = uint64(*v.Properties.ContentLength)
parts = append(parts, p)
}
}
if err := pager.Err(); err != nil {
return nil, fmt.Errorf("error when iterating objects at %q: %w", dir, err)
}
return parts, nil
}
// DeletePart deletes part p from fs.
func (fs *FS) DeletePart(p common.Part) error {
bc, err := fs.clientForPart(p)
if err != nil {
return err
}
ctx := context.Background()
if _, err := bc.Delete(ctx, &azblob.BlobDeleteOptions{}); err != nil {
return fmt.Errorf("cannot delete %q at %s (remote path %q): %w", p.Path, fs, bc.URL(), err)
}
return nil
}
// RemoveEmptyDirs recursively removes empty dirs in fs.
func (fs *FS) RemoveEmptyDirs() error {
// Blob storage has no directories, so nothing to remove.
return nil
}
// CopyPart copies p from srcFS to fs.
func (fs *FS) CopyPart(srcFS common.OriginFS, p common.Part) error {
src, ok := srcFS.(*FS)
if !ok {
return fmt.Errorf("cannot perform server-side copying from %s to %s: both of them must be AZBlob", srcFS, fs)
}
sbc, err := src.clientForPart(p)
if err != nil {
return fmt.Errorf("failed to initialize server-side copy of src %q: %w", p.Path, err)
}
dbc, err := fs.clientForPart(p)
if err != nil {
return fmt.Errorf("failed to initialize server-side copy of dst %q: %w", p.Path, err)
}
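// Server-side copy: mint a short-lived read SAS token for the source blob
// and let the destination copy from that URL, so the data moves inside
// Azure instead of streaming through this process.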
ssCopyPermission := azblob.BlobSASPermissions{
Read: true,
Create: true,
Write: true,
}
t, err := sbc.GetSASToken(ssCopyPermission, time.Now(), time.Now().Add(30*time.Minute))
if err != nil {
return fmt.Errorf("failed to generate SAS token of src %q: %w", p.Path, err)
}
srcURL := sbc.URL() + "?" + t.Encode()
ctx := context.Background()
_, err = dbc.CopyFromURL(ctx, srcURL, &azblob.BlockBlobCopyFromURLOptions{})
if err != nil {
return fmt.Errorf("cannot copy %q from %s to %s: %w", p.Path, src, fs, err)
}
return nil
}
// DownloadPart downloads part p from fs to w.
func (fs *FS) DownloadPart(p common.Part, w io.Writer) error {
bc, err := fs.clientForPart(p)
if err != nil {
return err
}
ctx := context.Background()
r, err := bc.Download(ctx, &azblob.BlobDownloadOptions{})
if err != nil {
return fmt.Errorf("cannot open reader for %q at %s (remote path %q): %w", p.Path, fs, bc.URL(), err)
}
body := r.Body(&azblob.RetryReaderOptions{})
n, err := io.Copy(w, body)
if err1 := body.Close(); err1 != nil && err == nil {
err = err1
}
if err != nil {
return fmt.Errorf("cannot download %q from at %s (remote path %q): %w", p.Path, fs, bc.URL(), err)
}
if uint64(n) != p.Size {
return fmt.Errorf("wrong data size downloaded from %q at %s; got %d bytes; want %d bytes", p.Path, fs, n, p.Size)
}
return nil
}
// UploadPart uploads part p from r to fs.
func (fs *FS) UploadPart(p common.Part, r io.Reader) error {
bc, err := fs.clientForPart(p)
if err != nil {
return err
}
ctx := context.Background()
_, err = bc.UploadStream(ctx, r, azblob.UploadStreamOptions{})
if err != nil {
return fmt.Errorf("cannot upload data to %q at %s (remote path %q): %w", p.Path, fs, bc.URL(), err)
}
return nil
}
func (fs *FS) clientForPart(p common.Part) (*azblob.BlockBlobClient, error) {
path := p.RemotePath(fs.Dir)
return fs.clientForPath(path)
}
func (fs *FS) clientForPath(path string) (*azblob.BlockBlobClient, error) {
bc, err := fs.client.NewBlockBlobClient(path)
if err != nil {
return nil, fmt.Errorf("unexpected error when creating client for blob %q: %w", path, err)
}
return bc, nil
}
// DeleteFile deletes filePath at fs if it exists.
//
// The function does nothing if the filePath doesn't exist.
func (fs *FS) DeleteFile(filePath string) error {
v, err := fs.HasFile(filePath)
if err != nil {
return err
}
if !v {
return nil
}
path := fs.Dir + filePath
bc, err := fs.clientForPath(path)
if err != nil {
return err
}
ctx := context.Background()
if _, err := bc.Delete(ctx, nil); err != nil {
return fmt.Errorf("cannot delete %q at %s (remote path %q): %w", filePath, fs, bc.URL(), err)
}
return nil
}
// CreateFile creates filePath at fs and puts data into it.
//
// The file is overwritten if it exists.
func (fs *FS) CreateFile(filePath string, data []byte) error {
path := fs.Dir + filePath
bc, err := fs.clientForPath(path)
if err != nil {
return err
}
ctx := context.Background()
r, err := bc.UploadBuffer(ctx, data, azblob.UploadOption{
Parallelism: 1,
})
if err != nil {
return fmt.Errorf("cannot upload %d bytes to %q at %s (remote path %q): %w", len(data), filePath, fs, bc.URL(), err)
}
// Close the response body only after the error check: on failure the
// response may be nil and closing its body would panic.
_ = r.Body.Close()
return nil
}
// HasFile returns true if filePath exists at fs.
func (fs *FS) HasFile(filePath string) (bool, error) {
path := fs.Dir + filePath
bc, err := fs.clientForPath(path)
if err != nil {
return false, err
}
ctx := context.Background()
_, err = bc.GetProperties(ctx, nil)
if err != nil {
var azerr *azblob.InternalError
var sterr *azblob.StorageError
if errors.As(err, &azerr) && azerr.As(&sterr) && sterr.ErrorCode == azblob.StorageErrorCodeBlobNotFound {
// The blob is missing; report "no file" instead of an error.
return false, nil
}
return false, fmt.Errorf("unexpected error when obtaining properties for %q at %s (remote path %q): %w", filePath, fs, bc.URL(), err)
}
return true, nil
}
// ListDirs returns the list of subdirectories at the given subpath in fs.
func (fs *FS) ListDirs(subpath string) ([]string, error) {
path := strings.TrimPrefix(filepath.Join(fs.Dir, subpath), "/")
if path != "" && !strings.HasSuffix(path, "/") {
path += "/"
}
var dirs []string
const dirsDelimiter = "/"
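// Blob storage has a flat namespace: listing with a delimiter makes the
// service group keys by their common prefixes (BlobPrefixes), which
// emulates directories.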
pager := fs.client.ListBlobsHierarchy(dirsDelimiter, &azblob.ContainerListBlobsHierarchyOptions{
Prefix: &path,
})
ctx := context.Background()
for pager.NextPage(ctx) {
resp := pager.PageResponse()
for _, v := range resp.Segment.BlobPrefixes {
dir := *v.Name
if !strings.HasPrefix(dir, path) {
return nil, fmt.Errorf("unexpected prefix for AZBlob key %q; want %q", dir, dir)
}
dir = strings.TrimPrefix(dir, path)
if fscommon.IgnorePath(dir) || !strings.Contains(dir, dirsDelimiter) {
continue
}
dirs = append(dirs, strings.TrimSuffix(dir, dirsDelimiter))
}
}
return dirs, nil
}
// DeleteFiles deletes files at fs.
//
// The function does nothing if the files don't exist.
func (fs *FS) DeleteFiles(filePaths []string) error {
if len(filePaths) == 0 {
return nil
}
for _, filePath := range filePaths {
path := filePath
if fs.Dir != "/" {
path = filepath.Join(fs.Dir, path)
}
ctx := context.Background()
opts := &azblob.ContainerListBlobsFlatOptions{
Prefix: &path,
}
pager := fs.client.ListBlobsFlat(opts)
for pager.NextPage(ctx) {
resp := pager.PageResponse()
for _, v := range resp.Segment.BlobItems {
file := *v.Name
bc, err := fs.clientForPath(file)
if err != nil {
return err
}
_, err = bc.Delete(ctx, &azblob.BlobDeleteOptions{})
if err != nil {
return fmt.Errorf("cannot delete %q at %s (remote dir %q): %w", file, fs, fs.Dir, err)
}
}
}
}
return nil
}
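To make the new backend's surface concrete, here is a minimal, hedged usage sketch of the FS defined above; the container name, directory, and file name are illustrative, and the shared-key environment variables are assumed to be exported:

```go
package main

import (
	"log"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/azremote"
)

func main() {
	// Assumes AZURE_STORAGE_ACCOUNT_NAME and AZURE_STORAGE_ACCOUNT_KEY
	// (or AZURE_STORAGE_ACCOUNT_CONNECTION_STRING) are set in the environment.
	fs := &azremote.FS{
		Container: "backups",        // illustrative container name
		Dir:       "victoria/daily", // illustrative backup directory
	}
	if err := fs.Init(); err != nil {
		log.Fatalf("cannot connect to Azure Blob Storage: %s", err)
	}
	defer fs.MustStop()

	// Write a small object, check for it, then remove it.
	if err := fs.CreateFile("probe.txt", []byte("hello")); err != nil {
		log.Fatalf("cannot create file: %s", err)
	}
	ok, err := fs.HasFile("probe.txt")
	if err != nil {
		log.Fatalf("cannot check file: %s", err)
	}
	log.Printf("probe.txt exists: %v", ok)
	if err := fs.DeleteFile("probe.txt"); err != nil {
		log.Fatalf("cannot delete file: %s", err)
	}
}
```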


@@ -0,0 +1,456 @@
# Release History
## 1.0.0 (2022-05-12)
### Features Added
* Added interface `runtime.PollingHandler` to support custom poller implementations.
* Added field `PollingHandler` of this type to `runtime.NewPollerOptions[T]` and `runtime.NewPollerFromResumeTokenOptions[T]`.
### Breaking Changes
* Renamed `cloud.Configuration.LoginEndpoint` to `.ActiveDirectoryAuthorityHost`
* Renamed `cloud.AzurePublicCloud` to `cloud.AzurePublic`
* Removed `AuxiliaryTenants` field from `arm/ClientOptions` and `arm/policy/BearerTokenOptions`
* Removed `TokenRequestOptions.TenantID`
* `Poller[T].PollUntilDone()` now takes an `options *PollUntilDoneOptions` param instead of `freq time.Duration`
* Removed `arm/runtime.Poller[T]`, `arm/runtime.NewPoller[T]()` and `arm/runtime.NewPollerFromResumeToken[T]()`
* Removed `arm/runtime.FinalStateVia` and related `const` values
* Renamed `runtime.PageProcessor` to `runtime.PagingHandler`
* The `arm/runtime.ProviderRepsonse` and `arm/runtime.Provider` types are no longer exported.
* Renamed `NewRequestIdPolicy()` to `NewRequestIDPolicy()`
* `TokenCredential.GetToken` now returns `AccessToken` by value.
### Bugs Fixed
* When per-try timeouts are enabled, only cancel the context after the body has been read and closed.
* The `Operation-Location` poller now properly handles `final-state-via` values.
* Improvements in `runtime.Poller[T]`
* `Poll()` shouldn't cache errors, allowing for additional retries when in a non-terminal state.
* `Result()` will cache the terminal result or error but not transient errors, allowing for additional retries.
### Other Changes
* Updated to latest `internal` module and absorbed breaking changes.
* Use `temporal.Resource` and deleted copy.
* The internal poller implementation has been refactored.
* The implementation in `internal/pollers/poller.go` has been merged into `runtime/poller.go` with some slight modification.
* The internal poller types had their methods updated to conform to the `runtime.PollingHandler` interface.
* The creation of resume tokens has been refactored so that implementers of `runtime.PollingHandler` don't need to know about it.
* `NewPipeline()` places policies from `ClientOptions` after policies from `PipelineOptions`
* Default User-Agent headers no longer include `azcore` version information
## 0.23.1 (2022-04-14)
### Bugs Fixed
* Include XML header when marshalling XML content.
* Handle XML namespaces when searching for error code.
* Handle `odata.error` when searching for error code.
## 0.23.0 (2022-04-04)
### Features Added
* Added `runtime.Pager[T any]` and `runtime.Poller[T any]` supporting types for central, generic, implementations.
* Added `cloud` package with a new API for cloud configuration
* Added `FinalStateVia` field to `runtime.NewPollerOptions[T any]` type.
### Breaking Changes
* Removed the `Poller` type-alias to the internal poller implementation.
* Added `Ptr[T any]` and `SliceOfPtrs[T any]` in the `to` package and removed all non-generic implementations.
* `NullValue` and `IsNullValue` now take a generic type parameter instead of an interface func parameter.
* Replaced `arm.Endpoint` with `cloud` API
* Removed the `endpoint` parameter from `NewRPRegistrationPolicy()`
* `arm/runtime.NewPipeline()` and `.NewRPRegistrationPolicy()` now return an `error`
* Refactored `NewPoller` and `NewPollerFromResumeToken` funcs in `arm/runtime` and `runtime` packages.
* Removed the `pollerID` parameter as it's no longer required.
* Created optional parameter structs and moved optional parameters into them.
* Changed `FinalStateVia` field to a `const` type.
### Other Changes
* Converted expiring resource and dependent types to use generics.
## 0.22.0 (2022-03-03)
### Features Added
* Added header `WWW-Authenticate` to the default allow-list of headers for logging.
* Added a pipeline policy that enables the retrieval of HTTP responses from API calls.
* Added `runtime.WithCaptureResponse` to enable the policy at the API level (off by default).
### Breaking Changes
* Moved `WithHTTPHeader` and `WithRetryOptions` from the `policy` package to the `runtime` package.
## 0.21.1 (2022-02-04)
### Bugs Fixed
* Restore response body after reading in `Poller.FinalResponse()`. (#16911)
* Fixed bug in `NullValue` that could lead to incorrect comparisons for empty maps/slices (#16969)
### Other Changes
* `BearerTokenPolicy` is more resilient to transient authentication failures. (#16789)
## 0.21.0 (2022-01-11)
### Features Added
* Added `AllowedHeaders` and `AllowedQueryParams` to `policy.LogOptions` to control which headers and query parameters are written to the logger.
* Added `azcore.ResponseError` type which is returned from APIs when a non-success HTTP status code is received.
### Breaking Changes
* Moved `[]policy.Policy` parameters of `arm/runtime.NewPipeline` and `runtime.NewPipeline` into a new struct, `runtime.PipelineOptions`
* Renamed `arm/ClientOptions.Host` to `.Endpoint`
* Moved `Request.SkipBodyDownload` method to function `runtime.SkipBodyDownload`
* Removed `azcore.HTTPResponse` interface type
* `arm.NewPoller()` and `runtime.NewPoller()` no longer require an `eu` parameter
* `runtime.NewResponseError()` no longer requires an `error` parameter
## 0.20.0 (2021-10-22)
### Breaking Changes
* Removed `arm.Connection`
* Removed `azcore.Credential` and `.NewAnonymousCredential()`
* `NewRPRegistrationPolicy` now requires an `azcore.TokenCredential`
* `runtime.NewPipeline` has a new signature that simplifies implementing custom authentication
* `arm/runtime.RegistrationOptions` embeds `policy.ClientOptions`
* Contents in the `log` package have been slightly renamed.
* Removed `AuthenticationOptions` in favor of `policy.BearerTokenOptions`
* Changed parameters for `NewBearerTokenPolicy()`
* Moved policy config options out of `arm/runtime` and into `arm/policy`
### Features Added
* Updating Documentation
* Added string typedef `arm.Endpoint` to provide a hint toward expected ARM client endpoints
* `azcore.ClientOptions` contains common pipeline configuration settings
* Added support for multi-tenant authorization in `arm/runtime`
* Require one second minimum when calling `PollUntilDone()`
### Bug Fixes
* Fixed a potential panic when creating the default Transporter.
* Close LRO initial response body when creating a poller.
* Fixed a panic when recursively cloning structs that contain time.Time.
## 0.19.0 (2021-08-25)
### Breaking Changes
* Split content out of `azcore` into various packages. The intent is to separate content based on its usage (common, uncommon, SDK authors).
* `azcore` has all core functionality.
* `log` contains facilities for configuring in-box logging.
* `policy` is used for configuring pipeline options and creating custom pipeline policies.
* `runtime` contains various helpers used by SDK authors and generated content.
* `streaming` has helpers for streaming IO operations.
* `NewTelemetryPolicy()` now requires module and version parameters and the `Value` option has been removed.
* As a result, the `Request.Telemetry()` method has been removed.
* The telemetry policy now includes the SDK prefix `azsdk-go-` so callers no longer need to provide it.
* The `*http.Request` in `runtime.Request` is no longer anonymously embedded. Use the `Raw()` method to access it.
* The `UserAgent` and `Version` constants have been made internal, `Module` and `Version` respectively.
### Bug Fixes
* Fixed an issue in the retry policy where the request body could be overwritten after a rewind.
### Other Changes
* Moved modules `armcore` and `to` content into `arm` and `to` packages respectively.
* The `Pipeline()` method on `armcore.Connection` has been replaced by `NewPipeline()` in `arm.Connection`. It takes module and version parameters used by the telemetry policy.
* Poller logic has been consolidated across ARM and core implementations.
* This required some changes to the internal interfaces for core pollers.
* The core poller types have been improved, including more logging and test coverage.
## 0.18.1 (2021-08-20)
### Features Added
* Adds an `ETag` type for comparing etags and handling etags on requests
* Simplifies the `requestBodyProgess` and `responseBodyProgress` into a single `progress` object
### Bugs Fixed
* `JoinPaths` will preserve query parameters encoded in the `root` url.
### Other Changes
* Bumps dependency on `internal` module to the latest version (v0.7.0)
## 0.18.0 (2021-07-29)
### Features Added
* Replaces methods from Logger type with two package methods for interacting with the logging functionality.
* `azcore.SetClassifications` replaces `azcore.Logger().SetClassifications`
* `azcore.SetListener` replaces `azcore.Logger().SetListener`
### Breaking Changes
* Removes `Logger` type from `azcore`
## 0.17.0 (2021-07-27)
### Features Added
* Adding TenantID to TokenRequestOptions (https://github.com/Azure/azure-sdk-for-go/pull/14879)
* Adding AuxiliaryTenants to AuthenticationOptions (https://github.com/Azure/azure-sdk-for-go/pull/15123)
### Breaking Changes
* Rename `AnonymousCredential` to `NewAnonymousCredential` (https://github.com/Azure/azure-sdk-for-go/pull/15104)
* rename `AuthenticationPolicyOptions` to `AuthenticationOptions` (https://github.com/Azure/azure-sdk-for-go/pull/15103)
* Make Header constants private (https://github.com/Azure/azure-sdk-for-go/pull/15038)
## 0.16.2 (2021-05-26)
### Features Added
* Improved support for byte arrays [#14715](https://github.com/Azure/azure-sdk-for-go/pull/14715)
## 0.16.1 (2021-05-19)
### Features Added
* Add license.txt to azcore module [#14682](https://github.com/Azure/azure-sdk-for-go/pull/14682)
## 0.16.0 (2021-05-07)
### Features Added
* Remove extra `*` in UnmarshalAsByteArray() [#14642](https://github.com/Azure/azure-sdk-for-go/pull/14642)
## 0.15.1 (2021-05-06)
### Features Added
* Cache the original request body on Request [#14634](https://github.com/Azure/azure-sdk-for-go/pull/14634)
## 0.15.0 (2021-05-05)
### Features Added
* Add support for null map and slice
* Export `Response.Payload` method
### Breaking Changes
* remove `Response.UnmarshalError` as it's no longer required
## 0.14.5 (2021-04-23)
### Features Added
* Add `UnmarshalError()` on `azcore.Response`
## 0.14.4 (2021-04-22)
### Features Added
* Support for basic LRO polling
* Added type `LROPoller` and supporting types for basic polling on long running operations.
* rename poller param and added doc comment
### Bugs Fixed
* Fixed content type detection bug in logging.
## 0.14.3 (2021-03-29)
### Features Added
* Add support for multi-part form data
* Added method `WriteMultipartFormData()` to Request.
## 0.14.2 (2021-03-17)
### Features Added
* Add support for encoding JSON null values
* Adds `NullValue()` and `IsNullValue()` functions for setting and detecting sentinel values used for encoding a JSON null.
* Documentation fixes
### Bugs Fixed
* Fixed improper error wrapping
## 0.14.1 (2021-02-08)
### Features Added
* Add `Pager` and `Poller` interfaces to azcore
## 0.14.0 (2021-01-12)
### Features Added
* Accept zero-value options for default values
* Specify zero-value options structs to accept default values.
* Remove `DefaultXxxOptions()` methods.
* Do not silently change TryTimeout on negative values
* make per-try timeout opt-in
## 0.13.4 (2020-11-20)
### Features Added
* Include telemetry string in User Agent
## 0.13.3 (2020-11-20)
### Features Added
* Updating response body handling on `azcore.Response`
## 0.13.2 (2020-11-13)
### Features Added
* Remove implementation of stateless policies as first-class functions.
## 0.13.1 (2020-11-05)
### Features Added
* Add `Telemetry()` method to `azcore.Request()`
## 0.13.0 (2020-10-14)
### Features Added
* Rename `log` to `logger` to avoid name collision with the log package.
* Documentation improvements
* Simplified `DefaultHTTPClientTransport()` implementation
## 0.12.1 (2020-10-13)
### Features Added
* Update `internal` module dependence to `v0.5.0`
## 0.12.0 (2020-10-08)
### Features Added
* Removed storage specific content
* Removed internal content to prevent API clutter
* Refactored various policy options to conform with our options pattern
## 0.11.0 (2020-09-22)
### Features Added
* Removed `LogError` and `LogSlowResponse`.
* Renamed `options` in `RequestLogOptions`.
* Updated `NewRequestLogPolicy()` to follow standard pattern for options.
* Refactored `requestLogPolicy.Do()` per above changes.
* Cleaned up/added logging in retry policy.
* Export `NewResponseError()`
* Fix `RequestLogOptions` comment
## 0.10.1 (2020-09-17)
### Features Added
* Add default console logger
* Default console logger writes to stderr. To enable it, set env var `AZURE_SDK_GO_LOGGING` to the value 'all'.
* Added `Logger.Writef()` to reduce the need for `ShouldLog()` checks.
* Add `LogLongRunningOperation`
## 0.10.0 (2020-09-10)
### Features Added
* The `request` and `transport` interfaces have been refactored to align with the patterns in the standard library.
* `NewRequest()` now uses `http.NewRequestWithContext()` and performs additional validation, it also requires a context parameter.
* The `Policy` and `Transport` interfaces have had their context parameter removed as the context is associated with the underlying `http.Request`.
* `Pipeline.Do()` will validate the HTTP request before sending it through the pipeline, avoiding retries on a malformed request.
* The `Retrier` interface has been replaced with the `NonRetriableError` interface, and the retry policy updated to test for this.
* `Request.SetBody()` now requires a content type parameter for setting the request's MIME type.
* moved path concatenation into `JoinPaths()` func
## 0.9.6 (2020-08-18)
### Features Added
* Improvements to body download policy
* Always download the response body for error responses, i.e. HTTP status codes >= 400.
* Simplify variable declarations
## 0.9.5 (2020-08-11)
### Features Added
* Set the Content-Length header in `Request.SetBody`
## 0.9.4 (2020-08-03)
### Features Added
* Fix cancellation of per try timeout
* Per try timeout is used to ensure that an HTTP operation doesn't take too long, e.g. that a GET on some URL doesn't take an inordinate amount of time.
* Once the HTTP request returns, the per try timeout should be cancelled, not when the response has been read to completion.
* Do not drain response body if there are no more retries
* Do not retry non-idempotent operations when body download fails
## 0.9.3 (2020-07-28)
### Features Added
* Add support for custom HTTP request headers
* Inserts an internal policy into the pipeline that can extract HTTP header values from the caller's context, adding them to the request.
* Use `azcore.WithHTTPHeader` to add HTTP headers to a context.
* Remove method specific to Go 1.14
## 0.9.2 (2020-07-28)
### Features Added
* Omit read-only content from request payloads
* If any field in a payload's object graph contains `azure:"ro"`, make a clone of the object graph, omitting all fields with this annotation.
* Verify no fields were dropped
* Handle embedded struct types
* Added test for cloning by value
* Add messages to failures
## 0.9.1 (2020-07-22)
### Features Added
* Updated dependency on internal module to fix race condition.
## 0.9.0 (2020-07-09)
### Features Added
* Add `HTTPResponse` interface to be used by callers to access the raw HTTP response from an error in the event of an API call failure.
* Updated `sdk/internal` dependency to latest version.
* Rename package alias
## 0.8.2 (2020-06-29)
### Features Added
* Added missing documentation comments
### Bugs Fixed
* Fixed a bug in body download policy.
## 0.8.1 (2020-06-26)
### Features Added
* Miscellaneous clean-up reported by linters
## 0.8.0 (2020-06-01)
### Features Added
* Differentiate between standard and URL encoding.
## 0.7.1 (2020-05-27)
### Features Added
* Add support for base64 encoding and decoding of payloads.
## 0.7.0 (2020-05-12)
### Features Added
* Change `RetryAfter()` to a function.
## 0.6.0 (2020-04-29)
### Features Added
* Updating `RetryAfter` to only return the duration in the Retry-After header
## 0.5.0 (2020-03-23)
### Features Added
* Export `TransportFunc`
### Breaking Changes
* Removed `IterationDone`
## 0.4.1 (2020-02-25)
### Features Added
* Ensure per-try timeout is properly cancelled
* Explicitly call cancel the per-try timeout when the response body has been read/closed by the body download policy.
* When the response body is returned to the caller for reading/closing, wrap it in a `responseBodyReader` that will cancel the timeout when the body is closed.
* `Logger.Should()` will return false if no listener is set.
## 0.4.0 (2020-02-18)
### Features Added
* Enable custom `RetryOptions` to be specified per API call
* Added `WithRetryOptions()` that adds a custom `RetryOptions` to the provided context, allowing custom settings per API call.
* Remove 429 from the list of default HTTP status codes for retry.
* Change StatusCodesForRetry to a slice so consumers can append to it.
* Added support for retry-after in HTTP-date format.
* Cleaned up some comments specific to storage.
* Remove `Request.SetQueryParam()`
* Renamed `MaxTries` to `MaxRetries`
## 0.3.0 (2020-01-16)
### Features Added
* Added `DefaultRetryOptions` to create initialized default options.
### Breaking Changes
* Removed `Response.CheckStatusCode()`
## 0.2.0 (2020-01-15)
### Features Added
* Add support for marshalling and unmarshalling JSON
* Removed `Response.Payload` field
* Exit early when unmarshalling if there is no payload
## 0.1.0 (2020-01-10)
### Features Added
* Initial release


@@ -0,0 +1,21 @@
MIT License
Copyright (c) Microsoft Corporation.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -0,0 +1,39 @@
# Azure Core Client Module for Go
[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azcore)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore)
[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/go%20-%20azcore%20-%20ci?branchName=main)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=1843&branchName=main)
[![Code Coverage](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main)](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main)
The `azcore` module provides a set of common interfaces and types for Go SDK client modules.
These modules follow the [Azure SDK Design Guidelines for Go](https://azure.github.io/azure-sdk/golang_introduction.html).
## Getting started
This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management.
Typically, you will not need to explicitly install `azcore` as it will be installed as a client module dependency.
To add the latest version to your `go.mod` file, execute the following command.
```bash
go get github.com/Azure/azure-sdk-for-go/sdk/azcore
```
General documentation and examples can be found on [pkg.go.dev](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore).
## Contributing
This project welcomes contributions and suggestions. Most contributions require
you to agree to a Contributor License Agreement (CLA) declaring that you have
the right to, and actually do, grant us the rights to use your contribution.
For details, visit [https://cla.microsoft.com](https://cla.microsoft.com).
When you submit a pull request, a CLA-bot will automatically determine whether
you need to provide a CLA and decorate the PR appropriately (e.g., label,
comment). Simply follow the instructions provided by the bot. You will only
need to do this once across all repos using our CLA.
This project has adopted the
[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information, see the
[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any
additional questions or comments.


@@ -0,0 +1,29 @@
# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file.
trigger:
  branches:
    include:
      - main
      - feature/*
      - hotfix/*
      - release/*
  paths:
    include:
      - sdk/azcore/
      - eng/

pr:
  branches:
    include:
      - main
      - feature/*
      - hotfix/*
      - release/*
  paths:
    include:
      - sdk/azcore/
      - eng/

stages:
  - template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml
    parameters:
      ServiceDirectory: azcore


@@ -0,0 +1,44 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package cloud
var (
	// AzureChina contains configuration for Azure China.
	AzureChina = Configuration{
		ActiveDirectoryAuthorityHost: "https://login.chinacloudapi.cn/",
		Services:                     map[ServiceName]ServiceConfiguration{},
	}
	// AzureGovernment contains configuration for Azure Government.
	AzureGovernment = Configuration{
		ActiveDirectoryAuthorityHost: "https://login.microsoftonline.us/",
		Services:                     map[ServiceName]ServiceConfiguration{},
	}
	// AzurePublic contains configuration for Azure Public Cloud.
	AzurePublic = Configuration{
		ActiveDirectoryAuthorityHost: "https://login.microsoftonline.com/",
		Services:                     map[ServiceName]ServiceConfiguration{},
	}
)
// ServiceName identifies a cloud service.
type ServiceName string
// ResourceManager is a global constant identifying Azure Resource Manager.
const ResourceManager ServiceName = "resourceManager"
// ServiceConfiguration configures a specific cloud service such as Azure Resource Manager.
type ServiceConfiguration struct {
// Audience is the audience the client will request for its access tokens.
Audience string
// Endpoint is the service's base URL.
Endpoint string
}
// Configuration configures a cloud.
type Configuration struct {
// ActiveDirectoryAuthorityHost is the base URL of the cloud's Azure Active Directory.
ActiveDirectoryAuthorityHost string
// Services contains configuration for the cloud's services.
Services map[ServiceName]ServiceConfiguration
}


@@ -0,0 +1,53 @@
//go:build go1.16
// +build go1.16
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
/*
Package cloud implements a configuration API for applications deployed to sovereign or private Azure clouds.
Azure SDK client configuration defaults are appropriate for Azure Public Cloud (sometimes referred to as
"Azure Commercial" or simply "Microsoft Azure"). This package enables applications deployed to other
Azure Clouds to configure clients appropriately.
This package contains predefined configuration for well-known sovereign clouds such as Azure Government and
Azure China. Azure SDK clients accept this configuration via the Cloud field of azcore.ClientOptions. For
example, configuring a credential and ARM client for Azure Government:
opts := azcore.ClientOptions{Cloud: cloud.AzureGovernment}
cred, err := azidentity.NewDefaultAzureCredential(
&azidentity.DefaultAzureCredentialOptions{ClientOptions: opts},
)
handle(err)
client, err := armsubscription.NewClient(
cred, &arm.ClientOptions{ClientOptions: opts},
)
handle(err)
Applications deployed to a private cloud such as Azure Stack create a Configuration object with
appropriate values:
c := cloud.Configuration{
ActiveDirectoryAuthorityHost: "https://...",
Services: map[cloud.ServiceName]cloud.ServiceConfiguration{
cloud.ResourceManager: {
Audience: "...",
Endpoint: "https://...",
},
},
}
opts := azcore.ClientOptions{Cloud: c}
cred, err := azidentity.NewDefaultAzureCredential(
&azidentity.DefaultAzureCredentialOptions{ClientOptions: opts},
)
handle(err)
client, err := armsubscription.NewClient(
cred, &arm.ClientOptions{ClientOptions: opts},
)
handle(err)
*/
package cloud


@@ -0,0 +1,75 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azcore
import (
"context"
"reflect"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)
// AccessToken represents an Azure service bearer access token with expiry information.
type AccessToken struct {
Token string
ExpiresOn time.Time
}
// TokenCredential represents a credential capable of providing an OAuth token.
type TokenCredential interface {
// GetToken requests an access token for the specified set of scopes.
GetToken(ctx context.Context, options policy.TokenRequestOptions) (AccessToken, error)
}
// holds sentinel values used to send nulls
var nullables map[reflect.Type]interface{} = map[reflect.Type]interface{}{}
// NullValue is used to send an explicit 'null' within a request.
// This is typically used in JSON-MERGE-PATCH operations to delete a value.
func NullValue[T any]() T {
t := shared.TypeOfT[T]()
v, found := nullables[t]
if !found {
var o reflect.Value
if k := t.Kind(); k == reflect.Map {
o = reflect.MakeMap(t)
} else if k == reflect.Slice {
// empty slices appear to all point to the same data block
// which causes comparisons to become ambiguous. so we create
// a slice with len/cap of one which ensures a unique address.
o = reflect.MakeSlice(t, 1, 1)
} else {
o = reflect.New(t.Elem())
}
v = o.Interface()
nullables[t] = v
}
// return the sentinel object
return v.(T)
}
// IsNullValue returns true if the field contains a null sentinel value.
// This is used by custom marshallers to properly encode a null value.
func IsNullValue[T any](v T) bool {
// see if our map has a sentinel object for this *T
t := reflect.TypeOf(v)
if o, found := nullables[t]; found {
o1 := reflect.ValueOf(o)
v1 := reflect.ValueOf(v)
// we found it; return true if v points to the sentinel object.
// NOTE: maps and slices can only be compared to nil, else you get
// a runtime panic. so we compare addresses instead.
return o1.Pointer() == v1.Pointer()
}
// no sentinel object for this *t
return false
}
// ClientOptions contains configuration settings for a client's pipeline.
type ClientOptions = policy.ClientOptions
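As a quick illustration of the sentinel mechanism above, here is a hedged sketch; the `widget` type and its field are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
)

type widget struct {
	// Tags uses omitempty, so an ordinary nil map would simply be omitted.
	Tags map[string]string `json:"tags,omitempty"`
}

func main() {
	// Request deletion of tags by marking the field with the null sentinel.
	w := widget{Tags: azcore.NullValue[map[string]string]()}
	fmt.Println(azcore.IsNullValue(w.Tags)) // true: custom marshallers encode this as JSON null
	fmt.Println(azcore.IsNullValue(map[string]string{})) // false: ordinary empty map
}
```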


@@ -0,0 +1,257 @@
//go:build go1.18
// +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.
/*
Package azcore implements an HTTP request/response middleware pipeline used by Azure SDK clients.
The middleware consists of three components.
- One or more Policy instances.
- A Transporter instance.
- A Pipeline instance that combines the Policy and Transporter instances.
Implementing the Policy Interface
A Policy can be implemented in two ways; as a first-class function for a stateless Policy, or as
a method on a type for a stateful Policy. Note that HTTP requests made via the same pipeline share
the same Policy instances, so if a Policy mutates its state it MUST be properly synchronized to
avoid race conditions.
A Policy's Do method is called when an HTTP request wants to be sent over the network. The Do method can
perform any operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers,
and/or query parameters, inject a failure, etc. Once the Policy has successfully completed its request
work, it must call the Next() method on the *policy.Request instance in order to pass the request to the
next Policy in the chain.
When an HTTP response comes back, the Policy then gets a chance to process the response/error. The Policy instance
can log the response, retry the operation if it failed due to a transient error or timeout, unmarshal the response
body, etc. Once the Policy has successfully completed its response work, it must return the *http.Response
and error instances to its caller.
Template for implementing a stateless Policy:
type policyFunc func(*policy.Request) (*http.Response, error)
// Do implements the Policy interface on policyFunc.
func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) {
return pf(req)
}
func NewMyStatelessPolicy() policy.Policy {
return policyFunc(func(req *policy.Request) (*http.Response, error) {
// TODO: mutate/process Request here
// forward Request to next Policy & get Response/error
resp, err := req.Next()
// TODO: mutate/process Response/error here
// return Response/error to previous Policy
return resp, err
})
}
Template for implementing a stateful Policy:
type MyStatefulPolicy struct {
// TODO: add configuration/setting fields here
}
// TODO: add initialization args to NewMyStatefulPolicy()
func NewMyStatefulPolicy() policy.Policy {
return &MyStatefulPolicy{
// TODO: initialize configuration/setting fields here
}
}
func (p *MyStatefulPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
// TODO: mutate/process Request here
// forward Request to next Policy & get Response/error
resp, err = req.Next()
// TODO: mutate/process Response/error here
// return Response/error to previous Policy
return resp, err
}
Implementing the Transporter Interface
The Transporter interface is responsible for sending the HTTP request and returning the corresponding
HTTP response or error. The Transporter is invoked by the last Policy in the chain. The default Transporter
implementation uses a shared http.Client from the standard library.
The same stateful/stateless rules for Policy implementations apply to Transporter implementations.
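A stateless Transporter can be implemented with a first-class function, mirroring the
policyFunc template above. The following is an illustrative sketch, not part of the package:
type transportFunc func(*http.Request) (*http.Response, error)
// Do implements the Transporter interface on transportFunc.
func (tf transportFunc) Do(req *http.Request) (*http.Response, error) {
return tf(req)
}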
Using Policy and Transporter Instances Via a Pipeline
To use the Policy and Transporter instances, an application passes them to the runtime.NewPipeline function.
func NewPipeline(transport Transporter, policies ...Policy) Pipeline
The specified Policy instances form a chain and are invoked in the order provided to NewPipeline
followed by the Transporter.
Once the Pipeline has been created, create a runtime.Request instance and pass it to Pipeline's Do method.
func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error)
func (p Pipeline) Do(req *Request) (*http.Response, error)
The Pipeline.Do method sends the specified Request through the chain of Policy and Transporter
instances. The response/error is then sent through the same chain of Policy instances in reverse
order. For example, assuming there are Policy types PolicyA, PolicyB, and PolicyC along with
TransportA.
pipeline := NewPipeline(TransportA, PolicyA, PolicyB, PolicyC)
The flow of Request and Response looks like the following:
policy.Request -> PolicyA -> PolicyB -> PolicyC -> TransportA ------+
                                                                    |
                                                         HTTP(S) endpoint
                                                                    |
caller <--------- PolicyA <- PolicyB <- PolicyC <- http.Response ---+
Creating a Request Instance
The Request instance passed to Pipeline's Do method is a wrapper around an *http.Request. It also
contains some internal state and provides various convenience methods. You create a Request instance
by calling the runtime.NewRequest function:
func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error)
If the Request should contain a body, call the SetBody method.
func (req *Request) SetBody(body ReadSeekCloser, contentType string) error
A seekable stream is required so that upon retry, the retry Policy instance can seek the stream
back to the beginning before retrying the network request and re-uploading the body.
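For example (an illustrative sketch), an in-memory payload can be wrapped with
streaming.NopCloser to obtain the required seekable body:
err := req.SetBody(streaming.NopCloser(bytes.NewReader(data)), "application/json")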
Sending an Explicit Null
Operations like JSON-MERGE-PATCH send a JSON null to indicate a value should be deleted.
{
"delete-me": null
}
This requirement conflicts with the SDK's default marshalling that specifies "omitempty" as
a means to resolve the ambiguity between a field to be excluded and its zero-value.
type Widget struct {
Name *string `json:",omitempty"`
Count *int `json:",omitempty"`
}
In the above example, Name and Count are defined as pointer-to-type to disambiguate between
a missing value (nil) and a zero-value (0) which might have semantic differences.
In a PATCH operation, any fields left as nil are to have their values preserved. When updating
a Widget's count, one simply specifies the new value for Count, leaving Name nil.
To fulfill the requirement for sending a JSON null, the NullValue() function can be used.
w := Widget{
Count: azcore.NullValue[*int](),
}
This sends an explicit "null" for Count, indicating that any current value for Count should be deleted.
Processing the Response
When the HTTP response is received, the *http.Response is returned directly. Each Policy instance
can inspect/mutate the *http.Response.
Built-in Logging
To enable logging, set environment variable AZURE_SDK_GO_LOGGING to "all" before executing your program.
By default the logger writes to stderr. This can be customized by calling log.SetListener, providing
a callback that writes to the desired location. Any custom logging implementation MUST provide its
own synchronization to handle concurrent invocations.
See the docs for the log package for further details.
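For example, the following sketch routes all SDK log output to stdout (azlog here
names the github.com/Azure/azure-sdk-for-go/sdk/azcore/log package):
azlog.SetListener(func(event azlog.Event, msg string) {
fmt.Println(msg)
})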
Pageable Operations
Pageable operations return potentially large data sets spread over multiple GET requests. The result of
each GET is a "page" of data consisting of a slice of items.
Pageable operations can be identified by their New*Pager naming convention and return type of *runtime.Pager[T].
func (c *WidgetClient) NewListWidgetsPager(o *Options) *runtime.Pager[PageResponse]
The call to WidgetClient.NewListWidgetsPager() returns an instance of *runtime.Pager[T] for fetching pages
and determining if there are more pages to fetch. No IO calls are made until the NextPage() method is invoked.
pager := widgetClient.NewListWidgetsPager(nil)
for pager.More() {
page, err := pager.NextPage(context.TODO())
// handle err
for _, widget := range page.Values {
// process widget
}
}
Long-Running Operations
Long-running operations (LROs) are operations consisting of an initial request to start the operation followed
by polling to determine when the operation has reached a terminal state. An LRO's terminal state is one
of the following values.
* Succeeded - the LRO completed successfully
* Failed - the LRO failed to complete
* Canceled - the LRO was canceled
LROs can be identified by their Begin* prefix and their return type of *runtime.Poller[T].
func (c *WidgetClient) BeginCreateOrUpdate(ctx context.Context, w Widget, o *Options) (*runtime.Poller[Response], error)
When a call to WidgetClient.BeginCreateOrUpdate() returns a nil error, it means that the LRO has started.
It does _not_ mean that the widget has been created or updated (or failed to be created/updated).
The *runtime.Poller[T] provides APIs for determining the state of the LRO. To wait for the LRO to complete,
call the PollUntilDone() method.
poller, err := widgetClient.BeginCreateOrUpdate(context.TODO(), Widget{}, nil)
// handle err
result, err := poller.PollUntilDone(context.TODO(), nil)
// handle err
// use result
The call to PollUntilDone() will block the current goroutine until the LRO has reached a terminal state or the
context is canceled/timed out.
Note that LROs can take anywhere from several seconds to several minutes. The duration is operation-dependent. Due to
this variability, pollers do _not_ have a preconfigured time-out. Use a context with the appropriate cancellation
mechanism as required.
Resume Tokens
Pollers provide the ability to serialize their state into a "resume token" which can be used by another process to
recreate the poller. This is achieved via the runtime.Poller[T].ResumeToken() method.
token, err := poller.ResumeToken()
// handle error
Note that a token can only be obtained for a poller that's in a non-terminal state. Also note that any subsequent calls
to poller.Poll() might change the poller's state. In this case, a new token should be created.
After the token has been obtained, it can be used to recreate an instance of the originating poller.
poller, err := widgetClient.BeginCreateOrUpdate(nil, Widget{}, &Options{
ResumeToken: token,
})
When resuming a poller, no IO is performed, and zero-value arguments can be used for everything but the Options.ResumeToken.
Resume tokens are unique per service client and operation. Attempting to resume a poller for LRO BeginB() with a token from LRO
BeginA() will result in an error.
*/
package azcore

View file

@ -0,0 +1,14 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azcore
import "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
// ResponseError is returned when a request is made to a service and
// the service returns a non-success HTTP status code.
// Use errors.As() to access this type in the error chain.
type ResponseError = exported.ResponseError

View file

@ -0,0 +1,48 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azcore
import (
"strings"
)
// ETag is a property used for optimistic concurrency during updates.
// It is a validator based on https://tools.ietf.org/html/rfc7232#section-2.3.2.
// An ETag can be empty ("").
type ETag string
// ETagAny is an ETag that represents everything, the value is "*"
const ETagAny ETag = "*"
// Equals does a strong comparison of two ETags. Equals returns true when both
// ETags are not weak and the values of the underlying strings are equal.
func (e ETag) Equals(other ETag) bool {
return !e.IsWeak() && !other.IsWeak() && e == other
}
// WeakEquals does a weak comparison of two ETags. Two ETags are equivalent if their opaque-tags match
// character-by-character, regardless of either or both being tagged as "weak".
func (e ETag) WeakEquals(other ETag) bool {
getStart := func(e1 ETag) int {
if e1.IsWeak() {
return 2
}
return 0
}
aStart := getStart(e)
bStart := getStart(other)
aVal := e[aStart:]
bVal := other[bStart:]
return aVal == bVal
}
// IsWeak specifies whether the ETag is strong or weak.
func (e ETag) IsWeak() bool {
return len(e) >= 4 && strings.HasPrefix(string(e), "W/\"") && strings.HasSuffix(string(e), "\"")
}
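// Example (illustrative): only the opaque-tags take part in a weak comparison,
// so a weak and a strong ETag with the same value are weak-equal but never
// strong-equal.
//
//	ETag(`W/"xyzzy"`).WeakEquals(ETag(`"xyzzy"`)) // true
//	ETag(`W/"xyzzy"`).Equals(ETag(`"xyzzy"`))     // false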

View file

@ -0,0 +1,61 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package exported
import (
"io"
"io/ioutil"
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
)
type nopCloser struct {
io.ReadSeeker
}
func (n nopCloser) Close() error {
return nil
}
// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker.
// Exported as streaming.NopCloser().
func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {
return nopCloser{rs}
}
// HasStatusCode returns true if the Response's status code is one of the specified values.
// Exported as runtime.HasStatusCode().
func HasStatusCode(resp *http.Response, statusCodes ...int) bool {
if resp == nil {
return false
}
for _, sc := range statusCodes {
if resp.StatusCode == sc {
return true
}
}
return false
}
// Payload reads and returns the response body or an error.
// On a successful read, the response body is cached.
// Subsequent reads will access the cached value.
// Exported as runtime.Payload().
func Payload(resp *http.Response) ([]byte, error) {
// resp.Body won't be a *NopClosingBytesReader if downloading was skipped
if buf, ok := resp.Body.(*shared.NopClosingBytesReader); ok {
return buf.Bytes(), nil
}
bytesBody, err := io.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
return nil, err
}
resp.Body = shared.NewNopClosingBytesReader(bytesBody)
return bytesBody, nil
}
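// Example (illustrative): because Payload caches the body on first read,
// subsequent calls return the cached bytes without additional IO.
//
//	b1, _ := Payload(resp) // reads and caches the body
//	b2, _ := Payload(resp) // returns the cached bytes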

View file

@ -0,0 +1,97 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package exported
import (
"errors"
"fmt"
"net/http"
"golang.org/x/net/http/httpguts"
)
// Policy represents an extensibility point for the Pipeline that can mutate the specified
// Request and react to the received Response.
// Exported as policy.Policy.
type Policy interface {
// Do applies the policy to the specified Request. When implementing a Policy, mutate the
// request before calling req.Next() to move on to the next policy, and respond to the result
// before returning to the caller.
Do(req *Request) (*http.Response, error)
}
// Pipeline represents a primitive for sending HTTP requests and receiving responses.
// Its behavior can be extended by specifying policies during construction.
// Exported as runtime.Pipeline.
type Pipeline struct {
policies []Policy
}
// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses.
// Exported as policy.Transporter.
type Transporter interface {
// Do sends the HTTP request and returns the HTTP response or error.
Do(req *http.Request) (*http.Response, error)
}
// used to adapt a TransportPolicy to a Policy
type transportPolicy struct {
trans Transporter
}
func (tp transportPolicy) Do(req *Request) (*http.Response, error) {
if tp.trans == nil {
return nil, errors.New("missing transporter")
}
resp, err := tp.trans.Do(req.Raw())
if err != nil {
return nil, err
} else if resp == nil {
// there was no response and no error (rare but can happen)
// this ensures the retry policy will retry the request
return nil, errors.New("received nil response")
}
return resp, nil
}
// NewPipeline creates a new Pipeline object from the specified Policies.
// Not directly exported, but used as part of runtime.NewPipeline().
func NewPipeline(transport Transporter, policies ...Policy) Pipeline {
// transport policy must always be the last in the slice
policies = append(policies, transportPolicy{trans: transport})
return Pipeline{
policies: policies,
}
}
// Do is called for each and every HTTP request. It passes the request through all
// the Policy objects (which can transform the Request's URL/query parameters/headers)
// and ultimately sends the transformed HTTP request over the network.
func (p Pipeline) Do(req *Request) (*http.Response, error) {
if req == nil {
return nil, errors.New("request cannot be nil")
}
// check copied from Transport.roundTrip()
for k, vv := range req.Raw().Header {
if !httpguts.ValidHeaderFieldName(k) {
if req.Raw().Body != nil {
req.Raw().Body.Close()
}
return nil, fmt.Errorf("invalid header field name %q", k)
}
for _, v := range vv {
if !httpguts.ValidHeaderFieldValue(v) {
if req.Raw().Body != nil {
req.Raw().Body.Close()
}
return nil, fmt.Errorf("invalid header field value %q for key %v", v, k)
}
}
}
req.policies = p.policies
return req.Next()
}
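// Example (illustrative, with hypothetical myTransport and myPolicy values):
// policies run in the order provided, with the transport invoked last.
//
//	pl := NewPipeline(myTransport, myPolicy)
//	req, err := NewRequest(context.TODO(), http.MethodGet, "https://contoso.example/items")
//	// handle err
//	resp, err := pl.Do(req)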

View file

@ -0,0 +1,156 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package exported
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"reflect"
"strconv"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
)
// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline.
// Don't use this type directly, use NewRequest() instead.
// Exported as policy.Request.
type Request struct {
req *http.Request
body io.ReadSeekCloser
policies []Policy
values opValues
}
type opValues map[reflect.Type]interface{}
// set adds/changes a value
func (ov opValues) set(value interface{}) {
ov[reflect.TypeOf(value)] = value
}
// get looks for a value previously stored via set()
func (ov opValues) get(value interface{}) bool {
v, ok := ov[reflect.ValueOf(value).Elem().Type()]
if ok {
reflect.ValueOf(value).Elem().Set(reflect.ValueOf(v))
}
return ok
}
// NewRequest creates a new Request with the specified input.
// Exported as runtime.NewRequest().
func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) {
req, err := http.NewRequestWithContext(ctx, httpMethod, endpoint, nil)
if err != nil {
return nil, err
}
if req.URL.Host == "" {
return nil, errors.New("no Host in request URL")
}
if !(req.URL.Scheme == "http" || req.URL.Scheme == "https") {
return nil, fmt.Errorf("unsupported protocol scheme %s", req.URL.Scheme)
}
return &Request{req: req}, nil
}
// Body returns the original body specified when the Request was created.
func (req *Request) Body() io.ReadSeekCloser {
return req.body
}
// Raw returns the underlying HTTP request.
func (req *Request) Raw() *http.Request {
return req.req
}
// Next calls the next policy in the pipeline.
// If there are no more policies, nil and an error are returned.
// This method is intended to be called from pipeline policies.
// To send a request through a pipeline call Pipeline.Do().
func (req *Request) Next() (*http.Response, error) {
if len(req.policies) == 0 {
return nil, errors.New("no more policies")
}
nextPolicy := req.policies[0]
nextReq := *req
nextReq.policies = nextReq.policies[1:]
return nextPolicy.Do(&nextReq)
}
// SetOperationValue adds/changes a mutable key/value associated with a single operation.
func (req *Request) SetOperationValue(value interface{}) {
if req.values == nil {
req.values = opValues{}
}
req.values.set(value)
}
// OperationValue looks for a value set by SetOperationValue().
func (req *Request) OperationValue(value interface{}) bool {
if req.values == nil {
return false
}
return req.values.get(value)
}
// SetBody sets the specified ReadSeekCloser as the HTTP request body.
func (req *Request) SetBody(body io.ReadSeekCloser, contentType string) error {
// Set the body and content length.
size, err := body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size
if err != nil {
return err
}
if size == 0 {
body.Close()
return nil
}
_, err = body.Seek(0, io.SeekStart)
if err != nil {
return err
}
req.Raw().GetBody = func() (io.ReadCloser, error) {
_, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream
return body, err
}
// keep a copy of the original body. this is to handle cases
// where req.Body is replaced, e.g. httputil.DumpRequest and friends.
req.body = body
req.req.Body = body
req.req.ContentLength = size
req.req.Header.Set(shared.HeaderContentType, contentType)
req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10))
return nil
}
// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation.
func (req *Request) RewindBody() error {
if req.body != nil {
// Reset the stream back to the beginning and restore the body
_, err := req.body.Seek(0, io.SeekStart)
req.req.Body = req.body
return err
}
return nil
}
// Close closes the request body.
func (req *Request) Close() error {
if req.body == nil {
return nil
}
return req.body.Close()
}
// Clone returns a deep copy of the request with its context changed to ctx.
func (req *Request) Clone(ctx context.Context) *Request {
r2 := *req
r2.req = req.req.Clone(ctx)
return &r2
}

View file

@ -0,0 +1,142 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package exported
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"regexp"
)
// NewResponseError creates a new *ResponseError from the provided HTTP response.
// Exported as runtime.NewResponseError().
func NewResponseError(resp *http.Response) error {
respErr := &ResponseError{
StatusCode: resp.StatusCode,
RawResponse: resp,
}
// prefer the error code in the response header
if ec := resp.Header.Get("x-ms-error-code"); ec != "" {
respErr.ErrorCode = ec
return respErr
}
// if we didn't get x-ms-error-code, check in the response body
body, err := Payload(resp)
if err != nil {
return err
}
if len(body) > 0 {
if code := extractErrorCodeJSON(body); code != "" {
respErr.ErrorCode = code
} else if code := extractErrorCodeXML(body); code != "" {
respErr.ErrorCode = code
}
}
return respErr
}
func extractErrorCodeJSON(body []byte) string {
var rawObj map[string]interface{}
if err := json.Unmarshal(body, &rawObj); err != nil {
// not a JSON object
return ""
}
// check if this is a wrapped error, i.e. { "error": { ... } }
// if so then unwrap it
if wrapped, ok := rawObj["error"]; ok {
unwrapped, ok := wrapped.(map[string]interface{})
if !ok {
return ""
}
rawObj = unwrapped
} else if wrapped, ok := rawObj["odata.error"]; ok {
// check if this is a wrapped odata error, i.e. { "odata.error": { ... } }
unwrapped, ok := wrapped.(map[string]interface{})
if !ok {
return ""
}
rawObj = unwrapped
}
// now check for the error code
code, ok := rawObj["code"]
if !ok {
return ""
}
codeStr, ok := code.(string)
if !ok {
return ""
}
return codeStr
}
func extractErrorCodeXML(body []byte) string {
// regular expression is much easier than dealing with the XML parser
rx := regexp.MustCompile(`<(?:\w+:)?[cC]ode>\s*(\w+)\s*</(?:\w+:)?[cC]ode>`)
res := rx.FindStringSubmatch(string(body))
if len(res) != 2 {
return ""
}
// first submatch is the entire thing, second one is the captured error code
return res[1]
}
// ResponseError is returned when a request is made to a service and
// the service returns a non-success HTTP status code.
// Use errors.As() to access this type in the error chain.
// Exported as azcore.ResponseError.
type ResponseError struct {
// ErrorCode is the error code returned by the resource provider if available.
ErrorCode string
// StatusCode is the HTTP status code as defined in https://pkg.go.dev/net/http#pkg-constants.
StatusCode int
// RawResponse is the underlying HTTP response.
RawResponse *http.Response
}
// Error implements the error interface for type ResponseError.
// Note that the message contents are not contractual and can change over time.
func (e *ResponseError) Error() string {
// write the request method and URL with response status code
msg := &bytes.Buffer{}
fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path)
fmt.Fprintln(msg, "--------------------------------------------------------------------------------")
fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status)
if e.ErrorCode != "" {
fmt.Fprintf(msg, "ERROR CODE: %s\n", e.ErrorCode)
} else {
fmt.Fprintln(msg, "ERROR CODE UNAVAILABLE")
}
fmt.Fprintln(msg, "--------------------------------------------------------------------------------")
body, err := Payload(e.RawResponse)
if err != nil {
// this really shouldn't fail at this point as the response
// body is already cached (it was read in NewResponseError)
fmt.Fprintf(msg, "Error reading response body: %v", err)
} else if len(body) > 0 {
if err := json.Indent(msg, body, "", " "); err != nil {
// failed to pretty-print so just dump it verbatim
fmt.Fprint(msg, string(body))
}
// the standard library doesn't have a pretty-printer for XML
fmt.Fprintln(msg)
} else {
fmt.Fprintln(msg, "Response contained no body")
}
fmt.Fprintln(msg, "--------------------------------------------------------------------------------")
return msg.String()
}

View file

@ -0,0 +1,38 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// This is an internal helper package to combine the complete logging APIs.
package log
import (
azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
)
type Event = log.Event
const (
EventRequest = azlog.EventRequest
EventResponse = azlog.EventResponse
EventRetryPolicy = azlog.EventRetryPolicy
EventLRO = azlog.EventLRO
)
func Write(cls log.Event, msg string) {
log.Write(cls, msg)
}
func Writef(cls log.Event, format string, a ...interface{}) {
log.Writef(cls, format, a...)
}
func SetListener(lst func(Event, string)) {
log.SetListener(lst)
}
func Should(cls log.Event) bool {
return log.Should(cls)
}

View file

@ -0,0 +1,147 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package async
import (
"context"
"errors"
"fmt"
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
)
// see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/async-api-reference.md
// Applicable returns true if the LRO is using Azure-AsyncOperation.
func Applicable(resp *http.Response) bool {
return resp.Header.Get(shared.HeaderAzureAsync) != ""
}
// CanResume returns true if the token can rehydrate this poller type.
func CanResume(token map[string]interface{}) bool {
_, ok := token["asyncURL"]
return ok
}
// Poller is an LRO poller that uses the Azure-AsyncOperation pattern.
type Poller[T any] struct {
pl exported.Pipeline
resp *http.Response
// The URL from Azure-AsyncOperation header.
AsyncURL string `json:"asyncURL"`
// The URL from Location header.
LocURL string `json:"locURL"`
// The URL from the initial LRO request.
OrigURL string `json:"origURL"`
// The HTTP method from the initial LRO request.
Method string `json:"method"`
// The value of final-state-via from swagger, can be the empty string.
FinalState pollers.FinalStateVia `json:"finalState"`
// The LRO's current state.
CurState string `json:"state"`
}
// New creates a new Poller from the provided initial response and final-state type.
// Pass nil for response to create an empty Poller for rehydration.
func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) {
if resp == nil {
log.Write(log.EventLRO, "Resuming Azure-AsyncOperation poller.")
return &Poller[T]{pl: pl}, nil
}
log.Write(log.EventLRO, "Using Azure-AsyncOperation poller.")
asyncURL := resp.Header.Get(shared.HeaderAzureAsync)
if asyncURL == "" {
return nil, errors.New("response is missing Azure-AsyncOperation header")
}
if !pollers.IsValidURL(asyncURL) {
return nil, fmt.Errorf("invalid polling URL %s", asyncURL)
}
p := &Poller[T]{
pl: pl,
resp: resp,
AsyncURL: asyncURL,
LocURL: resp.Header.Get(shared.HeaderLocation),
OrigURL: resp.Request.URL.String(),
Method: resp.Request.Method,
FinalState: finalState,
CurState: pollers.StatusInProgress,
}
return p, nil
}
// Done returns true if the LRO is in a terminal state.
func (p *Poller[T]) Done() bool {
return pollers.IsTerminalState(p.CurState)
}
// Poll retrieves the current state of the LRO.
func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
err := pollers.PollHelper(ctx, p.AsyncURL, p.pl, func(resp *http.Response) (string, error) {
state, err := pollers.GetStatus(resp)
if err != nil {
return "", err
} else if state == "" {
return "", errors.New("the response did not contain a status")
}
p.resp = resp
p.CurState = state
return p.CurState, nil
})
if err != nil {
return nil, err
}
return p.resp, nil
}
// Result retrieves the final payload of the LRO, issuing a final GET if required by the pattern, and unmarshals it into out.
func (p *Poller[T]) Result(ctx context.Context, out *T) error {
if p.resp.StatusCode == http.StatusNoContent {
return nil
} else if pollers.Failed(p.CurState) {
return exported.NewResponseError(p.resp)
}
var req *exported.Request
var err error
if p.Method == http.MethodPatch || p.Method == http.MethodPut {
// for PATCH and PUT, the final GET is on the original resource URL
req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL)
} else if p.Method == http.MethodPost {
if p.FinalState == pollers.FinalStateViaAzureAsyncOp {
// no final GET required
} else if p.FinalState == pollers.FinalStateViaOriginalURI {
req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL)
} else if p.LocURL != "" {
// ideally FinalState would be set to "location" but it isn't always.
// must check last due to more permissive condition.
req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL)
}
}
if err != nil {
return err
}
// if a final GET request has been created, execute it
if req != nil {
resp, err := p.pl.Do(req)
if err != nil {
return err
}
p.resp = resp
}
return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out)
}

View file

@ -0,0 +1,130 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package body
import (
"context"
"errors"
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
)
// kind is the identifier of this type in a resume token.
const kind = "body"
// Applicable returns true if the LRO is using no headers, just provisioning state.
// This is only applicable to PATCH and PUT methods and assumes no polling headers.
func Applicable(resp *http.Response) bool {
// we can't check for absence of headers due to some misbehaving services
// like redis that return a Location header but don't actually use that protocol
return resp.Request.Method == http.MethodPatch || resp.Request.Method == http.MethodPut
}
// CanResume returns true if the token can rehydrate this poller type.
func CanResume(token map[string]interface{}) bool {
t, ok := token["type"]
if !ok {
return false
}
tt, ok := t.(string)
if !ok {
return false
}
return tt == kind
}
// Poller is an LRO poller that uses the Body pattern.
type Poller[T any] struct {
pl exported.Pipeline
resp *http.Response
// The poller's type, used for resume token processing.
Type string `json:"type"`
// The URL for polling.
PollURL string `json:"pollURL"`
// The LRO's current state.
CurState string `json:"state"`
}
// New creates a new Poller from the provided initial response.
// Pass nil for response to create an empty Poller for rehydration.
func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) {
if resp == nil {
log.Write(log.EventLRO, "Resuming Body poller.")
return &Poller[T]{pl: pl}, nil
}
log.Write(log.EventLRO, "Using Body poller.")
p := &Poller[T]{
pl: pl,
resp: resp,
Type: kind,
PollURL: resp.Request.URL.String(),
}
// default initial state to InProgress. depending on the HTTP
// status code and provisioning state, we might change the value.
curState := pollers.StatusInProgress
provState, err := pollers.GetProvisioningState(resp)
if err != nil && !errors.Is(err, pollers.ErrNoBody) {
return nil, err
}
if resp.StatusCode == http.StatusCreated && provState != "" {
// absence of provisioning state is ok for a 201, means the operation is in progress
curState = provState
} else if resp.StatusCode == http.StatusOK {
if provState != "" {
curState = provState
} else {
// for a 200, absence of provisioning state indicates success
curState = pollers.StatusSucceeded
}
} else if resp.StatusCode == http.StatusNoContent {
curState = pollers.StatusSucceeded
}
p.CurState = curState
return p, nil
}
// Done returns true if the LRO is in a terminal state.
func (p *Poller[T]) Done() bool {
return pollers.IsTerminalState(p.CurState)
}
// Poll retrieves the current state of the LRO.
func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) {
if resp.StatusCode == http.StatusNoContent {
p.resp = resp
p.CurState = pollers.StatusSucceeded
return p.CurState, nil
}
state, err := pollers.GetProvisioningState(resp)
if err != nil {
// a missing response body in the non-204 case is an error,
// as is a body that can't be read or parsed
return "", err
} else if state == "" {
// a response body without provisioning state is considered terminal success
state = pollers.StatusSucceeded
}
p.resp = resp
p.CurState = state
return p.CurState, nil
})
if err != nil {
return nil, err
}
return p.resp, nil
}
// Result unmarshals the final payload into out, or returns the LRO's error.
func (p *Poller[T]) Result(ctx context.Context, out *T) error {
return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out)
}

View file

@ -0,0 +1,111 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package loc
import (
"context"
"errors"
"fmt"
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
)
// kind is the identifier of this type in a resume token.
const kind = "loc"
// Applicable returns true if the LRO is using Location.
func Applicable(resp *http.Response) bool {
return resp.Header.Get(shared.HeaderLocation) != ""
}
// CanResume returns true if the token can rehydrate this poller type.
func CanResume(token map[string]interface{}) bool {
t, ok := token["type"]
if !ok {
return false
}
tt, ok := t.(string)
if !ok {
return false
}
return tt == kind
}
// Poller is an LRO poller that uses the Location pattern.
type Poller[T any] struct {
pl exported.Pipeline
resp *http.Response
Type string `json:"type"`
PollURL string `json:"pollURL"`
CurState string `json:"state"`
}
// New creates a new Poller from the provided initial response.
// Pass nil for response to create an empty Poller for rehydration.
func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) {
if resp == nil {
log.Write(log.EventLRO, "Resuming Location poller.")
return &Poller[T]{pl: pl}, nil
}
log.Write(log.EventLRO, "Using Location poller.")
locURL := resp.Header.Get(shared.HeaderLocation)
if locURL == "" {
return nil, errors.New("response is missing Location header")
}
if !pollers.IsValidURL(locURL) {
return nil, fmt.Errorf("invalid polling URL %s", locURL)
}
return &Poller[T]{
pl: pl,
resp: resp,
Type: kind,
PollURL: locURL,
CurState: pollers.StatusInProgress,
}, nil
}
// Done returns true if the LRO is in a terminal state.
func (p *Poller[T]) Done() bool {
return pollers.IsTerminalState(p.CurState)
}
// Poll retrieves the current state of the LRO.
func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) {
// location polling can return an updated polling URL
if h := resp.Header.Get(shared.HeaderLocation); h != "" {
p.PollURL = h
}
// if provisioning state is available, use that. this is only
// for some ARM LRO scenarios (e.g. DELETE with a Location header)
// so if it's missing then use HTTP status code.
provState, _ := pollers.GetProvisioningState(resp)
p.resp = resp
if provState != "" {
p.CurState = provState
} else if resp.StatusCode == http.StatusAccepted {
p.CurState = pollers.StatusInProgress
} else if resp.StatusCode > 199 && resp.StatusCode < 300 {
// any 2xx other than a 202 indicates success
p.CurState = pollers.StatusSucceeded
} else {
p.CurState = pollers.StatusFailed
}
return p.CurState, nil
})
if err != nil {
return nil, err
}
return p.resp, nil
}
// Result unmarshals the final payload into out, or returns the LRO's error.
func (p *Poller[T]) Result(ctx context.Context, out *T) error {
return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out)
}

View file

@ -0,0 +1,140 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package op
import (
"context"
"errors"
"fmt"
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
)
// Applicable returns true if the LRO is using Operation-Location.
func Applicable(resp *http.Response) bool {
return resp.Header.Get(shared.HeaderOperationLocation) != ""
}
// CanResume returns true if the token can rehydrate this poller type.
func CanResume(token map[string]interface{}) bool {
_, ok := token["oplocURL"]
return ok
}
// Poller is an LRO poller that uses the Operation-Location pattern.
type Poller[T any] struct {
pl exported.Pipeline
resp *http.Response
OpLocURL string `json:"oplocURL"`
LocURL string `json:"locURL"`
OrigURL string `json:"origURL"`
Method string `json:"method"`
FinalState pollers.FinalStateVia `json:"finalState"`
CurState string `json:"state"`
}
// New creates a new Poller from the provided initial response.
// Pass nil for response to create an empty Poller for rehydration.
func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) {
if resp == nil {
log.Write(log.EventLRO, "Resuming Operation-Location poller.")
return &Poller[T]{pl: pl}, nil
}
log.Write(log.EventLRO, "Using Operation-Location poller.")
opURL := resp.Header.Get(shared.HeaderOperationLocation)
if opURL == "" {
return nil, errors.New("response is missing Operation-Location header")
}
if !pollers.IsValidURL(opURL) {
return nil, fmt.Errorf("invalid Operation-Location URL %s", opURL)
}
locURL := resp.Header.Get(shared.HeaderLocation)
// Location header is optional
if locURL != "" && !pollers.IsValidURL(locURL) {
return nil, fmt.Errorf("invalid Location URL %s", locURL)
}
// default initial state to InProgress. if the
// service sent us a status then use that instead.
curState := pollers.StatusInProgress
status, err := pollers.GetStatus(resp)
if err != nil && !errors.Is(err, pollers.ErrNoBody) {
return nil, err
}
if status != "" {
curState = status
}
return &Poller[T]{
pl: pl,
resp: resp,
OpLocURL: opURL,
LocURL: locURL,
OrigURL: resp.Request.URL.String(),
Method: resp.Request.Method,
FinalState: finalState,
CurState: curState,
}, nil
}
// Done returns true if the LRO is in a terminal state.
func (p *Poller[T]) Done() bool {
return pollers.IsTerminalState(p.CurState)
}
// Poll retrieves the current state of the LRO.
func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
err := pollers.PollHelper(ctx, p.OpLocURL, p.pl, func(resp *http.Response) (string, error) {
state, err := pollers.GetStatus(resp)
if err != nil {
return "", err
} else if state == "" {
return "", errors.New("the response did not contain a status")
}
p.resp = resp
p.CurState = state
return p.CurState, nil
})
if err != nil {
return nil, err
}
return p.resp, nil
}
// Result retrieves the final payload of the LRO, issuing a final GET if required, and unmarshals it into out.
func (p *Poller[T]) Result(ctx context.Context, out *T) error {
var req *exported.Request
var err error
if p.FinalState == pollers.FinalStateViaLocation && p.LocURL != "" {
req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL)
} else if p.FinalState == pollers.FinalStateViaOpLocation && p.Method == http.MethodPost {
// no final GET required, terminal response should have it
} else if rl, rlErr := pollers.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, pollers.ErrNoBody) {
return rlErr
} else if rl != "" {
req, err = exported.NewRequest(ctx, http.MethodGet, rl)
} else if p.Method == http.MethodPatch || p.Method == http.MethodPut {
req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL)
} else if p.Method == http.MethodPost && p.LocURL != "" {
req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL)
}
if err != nil {
return err
}
// if a final GET request has been created, execute it
if req != nil {
resp, err := p.pl.Do(req)
if err != nil {
return err
}
p.resp = resp
}
return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out)
}

View file

@ -0,0 +1,24 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package pollers
// FinalStateVia is the enumerated type for the possible final-state-via values.
type FinalStateVia string
const (
// FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL.
FinalStateViaAzureAsyncOp FinalStateVia = "azure-async-operation"
// FinalStateViaLocation indicates the final payload comes from the Location URL.
FinalStateViaLocation FinalStateVia = "location"
// FinalStateViaOriginalURI indicates the final payload comes from the original URL.
FinalStateViaOriginalURI FinalStateVia = "original-uri"
// FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL.
FinalStateViaOpLocation FinalStateVia = "operation-location"
)

View file

@ -0,0 +1,317 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package pollers
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"reflect"
"strings"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
)
// the well-known set of LRO status/provisioning state values.
const (
StatusSucceeded = "Succeeded"
StatusCanceled = "Canceled"
StatusFailed = "Failed"
StatusInProgress = "InProgress"
)
// IsTerminalState returns true if the LRO's state is terminal.
func IsTerminalState(s string) bool {
return strings.EqualFold(s, StatusSucceeded) || strings.EqualFold(s, StatusFailed) || strings.EqualFold(s, StatusCanceled)
}
// Failed returns true if the LRO's state is terminal failure.
func Failed(s string) bool {
return strings.EqualFold(s, StatusFailed) || strings.EqualFold(s, StatusCanceled)
}
// Succeeded returns true if the LRO's state is terminal success.
func Succeeded(s string) bool {
return strings.EqualFold(s, StatusSucceeded)
}
// StatusCodeValid returns true if the LRO response contains a valid HTTP status code.
func StatusCodeValid(resp *http.Response) bool {
return exported.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusCreated, http.StatusNoContent)
}
// IsValidURL verifies that the URL is valid and absolute.
func IsValidURL(s string) bool {
u, err := url.Parse(s)
return err == nil && u.IsAbs()
}
// getTokenTypeName creates a type name from the type parameter T.
func getTokenTypeName[T any]() (string, error) {
tt := shared.TypeOfT[T]()
var n string
if tt.Kind() == reflect.Pointer {
n = "*"
tt = tt.Elem()
}
n += tt.Name()
if n == "" {
return "", errors.New("nameless types are not allowed")
}
return n, nil
}
type resumeTokenWrapper[T any] struct {
Type string `json:"type"`
Token T `json:"token"`
}
// NewResumeToken creates a resume token from the specified type.
// An error is returned if the generic type has no name (e.g. struct{}).
func NewResumeToken[TResult, TSource any](from TSource) (string, error) {
n, err := getTokenTypeName[TResult]()
if err != nil {
return "", err
}
b, err := json.Marshal(resumeTokenWrapper[TSource]{
Type: n,
Token: from,
})
if err != nil {
return "", err
}
return string(b), nil
}
// ExtractToken returns the poller-specific token information from the provided token value.
func ExtractToken(token string) ([]byte, error) {
raw := map[string]json.RawMessage{}
if err := json.Unmarshal([]byte(token), &raw); err != nil {
return nil, err
}
// this is dependent on the type resumeTokenWrapper[T]
tk, ok := raw["token"]
if !ok {
return nil, errors.New("missing token value")
}
return tk, nil
}
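// Example (illustrative, with hypothetical Widget and widgetState types):
// NewResumeToken wraps the poller state with the result type's name, and
// ExtractToken recovers the inner state.
//
//	type widgetState struct {
//		URL string `json:"url"`
//	}
//	tk, _ := NewResumeToken[Widget](widgetState{URL: "https://contoso.example/op/1"})
//	// tk == `{"type":"Widget","token":{"url":"https://contoso.example/op/1"}}`
//	raw, _ := ExtractToken(tk)
//	// raw == `{"url":"https://contoso.example/op/1"}`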
// IsTokenValid returns an error if the specified token isn't applicable for generic type T.
func IsTokenValid[T any](token string) error {
raw := map[string]interface{}{}
if err := json.Unmarshal([]byte(token), &raw); err != nil {
return err
}
t, ok := raw["type"]
if !ok {
return errors.New("missing type value")
}
tt, ok := t.(string)
if !ok {
return fmt.Errorf("invalid type format %T", t)
}
n, err := getTokenTypeName[T]()
if err != nil {
return err
}
if tt != n {
return fmt.Errorf("cannot resume from this poller token. token is for type %s, not %s", tt, n)
}
return nil
}
// ErrNoBody is returned if the response didn't contain a body.
var ErrNoBody = errors.New("the response did not contain a body")
// GetJSON reads the response body into a raw JSON object.
// It returns ErrNoBody if there was no content.
func GetJSON(resp *http.Response) (map[string]interface{}, error) {
body, err := exported.Payload(resp)
if err != nil {
return nil, err
}
if len(body) == 0 {
return nil, ErrNoBody
}
// unmarshal the body to get the value
var jsonBody map[string]interface{}
if err = json.Unmarshal(body, &jsonBody); err != nil {
return nil, err
}
return jsonBody, nil
}
// provisioningState returns the provisioning state from the response or the empty string.
func provisioningState(jsonBody map[string]interface{}) string {
jsonProps, ok := jsonBody["properties"]
if !ok {
return ""
}
props, ok := jsonProps.(map[string]interface{})
if !ok {
return ""
}
rawPs, ok := props["provisioningState"]
if !ok {
return ""
}
ps, ok := rawPs.(string)
if !ok {
return ""
}
return ps
}
// status returns the status from the response or the empty string.
func status(jsonBody map[string]interface{}) string {
rawStatus, ok := jsonBody["status"]
if !ok {
return ""
}
status, ok := rawStatus.(string)
if !ok {
return ""
}
return status
}
// GetStatus returns the LRO's status from the response body.
// Typically used for Azure-AsyncOperation flows.
// If there is no status in the response body the empty string is returned.
func GetStatus(resp *http.Response) (string, error) {
jsonBody, err := GetJSON(resp)
if err != nil {
return "", err
}
return status(jsonBody), nil
}
// GetProvisioningState returns the LRO's state from the response body.
// If there is no state in the response body the empty string is returned.
func GetProvisioningState(resp *http.Response) (string, error) {
jsonBody, err := GetJSON(resp)
if err != nil {
return "", err
}
return provisioningState(jsonBody), nil
}
// GetResourceLocation returns the LRO's resourceLocation value from the response body.
// Typically used for Operation-Location flows.
// If there is no resourceLocation in the response body the empty string is returned.
func GetResourceLocation(resp *http.Response) (string, error) {
jsonBody, err := GetJSON(resp)
if err != nil {
return "", err
}
v, ok := jsonBody["resourceLocation"]
if !ok {
// it might be ok if the field doesn't exist, the caller must make that determination
return "", nil
}
vv, ok := v.(string)
if !ok {
return "", fmt.Errorf("the resourceLocation value %v was not in string format", v)
}
return vv, nil
}
// NopPoller is used if the operation synchronously completed.
type NopPoller[T any] struct {
resp *http.Response
result T
}
// NewNopPoller creates a NopPoller from the provided response.
// It unmarshals the response body into an instance of T.
func NewNopPoller[T any](resp *http.Response) (*NopPoller[T], error) {
np := &NopPoller[T]{resp: resp}
if resp.StatusCode == http.StatusNoContent {
return np, nil
}
payload, err := exported.Payload(resp)
if err != nil {
return nil, err
}
if len(payload) == 0 {
return np, nil
}
if err = json.Unmarshal(payload, &np.result); err != nil {
return nil, err
}
return np, nil
}
func (*NopPoller[T]) Done() bool {
return true
}
func (p *NopPoller[T]) Poll(context.Context) (*http.Response, error) {
return p.resp, nil
}
func (p *NopPoller[T]) Result(ctx context.Context, out *T) error {
*out = p.result
return nil
}
// PollHelper creates and executes the request, calling update() with the response.
// If the request fails, the update func is not called.
// The update func returns the state of the operation for logging purposes or an error
// if it fails to extract the required state from the response.
func PollHelper(ctx context.Context, endpoint string, pl exported.Pipeline, update func(resp *http.Response) (string, error)) error {
req, err := exported.NewRequest(ctx, http.MethodGet, endpoint)
if err != nil {
return err
}
resp, err := pl.Do(req)
if err != nil {
return err
}
state, err := update(resp)
if err != nil {
return err
}
log.Writef(log.EventLRO, "State %s", state)
return nil
}
// ResultHelper processes the response as success or failure.
// In the success case, it unmarshals the payload into either a new instance of T or out.
// In the failure case, it creates an *azcore.ResponseError from the response.
func ResultHelper[T any](resp *http.Response, failed bool, out *T) error {
// short-circuit the simple success case with no response body to unmarshal
if resp.StatusCode == http.StatusNoContent {
return nil
}
defer resp.Body.Close()
if !StatusCodeValid(resp) || failed {
// the LRO failed. unmarshal the error and update state
return exported.NewResponseError(resp)
}
// success case
payload, err := exported.Payload(resp)
if err != nil {
return err
}
if len(payload) == 0 {
return nil
}
if err = json.Unmarshal(payload, out); err != nil {
return err
}
return nil
}

View file

@ -0,0 +1,34 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package shared
const (
ContentTypeAppJSON = "application/json"
ContentTypeAppXML = "application/xml"
)
const (
HeaderAuthorization = "Authorization"
HeaderAuxiliaryAuthorization = "x-ms-authorization-auxiliary"
HeaderAzureAsync = "Azure-AsyncOperation"
HeaderContentLength = "Content-Length"
HeaderContentType = "Content-Type"
HeaderLocation = "Location"
HeaderOperationLocation = "Operation-Location"
HeaderRetryAfter = "Retry-After"
HeaderUserAgent = "User-Agent"
)
const BearerTokenPrefix = "Bearer "
const (
// Module is the name of the calling module used in telemetry data.
Module = "azcore"
// Version is the semantic version (see http://semver.org) of this module.
Version = "v1.0.0"
)

View file

@ -0,0 +1,135 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package shared
import (
"context"
"errors"
"io"
"net/http"
"reflect"
"strconv"
"time"
)
// CtxWithHTTPHeaderKey is used as a context key for adding/retrieving http.Header.
type CtxWithHTTPHeaderKey struct{}
// CtxWithRetryOptionsKey is used as a context key for adding/retrieving RetryOptions.
type CtxWithRetryOptionsKey struct{}
// CtxIncludeResponseKey is used as a context key for retrieving the raw response.
type CtxIncludeResponseKey struct{}
// Delay waits for the duration to elapse or the context to be cancelled.
func Delay(ctx context.Context, delay time.Duration) error {
select {
case <-time.After(delay):
return nil
case <-ctx.Done():
return ctx.Err()
}
}
// RetryAfter returns non-zero if the response contains a Retry-After header value.
func RetryAfter(resp *http.Response) time.Duration {
if resp == nil {
return 0
}
ra := resp.Header.Get(HeaderRetryAfter)
if ra == "" {
return 0
}
// retry-after values are expressed in either number of
// seconds or an HTTP-date indicating when to try again
if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 {
return time.Duration(retryAfter) * time.Second
} else if t, err := time.Parse(time.RFC1123, ra); err == nil {
return time.Until(t)
}
return 0
}
// TypeOfT returns the type of the generic type param.
func TypeOfT[T any]() reflect.Type {
// you can't, at present, obtain the type of
// a type parameter, so this is the trick
return reflect.TypeOf((*T)(nil)).Elem()
}
// BytesSetter abstracts replacing a byte slice on some type.
type BytesSetter interface {
Set(b []byte)
}
// NewNopClosingBytesReader creates a new *NopClosingBytesReader for the specified slice.
func NewNopClosingBytesReader(data []byte) *NopClosingBytesReader {
return &NopClosingBytesReader{s: data}
}
// NopClosingBytesReader is an io.ReadSeekCloser around a byte slice.
// It also provides direct access to the byte slice to avoid rereading.
type NopClosingBytesReader struct {
s []byte
i int64
}
// Bytes returns the underlying byte slice.
func (r *NopClosingBytesReader) Bytes() []byte {
return r.s
}
// Close implements the io.Closer interface.
func (*NopClosingBytesReader) Close() error {
return nil
}
// Read implements the io.Reader interface.
func (r *NopClosingBytesReader) Read(b []byte) (n int, err error) {
if r.i >= int64(len(r.s)) {
return 0, io.EOF
}
n = copy(b, r.s[r.i:])
r.i += int64(n)
return
}
// Set replaces the existing byte slice with the specified byte slice and resets the reader.
func (r *NopClosingBytesReader) Set(b []byte) {
r.s = b
r.i = 0
}
// Seek implements the io.Seeker interface.
func (r *NopClosingBytesReader) Seek(offset int64, whence int) (int64, error) {
var i int64
switch whence {
case io.SeekStart:
i = offset
case io.SeekCurrent:
i = r.i + offset
case io.SeekEnd:
i = int64(len(r.s)) + offset
default:
return 0, errors.New("nopClosingBytesReader: invalid whence")
}
if i < 0 {
return 0, errors.New("nopClosingBytesReader: negative position")
}
r.i = i
return i, nil
}
var _ BytesSetter = (*NopClosingBytesReader)(nil)
// TransportFunc is a helper to use a first-class func to satisfy the Transporter interface.
type TransportFunc func(*http.Request) (*http.Response, error)
// Do implements the Transporter interface for the TransportFunc type.
func (pf TransportFunc) Do(req *http.Request) (*http.Response, error) {
return pf(req)
}

View file

@ -0,0 +1,10 @@
//go:build go1.18
// +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.
// Package log contains functionality for configuring logging behavior.
// Default logging to stderr can be enabled by setting environment variable AZURE_SDK_GO_LOGGING to "all".
package log

View file

@ -0,0 +1,50 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Package log provides functionality for configuring logging facilities.
package log
import (
"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
)
// Event is used to group entries. Each group can be toggled on or off.
type Event = log.Event
const (
// EventRequest entries contain information about HTTP requests.
// This includes information like the URL, query parameters, and headers.
EventRequest Event = "Request"
// EventResponse entries contain information about HTTP responses.
// This includes information like the HTTP status code, headers, and request URL.
EventResponse Event = "Response"
// EventRetryPolicy entries contain information specific to the retry policy in use.
EventRetryPolicy Event = "Retry"
// EventLRO entries contain information specific to long-running operations.
// This includes information like polling location, operation state, and sleep intervals.
EventLRO Event = "LongRunningOperation"
)
// SetEvents is used to control which events are written to
// the log. By default all log events are written.
// NOTE: this is not goroutine safe and should be called before using SDK clients.
func SetEvents(cls ...Event) {
log.SetEvents(cls...)
}
// SetListener will set the Logger to write to the specified Listener.
// NOTE: this is not goroutine safe and should be called before using SDK clients.
func SetListener(lst func(Event, string)) {
log.SetListener(lst)
}
// for testing purposes
func resetEvents() {
log.TestResetEvents()
}

View file

@ -0,0 +1,10 @@
//go:build go1.18
// +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.
// Package policy contains the definitions needed for configuring in-box pipeline policies
// and creating custom policies.
package policy

View file

@ -0,0 +1,119 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package policy
import (
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
)
// Policy represents an extensibility point for the Pipeline that can mutate the specified
// Request and react to the received Response.
type Policy = exported.Policy
// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses.
type Transporter = exported.Transporter
// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline.
// Don't use this type directly, use runtime.NewRequest() instead.
type Request = exported.Request
// ClientOptions contains optional settings for a client's pipeline.
// All zero-value fields will be initialized with default values.
type ClientOptions struct {
// Cloud specifies a cloud for the client. The default is Azure Public Cloud.
Cloud cloud.Configuration
// Logging configures the built-in logging policy.
Logging LogOptions
// Retry configures the built-in retry policy.
Retry RetryOptions
// Telemetry configures the built-in telemetry policy.
Telemetry TelemetryOptions
// Transport sets the transport for HTTP requests.
Transport Transporter
// PerCallPolicies contains custom policies to inject into the pipeline.
// Each policy is executed once per request.
PerCallPolicies []Policy
// PerRetryPolicies contains custom policies to inject into the pipeline.
// Each policy is executed once per request, and for each retry of that request.
PerRetryPolicies []Policy
}
// LogOptions configures the logging policy's behavior.
type LogOptions struct {
// IncludeBody indicates if request and response bodies should be included in logging.
// The default value is false.
// NOTE: enabling this can lead to disclosure of sensitive information, use with care.
IncludeBody bool
// AllowedHeaders is the slice of headers to log with their values intact.
// All headers not in the slice will have their values REDACTED.
// Applies to request and response headers.
AllowedHeaders []string
// AllowedQueryParams is the slice of query parameters to log with their values intact.
// All query parameters not in the slice will have their values REDACTED.
AllowedQueryParams []string
}
// RetryOptions configures the retry policy's behavior.
// Call NewRetryOptions() to create an instance with default values.
type RetryOptions struct {
// MaxRetries specifies the maximum number of attempts a failed operation will be retried
// before producing an error.
// The default value is three. A value less than zero means one try and no retries.
MaxRetries int32
// TryTimeout indicates the maximum time allowed for any single try of an HTTP request.
// This is disabled by default. Specify a value greater than zero to enable.
// NOTE: Setting this to a small value might cause premature HTTP request time-outs.
TryTimeout time.Duration
// RetryDelay specifies the initial amount of delay to use before retrying an operation.
// The delay increases exponentially with each retry up to the maximum specified by MaxRetryDelay.
// The default value is four seconds. A value less than zero means no delay between retries.
RetryDelay time.Duration
// MaxRetryDelay specifies the maximum delay allowed before retrying an operation.
// Typically the value is greater than or equal to the value specified in RetryDelay.
// The default Value is 120 seconds. A value less than zero means there is no cap.
MaxRetryDelay time.Duration
// StatusCodes specifies the HTTP status codes that indicate the operation should be retried.
// The default value is the status codes in StatusCodesForRetry.
// Specifying an empty slice will cause retries to happen only for transport errors.
StatusCodes []int
}
// TelemetryOptions configures the telemetry policy's behavior.
type TelemetryOptions struct {
// ApplicationID is an application-specific identification string to add to the User-Agent.
// It has a maximum length of 24 characters and must not contain any spaces.
ApplicationID string
// Disabled will prevent the addition of any telemetry data to the User-Agent.
Disabled bool
}
// TokenRequestOptions contains specific parameters that may be used by credential types when attempting to get a token.
type TokenRequestOptions struct {
// Scopes contains the list of permission scopes required for the token.
Scopes []string
}
// BearerTokenOptions configures the bearer token policy's behavior.
type BearerTokenOptions struct {
// placeholder for future options
}
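
The options above are consumed by the pipeline constructor later in this change. A minimal, illustrative sketch of how a caller would populate them (the field values are examples, not recommendations):

package main

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)

func exampleClientOptions() policy.ClientOptions {
	return policy.ClientOptions{
		Retry: policy.RetryOptions{
			MaxRetries:    5,                // retry up to 5 times on retriable status codes
			RetryDelay:    2 * time.Second,  // initial backoff; grows exponentially per try
			MaxRetryDelay: 30 * time.Second, // cap on the exponential backoff
		},
		Telemetry: policy.TelemetryOptions{
			ApplicationID: "vmbackup", // prepended to the User-Agent; illustrative value
		},
		// Logging, Transport, PerCallPolicies and PerRetryPolicies keep their zero-value defaults.
	}
}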

View file

@ -0,0 +1,10 @@
//go:build go1.18
// +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.
// Package runtime contains various facilities for creating requests and handling responses.
// The content is intended for SDK authors.
package runtime

View file

@ -0,0 +1,19 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
)
// NewResponseError creates an *azcore.ResponseError from the provided HTTP response.
// Call this when a service request returns a non-successful status code.
func NewResponseError(resp *http.Response) error {
return exported.NewResponseError(resp)
}

View file

@ -0,0 +1,77 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"context"
"encoding/json"
"errors"
)
// PagingHandler contains the required data for constructing a Pager.
type PagingHandler[T any] struct {
// More returns a boolean indicating if there are more pages to fetch.
// It uses the provided page to make the determination.
More func(T) bool
// Fetcher fetches the first and subsequent pages.
Fetcher func(context.Context, *T) (T, error)
}
// Pager provides operations for iterating over paged responses.
type Pager[T any] struct {
current *T
handler PagingHandler[T]
firstPage bool
}
// NewPager creates an instance of Pager using the specified PagingHandler.
// The returned pager has not fetched any pages yet; the first call to NextPage fetches the first page.
func NewPager[T any](handler PagingHandler[T]) *Pager[T] {
return &Pager[T]{
handler: handler,
firstPage: true,
}
}
// More returns true if there are more pages to retrieve.
func (p *Pager[T]) More() bool {
if p.current != nil {
return p.handler.More(*p.current)
}
return true
}
// NextPage advances the pager to the next page.
func (p *Pager[T]) NextPage(ctx context.Context) (T, error) {
var resp T
var err error
if p.current != nil {
if p.firstPage {
// we get here if it's an LRO-pager, we already have the first page
p.firstPage = false
return *p.current, nil
} else if !p.handler.More(*p.current) {
return *new(T), errors.New("no more pages")
}
resp, err = p.handler.Fetcher(ctx, p.current)
} else {
// non-LRO case, first page
p.firstPage = false
resp, err = p.handler.Fetcher(ctx, nil)
}
if err != nil {
return *new(T), err
}
p.current = &resp
return *p.current, nil
}
// UnmarshalJSON implements the json.Unmarshaler interface for Pager[T].
func (p *Pager[T]) UnmarshalJSON(data []byte) error {
return json.Unmarshal(data, &p.current)
}
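
The Fetcher receives nil on the first call and the previous page afterwards, so the handler alone decides how to follow continuation links. A minimal sketch against this API, with listPage as a hypothetical page shape (generated clients define their own):

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// listPage is a hypothetical page shape for illustration only.
type listPage struct {
	Items    []string
	NextLink *string
}

func iterate(ctx context.Context, fetch func(ctx context.Context, next *string) (listPage, error)) error {
	pager := runtime.NewPager(runtime.PagingHandler[listPage]{
		More: func(p listPage) bool { return p.NextLink != nil },
		Fetcher: func(ctx context.Context, cur *listPage) (listPage, error) {
			var next *string
			if cur != nil { // nil means the first page has not been fetched yet
				next = cur.NextLink
			}
			return fetch(ctx, next)
		},
	})
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, it := range page.Items {
			fmt.Println(it)
		}
	}
	return nil
}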

View file

@ -0,0 +1,73 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)
// PipelineOptions contains Pipeline options for SDK developers
type PipelineOptions struct {
AllowedHeaders, AllowedQueryParameters []string
PerCall, PerRetry []policy.Policy
}
// Pipeline represents a primitive for sending HTTP requests and receiving responses.
// Its behavior can be extended by specifying policies during construction.
type Pipeline = exported.Pipeline
// NewPipeline creates a pipeline from connection options, with any additional policies as specified.
// Policies from ClientOptions are placed after policies from PipelineOptions.
// The module and version parameters are used by the telemetry policy, when enabled.
func NewPipeline(module, version string, plOpts PipelineOptions, options *policy.ClientOptions) Pipeline {
cp := policy.ClientOptions{}
if options != nil {
cp = *options
}
if len(plOpts.AllowedHeaders) > 0 {
// the slice needs non-zero length, otherwise copy() below copies nothing and drops the caller's headers
headers := make([]string, len(plOpts.AllowedHeaders), len(plOpts.AllowedHeaders)+len(cp.Logging.AllowedHeaders))
copy(headers, plOpts.AllowedHeaders)
headers = append(headers, cp.Logging.AllowedHeaders...)
cp.Logging.AllowedHeaders = headers
}
if len(plOpts.AllowedQueryParameters) > 0 {
qp := make([]string, len(plOpts.AllowedQueryParameters), len(plOpts.AllowedQueryParameters)+len(cp.Logging.AllowedQueryParams))
copy(qp, plOpts.AllowedQueryParameters)
qp = append(qp, cp.Logging.AllowedQueryParams...)
cp.Logging.AllowedQueryParams = qp
}
// we put the includeResponsePolicy at the very beginning so that the raw response
// is populated with the final response (some policies might mutate the response)
policies := []policy.Policy{policyFunc(includeResponsePolicy)}
if !cp.Telemetry.Disabled {
policies = append(policies, NewTelemetryPolicy(module, version, &cp.Telemetry))
}
policies = append(policies, plOpts.PerCall...)
policies = append(policies, cp.PerCallPolicies...)
policies = append(policies, NewRetryPolicy(&cp.Retry))
policies = append(policies, plOpts.PerRetry...)
policies = append(policies, cp.PerRetryPolicies...)
policies = append(policies, NewLogPolicy(&cp.Logging))
policies = append(policies, policyFunc(httpHeaderPolicy), policyFunc(bodyDownloadPolicy))
transport := cp.Transport
if transport == nil {
transport = defaultHTTPClient
}
return exported.NewPipeline(transport, policies...)
}
// policyFunc is a type that implements the Policy interface.
// Use this type when implementing a stateless policy as a first-class function.
type policyFunc func(*policy.Request) (*http.Response, error)
// Do implements the Policy interface on policyFunc.
func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) {
return pf(req)
}
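
Putting the pieces together: a pipeline built from default client options already carries the telemetry, retry, logging, header and body-download policies in the order listed above. A minimal sketch of constructing one and sending a request through it (module name, version and endpoint are illustrative):

package main

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

func send(ctx context.Context) (*http.Response, error) {
	// "example"/"v0.1.0" feed the telemetry policy's User-Agent string.
	pl := runtime.NewPipeline("example", "v0.1.0", runtime.PipelineOptions{}, &policy.ClientOptions{})
	req, err := runtime.NewRequest(ctx, http.MethodGet, "https://example.blob.core.windows.net/")
	if err != nil {
		return nil, err
	}
	return pl.Do(req) // runs every policy, ending at the transport
}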

View file

@ -0,0 +1,64 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"net/http"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/internal/temporal"
)
// BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential.
type BearerTokenPolicy struct {
// mainResource is the resource to be retrieved using the tenant specified in the credential
mainResource *temporal.Resource[azcore.AccessToken, acquiringResourceState]
// the following fields are read-only
cred azcore.TokenCredential
scopes []string
}
type acquiringResourceState struct {
req *policy.Request
p *BearerTokenPolicy
}
// acquire acquires or updates the resource; only one
// thread/goroutine at a time ever calls this function
func acquire(state acquiringResourceState) (newResource azcore.AccessToken, newExpiration time.Time, err error) {
tk, err := state.p.cred.GetToken(state.req.Raw().Context(), policy.TokenRequestOptions{Scopes: state.p.scopes})
if err != nil {
return azcore.AccessToken{}, time.Time{}, err
}
return tk, tk.ExpiresOn, nil
}
// NewBearerTokenPolicy creates a policy object that authorizes requests with bearer tokens.
// cred: an azcore.TokenCredential implementation such as a credential object from azidentity
// scopes: the list of permission scopes required for the token.
// opts: optional settings. Pass nil to accept default values; this is the same as passing a zero-value options.
func NewBearerTokenPolicy(cred azcore.TokenCredential, scopes []string, opts *policy.BearerTokenOptions) *BearerTokenPolicy {
return &BearerTokenPolicy{
cred: cred,
scopes: scopes,
mainResource: temporal.NewResource(acquire),
}
}
// Do authorizes a request with a bearer token
func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) {
as := acquiringResourceState{
p: b,
req: req,
}
tk, err := b.mainResource.Get(as)
if err != nil {
return nil, err
}
req.Raw().Header.Set(shared.HeaderAuthorization, shared.BearerTokenPrefix+tk.Token)
return req.Next()
}
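
Placing this policy in the per-retry slot (rather than per-call) means a retried request re-runs Do and can attach a refreshed token. A sketch of the wiring; the credential would typically come from the separate azidentity module, and the scope shown is illustrative:

package main

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// newAuthorizedOptions injects the bearer-token policy into the per-retry slot.
func newAuthorizedOptions(cred azcore.TokenCredential) *policy.ClientOptions {
	btp := runtime.NewBearerTokenPolicy(cred, []string{"https://storage.azure.com/.default"}, nil)
	return &policy.ClientOptions{
		PerRetryPolicies: []policy.Policy{btp},
	}
}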

View file

@ -0,0 +1,73 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"fmt"
"net/http"
"strings"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
)
// bodyDownloadPolicy creates a policy object that downloads the response's body to a []byte.
func bodyDownloadPolicy(req *policy.Request) (*http.Response, error) {
resp, err := req.Next()
if err != nil {
return resp, err
}
var opValues bodyDownloadPolicyOpValues
// don't skip downloading error response bodies
if req.OperationValue(&opValues); opValues.Skip && resp.StatusCode < 400 {
return resp, err
}
// Either bodyDownloadPolicyOpValues was not specified (so skip is false)
// or it was specified and skip is false: don't skip downloading the body
_, err = exported.Payload(resp)
if err != nil {
return resp, newBodyDownloadError(err, req)
}
return resp, err
}
// bodyDownloadPolicyOpValues is the struct containing the per-operation values
type bodyDownloadPolicyOpValues struct {
Skip bool
}
type bodyDownloadError struct {
err error
}
func newBodyDownloadError(err error, req *policy.Request) error {
// on failure, only retry the request for idempotent operations.
// we currently identify them as DELETE, GET, and PUT requests.
if m := strings.ToUpper(req.Raw().Method); m == http.MethodDelete || m == http.MethodGet || m == http.MethodPut {
// error is safe for retry
return err
}
// wrap error to avoid retries
return &bodyDownloadError{
err: err,
}
}
func (b *bodyDownloadError) Error() string {
return fmt.Sprintf("body download policy: %s", b.err.Error())
}
func (b *bodyDownloadError) NonRetriable() {
// marker method
}
func (b *bodyDownloadError) Unwrap() error {
return b.err
}
var _ errorinfo.NonRetriable = (*bodyDownloadError)(nil)

View file

@ -0,0 +1,39 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"context"
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)
// httpHeaderPolicy adds custom HTTP headers from the request's context to the outgoing request
func httpHeaderPolicy(req *policy.Request) (*http.Response, error) {
// check if any custom HTTP headers have been specified
if header := req.Raw().Context().Value(shared.CtxWithHTTPHeaderKey{}); header != nil {
for k, v := range header.(http.Header) {
// use Set to replace any existing value
// it also canonicalizes the header key
req.Raw().Header.Set(k, v[0])
// add any remaining values
for i := 1; i < len(v); i++ {
req.Raw().Header.Add(k, v[i])
}
}
}
return req.Next()
}
// WithHTTPHeader adds the specified http.Header to the parent context.
// Use this to specify custom HTTP headers at the API-call level.
// Any overlapping headers will have their values replaced with the values specified here.
func WithHTTPHeader(parent context.Context, header http.Header) context.Context {
return context.WithValue(parent, shared.CtxWithHTTPHeaderKey{}, header)
}
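
A sketch of the caller side; the header name and value are illustrative:

package main

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

func withCustomHeader(ctx context.Context) context.Context {
	hdr := http.Header{}
	hdr.Set("x-ms-example", "value") // illustrative header
	// Every request issued with the returned context carries the header.
	return runtime.WithHTTPHeader(ctx, hdr)
}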

View file

@ -0,0 +1,34 @@
//go:build go1.16
// +build go1.16
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"context"
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)
// includeResponsePolicy makes the raw HTTP response available to callers that requested it via the context
func includeResponsePolicy(req *policy.Request) (*http.Response, error) {
resp, err := req.Next()
if resp == nil {
return resp, err
}
if httpOutRaw := req.Raw().Context().Value(shared.CtxIncludeResponseKey{}); httpOutRaw != nil {
httpOut := httpOutRaw.(**http.Response)
*httpOut = resp
}
return resp, err
}
// WithCaptureResponse applies the HTTP response retrieval annotation to the parent context.
// The resp parameter will contain the HTTP response after the request has completed.
func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context {
return context.WithValue(parent, shared.CtxIncludeResponseKey{}, resp)
}
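
A sketch of capturing the final response around an API call; the call parameter stands in for any operation built on this pipeline:

package main

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

func captureRaw(ctx context.Context, call func(context.Context) error) (*http.Response, error) {
	var raw *http.Response
	ctx = runtime.WithCaptureResponse(ctx, &raw)
	err := call(ctx)
	// raw now points at the final *http.Response, even when the call returned an API-level error.
	return raw, err
}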

View file

@ -0,0 +1,251 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"sort"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/internal/diag"
)
type logPolicy struct {
includeBody bool
allowedHeaders map[string]struct{}
allowedQP map[string]struct{}
}
// NewLogPolicy creates a request/response logging policy object configured using the specified options.
// Pass nil to accept the default values; this is the same as passing a zero-value options.
func NewLogPolicy(o *policy.LogOptions) policy.Policy {
if o == nil {
o = &policy.LogOptions{}
}
// construct default hash set of allowed headers
allowedHeaders := map[string]struct{}{
"accept": {},
"cache-control": {},
"connection": {},
"content-length": {},
"content-type": {},
"date": {},
"etag": {},
"expires": {},
"if-match": {},
"if-modified-since": {},
"if-none-match": {},
"if-unmodified-since": {},
"last-modified": {},
"ms-cv": {},
"pragma": {},
"request-id": {},
"retry-after": {},
"server": {},
"traceparent": {},
"transfer-encoding": {},
"user-agent": {},
"www-authenticate": {},
"x-ms-request-id": {},
"x-ms-client-request-id": {},
"x-ms-return-client-request-id": {},
}
// add any caller-specified allowed headers to the set
for _, ah := range o.AllowedHeaders {
allowedHeaders[strings.ToLower(ah)] = struct{}{}
}
// now do the same thing for query params
allowedQP := map[string]struct{}{
"api-version": {},
}
for _, qp := range o.AllowedQueryParams {
allowedQP[strings.ToLower(qp)] = struct{}{}
}
return &logPolicy{
includeBody: o.IncludeBody,
allowedHeaders: allowedHeaders,
allowedQP: allowedQP,
}
}
// logPolicyOpValues is the struct containing the per-operation values
type logPolicyOpValues struct {
try int32
start time.Time
}
func (p *logPolicy) Do(req *policy.Request) (*http.Response, error) {
// Get the per-operation values. These are saved in the Message's map so that they persist across each retry calling into this policy object.
var opValues logPolicyOpValues
if req.OperationValue(&opValues); opValues.start.IsZero() {
opValues.start = time.Now() // If this is the 1st try, record this operation's start time
}
opValues.try++ // The first try is #1 (not #0)
req.SetOperationValue(opValues)
// Log the outgoing request as informational
if log.Should(log.EventRequest) {
b := &bytes.Buffer{}
fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", opValues.try)
p.writeRequestWithResponse(b, req, nil, nil)
var err error
if p.includeBody {
err = writeReqBody(req, b)
}
log.Write(log.EventRequest, b.String())
if err != nil {
return nil, err
}
}
// Set the time for this particular retry operation and then Do the operation.
tryStart := time.Now()
response, err := req.Next() // Make the request
tryEnd := time.Now()
tryDuration := tryEnd.Sub(tryStart)
opDuration := tryEnd.Sub(opValues.start)
if log.Should(log.EventResponse) {
// We're going to log this; build the string to log
b := &bytes.Buffer{}
fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v, OpTime=%v) -- ", opValues.try, tryDuration, opDuration)
if err != nil { // This HTTP request did not get a response from the service
fmt.Fprint(b, "REQUEST ERROR\n")
} else {
fmt.Fprint(b, "RESPONSE RECEIVED\n")
}
p.writeRequestWithResponse(b, req, response, err)
if err != nil {
// skip frames runtime.Callers() and runtime.StackTrace()
b.WriteString(diag.StackTrace(2, 32))
} else if p.includeBody {
err = writeRespBody(response, b)
}
log.Write(log.EventResponse, b.String())
}
return response, err
}
const redactedValue = "REDACTED"
// writeRequestWithResponse appends a formatted HTTP request into a Buffer. If request and/or err are
// not nil, then these are also written into the Buffer.
func (p *logPolicy) writeRequestWithResponse(b *bytes.Buffer, req *policy.Request, resp *http.Response, err error) {
// redact applicable query params
cpURL := *req.Raw().URL
qp := cpURL.Query()
for k := range qp {
if _, ok := p.allowedQP[strings.ToLower(k)]; !ok {
qp.Set(k, redactedValue)
}
}
cpURL.RawQuery = qp.Encode()
// Write the request into the buffer.
fmt.Fprint(b, " "+req.Raw().Method+" "+cpURL.String()+"\n")
p.writeHeader(b, req.Raw().Header)
if resp != nil {
fmt.Fprintln(b, " --------------------------------------------------------------------------------")
fmt.Fprint(b, " RESPONSE Status: "+resp.Status+"\n")
p.writeHeader(b, resp.Header)
}
if err != nil {
fmt.Fprintln(b, " --------------------------------------------------------------------------------")
fmt.Fprint(b, " ERROR:\n"+err.Error()+"\n")
}
}
// formatHeaders appends an HTTP request's or response's header into a Buffer.
func (p *logPolicy) writeHeader(b *bytes.Buffer, header http.Header) {
if len(header) == 0 {
b.WriteString(" (no headers)\n")
return
}
keys := make([]string, 0, len(header))
// Alphabetize the headers
for k := range header {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
value := header.Get(k)
// redact all header values not in the allow-list
if _, ok := p.allowedHeaders[strings.ToLower(k)]; !ok {
value = redactedValue
}
fmt.Fprintf(b, " %s: %+v\n", k, value)
}
}
// returns true if the request/response body should be logged.
// this is determined by looking at the content-type header value.
func shouldLogBody(b *bytes.Buffer, contentType string) bool {
contentType = strings.ToLower(contentType)
if strings.HasPrefix(contentType, "text") ||
strings.Contains(contentType, "json") ||
strings.Contains(contentType, "xml") {
return true
}
fmt.Fprintf(b, " Skip logging body for %s\n", contentType)
return false
}
// writes to a buffer, used for logging purposes
func writeReqBody(req *policy.Request, b *bytes.Buffer) error {
if req.Raw().Body == nil {
fmt.Fprint(b, " Request contained no body\n")
return nil
}
if ct := req.Raw().Header.Get(shared.HeaderContentType); !shouldLogBody(b, ct) {
return nil
}
body, err := ioutil.ReadAll(req.Raw().Body)
if err != nil {
fmt.Fprintf(b, " Failed to read request body: %s\n", err.Error())
return err
}
if err := req.RewindBody(); err != nil {
return err
}
logBody(b, body)
return nil
}
// writes to a buffer, used for logging purposes
func writeRespBody(resp *http.Response, b *bytes.Buffer) error {
ct := resp.Header.Get(shared.HeaderContentType)
if ct == "" {
fmt.Fprint(b, " Response contained no body\n")
return nil
} else if !shouldLogBody(b, ct) {
return nil
}
body, err := Payload(resp)
if err != nil {
fmt.Fprintf(b, " Failed to read response body: %s\n", err.Error())
return err
}
if len(body) > 0 {
logBody(b, body)
} else {
fmt.Fprint(b, " Response contained no body\n")
}
return nil
}
func logBody(b *bytes.Buffer, body []byte) {
fmt.Fprintln(b, " --------------------------------------------------------------------------------")
fmt.Fprintln(b, string(body))
fmt.Fprintln(b, " --------------------------------------------------------------------------------")
}
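
Since redaction is allow-list based, extra headers must be opted in explicitly, and nothing is written at all until a listener is registered. A sketch assuming the public azcore/log wrapper around the internal log package used above:

package main

import (
	"fmt"

	azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)

func enableHTTPLogging() policy.LogOptions {
	// Route SDK log output to stdout, restricted to request/response events.
	azlog.SetListener(func(ev azlog.Event, msg string) { fmt.Println(msg) })
	azlog.SetEvents(azlog.EventRequest, azlog.EventResponse)
	return policy.LogOptions{
		// Headers outside the built-in allow-list are logged as REDACTED unless named here.
		AllowedHeaders: []string{"x-ms-example"}, // illustrative header
	}
}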

View file

@ -0,0 +1,34 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
)
type requestIDPolicy struct{}
// NewRequestIDPolicy returns a policy that adds the x-ms-client-request-id header to requests that lack one
func NewRequestIDPolicy() policy.Policy {
return &requestIDPolicy{}
}
func (r *requestIDPolicy) Do(req *policy.Request) (*http.Response, error) {
const requestIdHeader = "x-ms-client-request-id"
if req.Raw().Header.Get(requestIdHeader) == "" {
id, err := uuid.New()
if err != nil {
return nil, err
}
req.Raw().Header.Set(requestIdHeader, id.String())
}
return req.Next()
}

View file

@ -0,0 +1,242 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"context"
"errors"
"io"
"math"
"math/rand"
"net/http"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
)
const (
defaultMaxRetries = 3
)
func setDefaults(o *policy.RetryOptions) {
if o.MaxRetries == 0 {
o.MaxRetries = defaultMaxRetries
} else if o.MaxRetries < 0 {
o.MaxRetries = 0
}
if o.MaxRetryDelay == 0 {
o.MaxRetryDelay = 120 * time.Second
} else if o.MaxRetryDelay < 0 {
// not really an unlimited cap, but sufficiently large enough to be considered as such
o.MaxRetryDelay = math.MaxInt64
}
if o.RetryDelay == 0 {
o.RetryDelay = 4 * time.Second
} else if o.RetryDelay < 0 {
o.RetryDelay = 0
}
if o.StatusCodes == nil {
o.StatusCodes = []int{
http.StatusRequestTimeout, // 408
http.StatusTooManyRequests, // 429
http.StatusInternalServerError, // 500
http.StatusBadGateway, // 502
http.StatusServiceUnavailable, // 503
http.StatusGatewayTimeout, // 504
}
}
}
func calcDelay(o policy.RetryOptions, try int32) time.Duration { // try is >=1; never 0
pow := func(number int64, exponent int32) int64 { // pow is nested helper function
var result int64 = 1
for n := int32(0); n < exponent; n++ {
result *= number
}
return result
}
delay := time.Duration(pow(2, try)-1) * o.RetryDelay
// Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3)
delay = time.Duration(delay.Seconds() * (rand.Float64()/2 + 0.8) * float64(time.Second)) // NOTE: We want math/rand; not crypto/rand
if delay > o.MaxRetryDelay {
delay = o.MaxRetryDelay
}
return delay
}
// NewRetryPolicy creates a policy object configured using the specified options.
// Pass nil to accept the default values; this is the same as passing a zero-value options.
func NewRetryPolicy(o *policy.RetryOptions) policy.Policy {
if o == nil {
o = &policy.RetryOptions{}
}
p := &retryPolicy{options: *o}
return p
}
type retryPolicy struct {
options policy.RetryOptions
}
func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
options := p.options
// check if the retry options have been overridden for this call
if override := req.Raw().Context().Value(shared.CtxWithRetryOptionsKey{}); override != nil {
options = override.(policy.RetryOptions)
}
setDefaults(&options)
// Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.3)
// When to retry: connection failure or temporary/timeout.
var rwbody *retryableRequestBody
if req.Body() != nil {
// wrap the body so we control when it's actually closed.
// do this outside the for loop so defers don't accumulate.
rwbody = &retryableRequestBody{body: req.Body()}
defer rwbody.realClose()
}
try := int32(1)
for {
resp = nil // reset
log.Writef(log.EventRetryPolicy, "\n=====> Try=%d %s %s", try, req.Raw().Method, req.Raw().URL.String())
// For each try, seek to the beginning of the Body stream. We do this even for the 1st try because
// the stream may not be at offset 0 when we first get it and we want the same behavior for the
// 1st try as for additional tries.
err = req.RewindBody()
if err != nil {
return
}
// RewindBody() restores Raw().Body to its original state, so set our rewindable after
if rwbody != nil {
req.Raw().Body = rwbody
}
if options.TryTimeout == 0 {
resp, err = req.Next()
} else {
// Set the per-try time for this particular retry operation and then Do the operation.
tryCtx, tryCancel := context.WithTimeout(req.Raw().Context(), options.TryTimeout)
clone := req.Clone(tryCtx)
resp, err = clone.Next() // Make the request
// if the body was already downloaded or there was an error it's safe to cancel the context now
if err != nil {
tryCancel()
} else if _, ok := resp.Body.(*shared.NopClosingBytesReader); ok {
tryCancel()
} else {
// must cancel the context after the body has been read and closed
resp.Body = &contextCancelReadCloser{cf: tryCancel, body: resp.Body}
}
}
if err == nil {
log.Writef(log.EventRetryPolicy, "response %d", resp.StatusCode)
} else {
log.Writef(log.EventRetryPolicy, "error %v", err)
}
if err == nil && !HasStatusCode(resp, options.StatusCodes...) {
// if there is no error and the response code isn't in the list of retry codes then we're done.
return
} else if ctxErr := req.Raw().Context().Err(); ctxErr != nil {
// don't retry if the parent context has been cancelled or its deadline exceeded
err = ctxErr
log.Writef(log.EventRetryPolicy, "abort due to %v", err)
return
}
// check if the error is not retriable
var nre errorinfo.NonRetriable
if errors.As(err, &nre) {
// the error says it's not retriable so don't retry
log.Writef(log.EventRetryPolicy, "non-retriable error %T", nre)
return
}
if try == options.MaxRetries+1 {
// max number of tries has been reached, don't sleep again
log.Writef(log.EventRetryPolicy, "MaxRetries %d exceeded", options.MaxRetries)
return
}
// drain before retrying so nothing is leaked
Drain(resp)
// use the delay from retry-after if available
delay := shared.RetryAfter(resp)
if delay <= 0 {
delay = calcDelay(options, try)
}
log.Writef(log.EventRetryPolicy, "End Try #%d, Delay=%v", try, delay)
select {
case <-time.After(delay):
try++
case <-req.Raw().Context().Done():
err = req.Raw().Context().Err()
log.Writef(log.EventRetryPolicy, "abort due to %v", err)
return
}
}
}
// WithRetryOptions adds the specified RetryOptions to the parent context.
// Use this to specify custom RetryOptions at the API-call level.
func WithRetryOptions(parent context.Context, options policy.RetryOptions) context.Context {
return context.WithValue(parent, shared.CtxWithRetryOptionsKey{}, options)
}
// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser)
// This struct is used when sending a body to the network
type retryableRequestBody struct {
body io.ReadSeeker // Seeking is required to support retries
}
// Read reads a block of data from an inner stream and reports progress
func (b *retryableRequestBody) Read(p []byte) (n int, err error) {
return b.body.Read(p)
}
func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) {
return b.body.Seek(offset, whence)
}
func (b *retryableRequestBody) Close() error {
// We don't want the underlying transport to close the request body on transient failures so this is a nop.
// The retry policy closes the request body upon success.
return nil
}
func (b *retryableRequestBody) realClose() error {
if c, ok := b.body.(io.Closer); ok {
return c.Close()
}
return nil
}
// ********** The following type/methods implement the contextCancelReadCloser
// contextCancelReadCloser combines an io.ReadCloser with a cancel func.
// it ensures the cancel func is invoked once the body has been read and closed.
type contextCancelReadCloser struct {
cf context.CancelFunc
body io.ReadCloser
}
func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) {
return rc.body.Read(p)
}
func (rc *contextCancelReadCloser) Close() error {
err := rc.body.Close()
rc.cf()
return err
}
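
A sketch of a per-call override; per setDefaults above, a negative MaxRetries means one try and no retries:

package main

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

func noRetries(ctx context.Context) context.Context {
	// retryPolicy.Do reads this override from the context before applying defaults.
	return runtime.WithRetryOptions(ctx, policy.RetryOptions{MaxRetries: -1})
}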

View file

@ -0,0 +1,79 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"bytes"
"fmt"
"net/http"
"os"
"runtime"
"strings"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)
type telemetryPolicy struct {
telemetryValue string
}
// NewTelemetryPolicy creates a telemetry policy object that adds telemetry information to outgoing HTTP requests.
// The format is [<application_id> ]azsdk-go-<mod>/<ver> <platform_info>.
// Pass nil to accept the default values; this is the same as passing a zero-value options.
func NewTelemetryPolicy(mod, ver string, o *policy.TelemetryOptions) policy.Policy {
if o == nil {
o = &policy.TelemetryOptions{}
}
tp := telemetryPolicy{}
if o.Disabled {
return &tp
}
b := &bytes.Buffer{}
// normalize ApplicationID
if o.ApplicationID != "" {
o.ApplicationID = strings.ReplaceAll(o.ApplicationID, " ", "/")
if len(o.ApplicationID) > 24 {
o.ApplicationID = o.ApplicationID[:24]
}
b.WriteString(o.ApplicationID)
b.WriteRune(' ')
}
b.WriteString(formatTelemetry(mod, ver))
b.WriteRune(' ')
b.WriteString(platformInfo)
tp.telemetryValue = b.String()
return &tp
}
func formatTelemetry(comp, ver string) string {
return fmt.Sprintf("azsdk-go-%s/%s", comp, ver)
}
func (p telemetryPolicy) Do(req *policy.Request) (*http.Response, error) {
if p.telemetryValue == "" {
return req.Next()
}
// preserve the existing User-Agent string
if ua := req.Raw().Header.Get(shared.HeaderUserAgent); ua != "" {
p.telemetryValue = fmt.Sprintf("%s %s", p.telemetryValue, ua)
}
req.Raw().Header.Set(shared.HeaderUserAgent, p.telemetryValue)
return req.Next()
}
// NOTE: the ONLY function that should write to this variable is this func
var platformInfo = func() string {
operatingSystem := runtime.GOOS // Default OS string
switch operatingSystem {
case "windows":
operatingSystem = os.Getenv("OS") // Get more specific OS information
case "linux": // accept default OS info
case "freebsd": // accept default OS info
}
return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem)
}()

View file

@ -0,0 +1,324 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
)
// FinalStateVia is the enumerated type for the possible final-state-via values.
type FinalStateVia = pollers.FinalStateVia
const (
// FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL.
FinalStateViaAzureAsyncOp = pollers.FinalStateViaAzureAsyncOp
// FinalStateViaLocation indicates the final payload comes from the Location URL.
FinalStateViaLocation = pollers.FinalStateViaLocation
// FinalStateViaOriginalURI indicates the final payload comes from the original URL.
FinalStateViaOriginalURI = pollers.FinalStateViaOriginalURI
// FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL.
FinalStateViaOpLocation = pollers.FinalStateViaOpLocation
)
// NewPollerOptions contains the optional parameters for NewPoller.
type NewPollerOptions[T any] struct {
// FinalStateVia contains the final-state-via value for the LRO.
FinalStateVia FinalStateVia
// Response contains a preconstructed response type.
// The final payload will be unmarshaled into it and returned.
Response *T
// Handler[T] contains a custom polling implementation.
Handler PollingHandler[T]
}
// NewPoller creates a Poller based on the provided initial response.
func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPollerOptions[T]) (*Poller[T], error) {
if options == nil {
options = &NewPollerOptions[T]{}
}
result := options.Response
if result == nil {
result = new(T)
}
if options.Handler != nil {
return &Poller[T]{
op: options.Handler,
resp: resp,
result: result,
}, nil
}
defer resp.Body.Close()
// this is a back-stop in case the swagger is incorrect (i.e. missing one or more status codes for success).
// ideally the codegen should return an error if the initial response failed and not even create a poller.
if !pollers.StatusCodeValid(resp) {
return nil, errors.New("the operation failed or was cancelled")
}
// determine the polling method
var opr PollingHandler[T]
var err error
if async.Applicable(resp) {
// async poller must be checked first as it can also have a location header
opr, err = async.New[T](pl, resp, options.FinalStateVia)
} else if op.Applicable(resp) {
// op poller must be checked before loc as it can also have a location header
opr, err = op.New[T](pl, resp, options.FinalStateVia)
} else if loc.Applicable(resp) {
opr, err = loc.New[T](pl, resp)
} else if body.Applicable(resp) {
// must test body poller last as it's a subset of the other pollers.
// TODO: this is ambiguous for PATCH/PUT if it returns a 200 with no polling headers (sync completion)
opr, err = body.New[T](pl, resp)
} else if m := resp.Request.Method; resp.StatusCode == http.StatusAccepted && (m == http.MethodDelete || m == http.MethodPost) {
// if we get here it means we have a 202 with no polling headers.
// for DELETE and POST this is a hard error per ARM RPC spec.
return nil, errors.New("response is missing polling URL")
} else {
opr, err = pollers.NewNopPoller[T](resp)
}
if err != nil {
return nil, err
}
return &Poller[T]{
op: opr,
resp: resp,
result: result,
}, nil
}
// NewPollerFromResumeTokenOptions contains the optional parameters for NewPollerFromResumeToken.
type NewPollerFromResumeTokenOptions[T any] struct {
// Response contains a preconstructed response type.
// The final payload will be unmarshaled into it and returned.
Response *T
// Handler[T] contains a custom polling implementation.
Handler PollingHandler[T]
}
// NewPollerFromResumeToken creates a Poller from a resume token string.
func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options *NewPollerFromResumeTokenOptions[T]) (*Poller[T], error) {
if options == nil {
options = &NewPollerFromResumeTokenOptions[T]{}
}
result := options.Response
if result == nil {
result = new(T)
}
if err := pollers.IsTokenValid[T](token); err != nil {
return nil, err
}
raw, err := pollers.ExtractToken(token)
if err != nil {
return nil, err
}
var asJSON map[string]interface{}
if err := json.Unmarshal(raw, &asJSON); err != nil {
return nil, err
}
opr := options.Handler
// now rehydrate the poller based on the encoded poller type
if async.CanResume(asJSON) {
opr, _ = async.New[T](pl, nil, "")
} else if body.CanResume(asJSON) {
opr, _ = body.New[T](pl, nil)
} else if loc.CanResume(asJSON) {
opr, _ = loc.New[T](pl, nil)
} else if op.CanResume(asJSON) {
opr, _ = op.New[T](pl, nil, "")
} else if opr != nil {
log.Writef(log.EventLRO, "Resuming custom poller %T.", opr)
} else {
return nil, fmt.Errorf("unhandled poller token %s", string(raw))
}
if err := json.Unmarshal(raw, &opr); err != nil {
return nil, err
}
return &Poller[T]{
op: opr,
result: result,
}, nil
}
// PollingHandler[T] abstracts the differences among poller implementations.
type PollingHandler[T any] interface {
// Done returns true if the LRO has reached a terminal state.
Done() bool
// Poll fetches the latest state of the LRO.
Poll(context.Context) (*http.Response, error)
// Result is called once the LRO has reached a terminal state. It populates the out parameter
// with the result of the operation.
Result(ctx context.Context, out *T) error
}
// Poller encapsulates a long-running operation, providing polling facilities until the operation reaches a terminal state.
type Poller[T any] struct {
op PollingHandler[T]
resp *http.Response
err error
result *T
done bool
}
// PollUntilDoneOptions contains the optional values for the Poller[T].PollUntilDone() method.
type PollUntilDoneOptions struct {
// Frequency is the time to wait between polling intervals in absence of a Retry-After header. Allowed minimum is one second.
// Pass zero to accept the default value (30s).
Frequency time.Duration
}
// PollUntilDone will poll the service endpoint until a terminal state is reached, an error is received, or the context expires.
// It internally uses Poll(), Done(), and Result() in its polling loop, sleeping for the specified duration between intervals.
// options: pass nil to accept the default values.
// NOTE: the default polling frequency is 30 seconds which works well for most operations. However, some operations might
// benefit from a shorter or longer duration.
func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOptions) (T, error) {
if options == nil {
options = &PollUntilDoneOptions{}
}
cp := *options
if cp.Frequency == 0 {
cp.Frequency = 30 * time.Second
}
if cp.Frequency < time.Second {
return *new(T), errors.New("polling frequency minimum is one second")
}
start := time.Now()
logPollUntilDoneExit := func(v interface{}) {
log.Writef(log.EventLRO, "END PollUntilDone() for %T: %v, total time: %s", p.op, v, time.Since(start))
}
log.Writef(log.EventLRO, "BEGIN PollUntilDone() for %T", p.op)
if p.resp != nil {
// initial check for a retry-after header existing on the initial response
if retryAfter := shared.RetryAfter(p.resp); retryAfter > 0 {
log.Writef(log.EventLRO, "initial Retry-After delay for %s", retryAfter.String())
if err := shared.Delay(ctx, retryAfter); err != nil {
logPollUntilDoneExit(err)
return *new(T), err
}
}
}
// begin polling the endpoint until a terminal state is reached
for {
resp, err := p.Poll(ctx)
if err != nil {
logPollUntilDoneExit(err)
return *new(T), err
}
if p.Done() {
logPollUntilDoneExit("succeeded")
return p.Result(ctx)
}
d := cp.Frequency
if retryAfter := shared.RetryAfter(resp); retryAfter > 0 {
log.Writef(log.EventLRO, "Retry-After delay for %s", retryAfter.String())
d = retryAfter
} else {
log.Writef(log.EventLRO, "delay for %s", d.String())
}
if err = shared.Delay(ctx, d); err != nil {
logPollUntilDoneExit(err)
return *new(T), err
}
}
}
// Poll fetches the latest state of the LRO. It returns an HTTP response or error.
// If Poll succeeds, the poller's state is updated and the HTTP response is returned.
// If Poll fails, the poller's state is unmodified and the error is returned.
// Calling Poll on an LRO that has reached a terminal state will return the last HTTP response.
func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
if p.Done() {
// the LRO has reached a terminal state, don't poll again
return p.resp, nil
}
resp, err := p.op.Poll(ctx)
if err != nil {
return nil, err
}
p.resp = resp
return p.resp, nil
}
// Done returns true if the LRO has reached a terminal state.
// Once a terminal state is reached, call Result().
func (p *Poller[T]) Done() bool {
return p.op.Done()
}
// Result returns the result of the LRO and is meant to be used in conjunction with Poll and Done.
// If the LRO completed successfully, a populated instance of T is returned.
// If the LRO failed or was canceled, an *azcore.ResponseError error is returned.
// Calling this on an LRO in a non-terminal state will return an error.
func (p *Poller[T]) Result(ctx context.Context) (T, error) {
if !p.Done() {
return *new(T), errors.New("poller is in a non-terminal state")
}
if p.done {
// the result has already been retrieved, return the cached value
if p.err != nil {
return *new(T), p.err
}
return *p.result, nil
}
err := p.op.Result(ctx, p.result)
var respErr *exported.ResponseError
if errors.As(err, &respErr) {
// the LRO failed. record the error
p.err = err
} else if err != nil {
// the call to Result failed, don't cache anything in this case
return *new(T), err
}
p.done = true
if p.err != nil {
return *new(T), p.err
}
return *p.result, nil
}
// ResumeToken returns a value representing the poller that can be used to resume
// the LRO at a later time. ResumeTokens are unique per service operation.
// The token's format should be considered opaque and is subject to change.
// Calling this on an LRO in a terminal state will return an error.
func (p *Poller[T]) ResumeToken() (string, error) {
if p.Done() {
return "", errors.New("poller is in a terminal state")
}
tk, err := pollers.NewResumeToken[T](p.op)
if err != nil {
return "", err
}
return tk, err
}
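
A minimal sketch of driving an LRO to completion with this API; widgetResult is a hypothetical payload type and the 5-second frequency is illustrative:

package main

import (
	"context"
	"net/http"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// widgetResult is a hypothetical LRO result shape.
type widgetResult struct {
	ID string
}

func waitForWidget(ctx context.Context, resp *http.Response, pl runtime.Pipeline) (widgetResult, error) {
	poller, err := runtime.NewPoller[widgetResult](resp, pl, nil)
	if err != nil {
		return widgetResult{}, err
	}
	// Poll every 5s unless the service supplies Retry-After hints.
	return poller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: 5 * time.Second})
}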

View file

@ -0,0 +1,225 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"encoding/xml"
"fmt"
"io"
"mime/multipart"
"reflect"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)
// Base64Encoding is used to specify which base-64 encoder/decoder to use when
// encoding/decoding a slice of bytes to/from a string.
type Base64Encoding int
const (
// Base64StdFormat uses base64.StdEncoding for encoding and decoding payloads.
Base64StdFormat Base64Encoding = 0
// Base64URLFormat uses base64.RawURLEncoding for encoding and decoding payloads.
Base64URLFormat Base64Encoding = 1
)
// NewRequest creates a new policy.Request with the specified input.
func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*policy.Request, error) {
return exported.NewRequest(ctx, httpMethod, endpoint)
}
// JoinPaths concatenates multiple URL path segments into one path,
// inserting path separation characters as required. JoinPaths will preserve
// query parameters in the root path.
func JoinPaths(root string, paths ...string) string {
if len(paths) == 0 {
return root
}
qps := ""
if strings.Contains(root, "?") {
splitPath := strings.Split(root, "?")
root, qps = splitPath[0], splitPath[1]
}
for i := 0; i < len(paths); i++ {
root = strings.TrimRight(root, "/")
paths[i] = strings.TrimLeft(paths[i], "/")
root += "/" + paths[i]
}
if qps != "" {
if !strings.HasSuffix(root, "/") {
root += "/"
}
return root + "?" + qps
}
return root
}
// EncodeByteArray will base-64 encode the byte slice v.
func EncodeByteArray(v []byte, format Base64Encoding) string {
if format == Base64URLFormat {
return base64.RawURLEncoding.EncodeToString(v)
}
return base64.StdEncoding.EncodeToString(v)
}
// MarshalAsByteArray will base-64 encode the byte slice v, then calls SetBody.
// The encoded value is treated as a JSON string.
func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) error {
// send as a JSON string
encode := fmt.Sprintf("\"%s\"", EncodeByteArray(v, format))
return req.SetBody(exported.NopCloser(strings.NewReader(encode)), shared.ContentTypeAppJSON)
}
// MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody.
func MarshalAsJSON(req *policy.Request, v interface{}) error {
v = cloneWithoutReadOnlyFields(v)
b, err := json.Marshal(v)
if err != nil {
return fmt.Errorf("error marshalling type %T: %s", v, err)
}
return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppJSON)
}
// MarshalAsXML calls xml.Marshal() to get the XML encoding of v then calls SetBody.
func MarshalAsXML(req *policy.Request, v interface{}) error {
b, err := xml.Marshal(v)
if err != nil {
return fmt.Errorf("error marshalling type %T: %s", v, err)
}
// include the XML header as some services require it
b = []byte(xml.Header + string(b))
return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppXML)
}
// SetMultipartFormData writes the specified keys/values as multi-part form
// fields with the specified value. File content must be specified as a ReadSeekCloser.
// All other values are treated as string values.
func SetMultipartFormData(req *policy.Request, formData map[string]interface{}) error {
body := bytes.Buffer{}
writer := multipart.NewWriter(&body)
for k, v := range formData {
if rsc, ok := v.(io.ReadSeekCloser); ok {
// this is the body to upload, the key is its file name
fd, err := writer.CreateFormFile(k, k)
if err != nil {
return err
}
// copy the data to the form file
if _, err = io.Copy(fd, rsc); err != nil {
return err
}
continue
}
// ensure the value is in string format
s, ok := v.(string)
if !ok {
s = fmt.Sprintf("%v", v)
}
if err := writer.WriteField(k, s); err != nil {
return err
}
}
if err := writer.Close(); err != nil {
return err
}
return req.SetBody(exported.NopCloser(bytes.NewReader(body.Bytes())), writer.FormDataContentType())
}
// SkipBodyDownload will disable automatic downloading of the response body.
func SkipBodyDownload(req *policy.Request) {
req.SetOperationValue(bodyDownloadPolicyOpValues{Skip: true})
}
// returns a clone of the object graph pointed to by v, omitting values of all read-only
// fields. if there are no read-only fields in the object graph, no clone is created.
func cloneWithoutReadOnlyFields(v interface{}) interface{} {
val := reflect.Indirect(reflect.ValueOf(v))
if val.Kind() != reflect.Struct {
// not a struct, skip
return v
}
// first walk the graph to find any R/O fields.
// if there aren't any, skip cloning the graph.
if !recursiveFindReadOnlyField(val) {
return v
}
return recursiveCloneWithoutReadOnlyFields(val)
}
// returns true if any field in the object graph of val contains the `azure:"ro"` tag value
func recursiveFindReadOnlyField(val reflect.Value) bool {
t := val.Type()
// iterate over the fields, looking for the "azure" tag.
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
aztag := field.Tag.Get("azure")
if azureTagIsReadOnly(aztag) {
return true
} else if reflect.Indirect(val.Field(i)).Kind() == reflect.Struct && recursiveFindReadOnlyField(reflect.Indirect(val.Field(i))) {
return true
}
}
return false
}
// clones the object graph of val. all non-R/O properties are copied to the clone
func recursiveCloneWithoutReadOnlyFields(val reflect.Value) interface{} {
t := val.Type()
clone := reflect.New(t)
// iterate over the fields, looking for the "azure" tag.
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
aztag := field.Tag.Get("azure")
if azureTagIsReadOnly(aztag) {
// omit from payload
continue
}
// clone field will receive the same value as the source field...
value := val.Field(i)
v := reflect.Indirect(value)
if v.IsValid() && v.Type() != reflect.TypeOf(time.Time{}) && v.Kind() == reflect.Struct {
// ...unless the source value is a struct, in which case we recurse to clone that struct.
// (We can't recursively clone time.Time because it contains unexported fields.)
c := recursiveCloneWithoutReadOnlyFields(v)
if field.Anonymous {
// NOTE: this does not handle the case of embedded fields of unexported struct types.
// this should be ok as we don't generate any code like this at present
value = reflect.Indirect(reflect.ValueOf(c))
} else {
value = reflect.ValueOf(c)
}
}
reflect.Indirect(clone).Field(i).Set(value)
}
return clone.Interface()
}
// returns true if the "azure" tag contains the option "ro"
func azureTagIsReadOnly(tag string) bool {
if tag == "" {
return false
}
parts := strings.Split(tag, ",")
for _, part := range parts {
if part == "ro" {
return true
}
}
return false
}
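
The query-preservation behavior of JoinPaths is easiest to see in a worked example; note the trailing slash it inserts before re-appending the query string:

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

func main() {
	u := runtime.JoinPaths("https://example.com/base?api-version=1", "items/", "/42")
	// Prints: https://example.com/base/items/42/?api-version=1
	// Duplicate separators are collapsed, and the query string from the
	// root is split off and re-appended after the joined path.
	fmt.Println(u)
}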

View file

@ -0,0 +1,137 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"bytes"
"encoding/base64"
"encoding/json"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
)
// Payload reads and returns the response body or an error.
// On a successful read, the response body is cached.
// Subsequent reads will access the cached value.
func Payload(resp *http.Response) ([]byte, error) {
return exported.Payload(resp)
}
// HasStatusCode returns true if the Response's status code is one of the specified values.
func HasStatusCode(resp *http.Response, statusCodes ...int) bool {
return exported.HasStatusCode(resp, statusCodes...)
}
// UnmarshalAsByteArray will base-64 decode the received payload and place the result into the value pointed to by v.
func UnmarshalAsByteArray(resp *http.Response, v *[]byte, format Base64Encoding) error {
p, err := Payload(resp)
if err != nil {
return err
}
return DecodeByteArray(string(p), v, format)
}
// UnmarshalAsJSON calls json.Unmarshal() to unmarshal the received payload into the value pointed to by v.
func UnmarshalAsJSON(resp *http.Response, v interface{}) error {
payload, err := Payload(resp)
if err != nil {
return err
}
// TODO: verify early exit is correct
if len(payload) == 0 {
return nil
}
err = removeBOM(resp)
if err != nil {
return err
}
err = json.Unmarshal(payload, v)
if err != nil {
err = fmt.Errorf("unmarshalling type %T: %s", v, err)
}
return err
}
// UnmarshalAsXML calls xml.Unmarshal() to unmarshal the received payload into the value pointed to by v.
func UnmarshalAsXML(resp *http.Response, v interface{}) error {
payload, err := Payload(resp)
if err != nil {
return err
}
// TODO: verify early exit is correct
if len(payload) == 0 {
return nil
}
err = removeBOM(resp)
if err != nil {
return err
}
err = xml.Unmarshal(payload, v)
if err != nil {
err = fmt.Errorf("unmarshalling type %T: %s", v, err)
}
return err
}
// Drain reads the response body to completion then closes it. The bytes read are discarded.
func Drain(resp *http.Response) {
if resp != nil && resp.Body != nil {
_, _ = io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}
}
// removeBOM removes any byte-order mark prefix from the payload if present.
func removeBOM(resp *http.Response) error {
payload, err := Payload(resp)
if err != nil {
return err
}
// UTF8
trimmed := bytes.TrimPrefix(payload, []byte("\xef\xbb\xbf"))
if len(trimmed) < len(payload) {
resp.Body.(shared.BytesSetter).Set(trimmed)
}
return nil
}
// DecodeByteArray will base-64 decode the provided string into v.
func DecodeByteArray(s string, v *[]byte, format Base64Encoding) error {
if len(s) == 0 {
return nil
}
payload := s
if payload[0] == '"' {
// remove surrounding quotes
payload = payload[1 : len(payload)-1]
}
switch format {
case Base64StdFormat:
decoded, err := base64.StdEncoding.DecodeString(payload)
if err == nil {
*v = decoded
return nil
}
return err
case Base64URLFormat:
// use raw encoding as URL format should not contain any '=' characters
decoded, err := base64.RawURLEncoding.DecodeString(payload)
if err == nil {
*v = decoded
return nil
}
return err
default:
return fmt.Errorf("unrecognized byte array format: %d", format)
}
}
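
A typical decode path combines these helpers: gate on the expected status codes, convert failures into a ResponseError, then unmarshal. A sketch with a hypothetical payload type:

package main

import (
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// item is a hypothetical payload shape.
type item struct {
	Name string `json:"name"`
}

func decode(resp *http.Response) (item, error) {
	var out item
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		// Non-success: surface the payload as an *azcore.ResponseError.
		return out, runtime.NewResponseError(resp)
	}
	err := runtime.UnmarshalAsJSON(resp, &out)
	return out, err
}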

View file

@ -0,0 +1,37 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"crypto/tls"
"net"
"net/http"
"time"
)
var defaultHTTPClient *http.Client
func init() {
defaultTransport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
ForceAttemptHTTP2: true,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
TLSClientConfig: &tls.Config{
MinVersion: tls.VersionTLS12,
},
}
defaultHTTPClient = &http.Client{
Transport: defaultTransport,
}
}

View file

@ -0,0 +1,9 @@
//go:build go1.18
// +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.
// Package streaming contains helpers for streaming IO operations and progress reporting.
package streaming

View file

@ -0,0 +1,72 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package streaming
import (
"io"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
)
type progress struct {
rc io.ReadCloser
rsc io.ReadSeekCloser
pr func(bytesTransferred int64)
offset int64
}
// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker.
func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {
return exported.NopCloser(rs)
}
// NewRequestProgress adds progress reporting to an HTTP request's body stream.
func NewRequestProgress(body io.ReadSeekCloser, pr func(bytesTransferred int64)) io.ReadSeekCloser {
return &progress{
rc: body,
rsc: body,
pr: pr,
offset: 0,
}
}
// NewResponseProgress adds progress reporting to an HTTP response's body stream.
func NewResponseProgress(body io.ReadCloser, pr func(bytesTransferred int64)) io.ReadCloser {
return &progress{
rc: body,
rsc: nil,
pr: pr,
offset: 0,
}
}
// Read reads a block of data from an inner stream and reports progress
func (p *progress) Read(b []byte) (n int, err error) {
n, err = p.rc.Read(b)
if err != nil && err != io.EOF {
return
}
p.offset += int64(n)
// Invokes the user's callback method to report progress
p.pr(p.offset)
return
}
// Seek is only expected to be called with offset zero from the beginning of the stream.
func (p *progress) Seek(offset int64, whence int) (int64, error) {
// This should only ever be called with offset = 0 and whence = io.SeekStart
n, err := p.rsc.Seek(offset, whence)
if err == nil {
p.offset = int64(n)
}
return n, err
}
// Close closes the wrapped stream. The progress wrapper always satisfies io.Closer; closing it closes the underlying ReadCloser.
func (p *progress) Close() error {
return p.rc.Close()
}
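
A sketch of wiring progress reporting into a request body; the file path and content type are illustrative, and *os.File already satisfies io.ReadSeekCloser:

package main

import (
	"fmt"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
)

func uploadWithProgress(req *policy.Request, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	body := streaming.NewRequestProgress(f, func(n int64) {
		fmt.Printf("\rsent %d bytes", n) // invoked after every Read
	})
	return req.SetBody(body, "application/octet-stream")
}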

View file

@ -0,0 +1,9 @@
//go:build go1.18
// +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.
// Package to contains various type-conversion helper functions.
package to

View file

@ -0,0 +1,21 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package to
// Ptr returns a pointer to the provided value.
func Ptr[T any](v T) *T {
return &v
}
// SliceOfPtrs returns a slice of *T from the specified values.
func SliceOfPtrs[T any](vv ...T) []*T {
slc := make([]*T, len(vv))
for i := range vv {
slc[i] = Ptr(vv[i])
}
return slc
}
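
These generics remove the temporary-variable boilerplate that optional (pointer-typed) model fields otherwise require:

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

func main() {
	s := to.Ptr("archive")           // *string without a named temporary
	n := to.Ptr(int32(3))            // *int32
	tags := to.SliceOfPtrs("a", "b") // []*string
	fmt.Println(*s, *n, *tags[0])
}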

View file

@ -0,0 +1,21 @@
MIT License
Copyright (c) Microsoft Corporation.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE

View file

@ -0,0 +1,51 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package diag
import (
"fmt"
"runtime"
"strings"
)
// Caller returns the file and line number of a frame on the caller's stack.
// If the function fails an empty string is returned.
// skipFrames - the number of frames to skip when determining the caller.
// Passing a value of 0 will return the immediate caller of this function.
func Caller(skipFrames int) string {
if pc, file, line, ok := runtime.Caller(skipFrames + 1); ok {
// the skipFrames + 1 is to skip ourselves
frame := runtime.FuncForPC(pc)
return fmt.Sprintf("%s()\n\t%s:%d", frame.Name(), file, line)
}
return ""
}
// StackTrace returns a formatted stack trace string.
// If the function fails, an empty string is returned.
// skipFrames - the number of stack frames to skip before composing the trace string.
// totalFrames - the maximum number of stack frames to include in the trace string.
func StackTrace(skipFrames, totalFrames int) string {
pcCallers := make([]uintptr, totalFrames)
if frames := runtime.Callers(skipFrames, pcCallers); frames == 0 {
return ""
}
frames := runtime.CallersFrames(pcCallers)
sb := strings.Builder{}
for {
frame, more := frames.Next()
sb.WriteString(frame.Function)
sb.WriteString("()\n\t")
sb.WriteString(frame.File)
sb.WriteRune(':')
sb.WriteString(fmt.Sprintf("%d\n", frame.Line))
if !more {
break
}
}
return sb.String()
}
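
Since this package sits under an `internal` path it is not importable outside the Azure SDK module tree, but the mechanics are plain `runtime` calls; a standalone sketch of the same `Caller` logic:

```go
package main

import (
	"fmt"
	"runtime"
)

// caller mirrors diag.Caller: skipFrames+1 skips this helper itself.
func caller(skipFrames int) string {
	if pc, file, line, ok := runtime.Caller(skipFrames + 1); ok {
		return fmt.Sprintf("%s()\n\t%s:%d", runtime.FuncForPC(pc).Name(), file, line)
	}
	return ""
}

func main() {
	// Prints something like: main.main()
	//	/path/to/main.go:17
	fmt.Println(caller(0))
}
```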

View file

@ -0,0 +1,7 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package diag

View file

@ -0,0 +1,7 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package errorinfo

View file

@ -0,0 +1,16 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package errorinfo
// NonRetriable represents a non-transient error. This works in
// conjunction with the retry policy, indicating that the error condition
// is idempotent, so no retries will be attempted.
// Use errors.As() to access this interface in the error chain.
type NonRetriable interface {
error
NonRetriable()
}
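
A sketch of how an error type can opt out of retries by satisfying this interface; the retry-policy probe shown here is illustrative, not the SDK's actual policy code:

```go
package main

import (
	"errors"
	"fmt"
)

// authError is non-transient: it carries the NonRetriable marker method.
type authError struct{ msg string }

func (e authError) Error() string { return e.msg }
func (e authError) NonRetriable() {}

// shouldRetry probes the error chain the way a retry policy could.
func shouldRetry(err error) bool {
	var nr interface {
		error
		NonRetriable()
	}
	return !errors.As(err, &nr)
}

func main() {
	err := fmt.Errorf("request failed: %w", authError{msg: "bad credentials"})
	fmt.Println(shouldRetry(err)) // false
}
```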

View file

@ -0,0 +1,7 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package log

View file

@ -0,0 +1,104 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package log
import (
"fmt"
"os"
"time"
)
///////////////////////////////////////////////////////////////////////////////////////////////////
// NOTE: The following are exported as public surface area from azcore. DO NOT MODIFY
///////////////////////////////////////////////////////////////////////////////////////////////////
// Event is used to group entries. Each group can be toggled on or off.
type Event string
// SetEvents is used to control which events are written to
// the log. By default all log events are written.
func SetEvents(cls ...Event) {
log.cls = cls
}
// SetListener will set the Logger to write to the specified listener.
func SetListener(lst func(Event, string)) {
log.lst = lst
}
///////////////////////////////////////////////////////////////////////////////////////////////////
// END PUBLIC SURFACE AREA
///////////////////////////////////////////////////////////////////////////////////////////////////
// Should returns true if the specified log event should be written to the log.
// By default all log events will be logged. Call SetEvents() to limit
// the log events for logging.
// If no listener has been set this will return false.
// Calling this method is useful when the message to log is computationally expensive
// and you want to avoid the overhead if its log event is not enabled.
func Should(cls Event) bool {
if log.lst == nil {
return false
}
if len(log.cls) == 0 {
return true
}
for _, c := range log.cls {
if c == cls {
return true
}
}
return false
}
// Write invokes the underlying listener with the specified event and message.
// If the event shouldn't be logged or there is no listener then Write does nothing.
func Write(cls Event, message string) {
if !Should(cls) {
return
}
log.lst(cls, message)
}
// Writef invokes the underlying listener with the specified event and formatted message.
// If the event shouldn't be logged or there is no listener then Writef does nothing.
func Writef(cls Event, format string, a ...interface{}) {
if !Should(cls) {
return
}
log.lst(cls, fmt.Sprintf(format, a...))
}
// TestResetEvents is used for TESTING PURPOSES ONLY.
func TestResetEvents() {
log.cls = nil
}
// logger controls which events to log and writing to the underlying log.
type logger struct {
cls []Event
lst func(Event, string)
}
// the process-wide logger
var log logger
func init() {
initLogging()
}
// split out for testing purposes
func initLogging() {
if cls := os.Getenv("AZURE_SDK_GO_LOGGING"); cls == "all" {
// cls could be enhanced to support a comma-delimited list of log events
log.lst = func(cls Event, msg string) {
// simple console logger, it writes to stderr in the following format:
// [time-stamp] Event: message
fmt.Fprintf(os.Stderr, "[%s] %s: %s\n", time.Now().Format(time.StampMicro), cls, msg)
}
}
}
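
A usage sketch, assuming the public facade in `azcore/log` that re-exports this surface (the `EventRequest`/`EventResponse` names come from that facade; treat them as assumptions here):

```go
package main

import (
	"fmt"

	azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
)

func main() {
	// Route SDK log output to stdout instead of discarding it.
	azlog.SetListener(func(ev azlog.Event, msg string) {
		fmt.Printf("[%s] %s\n", ev, msg)
	})
	// Filter to request/response events; Should() rejects everything else.
	azlog.SetEvents(azlog.EventRequest, azlog.EventResponse)
}
```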

View file

@ -0,0 +1,120 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package temporal
import (
"sync"
"time"
)
// AcquireResource abstracts a method for refreshing a temporal resource.
type AcquireResource[TResource, TState any] func(state TState) (newResource TResource, newExpiration time.Time, err error)
// Resource is a temporal resource (usually a credential) that requires periodic refreshing.
type Resource[TResource, TState any] struct {
// cond is used to synchronize access to the shared resource embodied by the remaining fields
cond *sync.Cond
// acquiring indicates that some thread/goroutine is in the process of acquiring/updating the resource
acquiring bool
// resource contains the value of the shared resource
resource TResource
// expiration indicates when the shared resource expires; it is 0 if the resource was never acquired
expiration time.Time
// lastAttempt indicates when a thread/goroutine last attempted to acquire/update the resource
lastAttempt time.Time
// acquireResource is the callback function that actually acquires the resource
acquireResource AcquireResource[TResource, TState]
}
// NewResource creates a new Resource that uses the specified AcquireResource for refreshing.
func NewResource[TResource, TState any](ar AcquireResource[TResource, TState]) *Resource[TResource, TState] {
return &Resource[TResource, TState]{cond: sync.NewCond(&sync.Mutex{}), acquireResource: ar}
}
// Get returns the underlying resource.
// If the resource is fresh, no refresh is performed.
func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) {
// If the resource is expiring within this time window, update it eagerly.
// This allows other threads/goroutines to keep running by using the not-yet-expired
// resource value while one thread/goroutine updates the resource.
const window = 5 * time.Minute // Update the resource 5 minutes prior to expiration
const backoff = 30 * time.Second // Minimum wait time between eager update attempts
now, acquire, expired, resource := time.Now(), false, false, er.resource
// acquire exclusive lock
er.cond.L.Lock()
for {
expired = er.expiration.IsZero() || er.expiration.Before(now)
if expired {
// The resource was never acquired or has expired
if !er.acquiring {
// If another thread/goroutine is not acquiring/updating the resource, this thread/goroutine will do it
er.acquiring, acquire = true, true
break
}
// Getting here means that this thread/goroutine will wait for the updated resource
} else if er.expiration.Add(-window).Before(now) {
// The resource is valid but is expiring within the time window
if !er.acquiring && er.lastAttempt.Add(backoff).Before(now) {
// If another thread/goroutine is not acquiring/renewing the resource, and none has attempted
// to do so within the last 30 seconds, this thread/goroutine will do it
er.acquiring, acquire = true, true
break
}
// This thread/goroutine will use the existing resource value while another updates it
resource = er.resource
break
} else {
// The resource is not close to expiring, this thread/goroutine should use its current value
resource = er.resource
break
}
// If we get here, wait for the new resource value to be acquired/updated
er.cond.Wait()
}
er.cond.L.Unlock() // Release the lock so no threads/goroutines are blocked
var err error
if acquire {
// This thread/goroutine has been selected to acquire/update the resource
var expiration time.Time
var newValue TResource
er.lastAttempt = now
newValue, expiration, err = er.acquireResource(state)
// Atomically, update the shared resource's new value & expiration.
er.cond.L.Lock()
if err == nil {
// Update resource & expiration, return the new value
resource = newValue
er.resource, er.expiration = resource, expiration
} else if !expired {
// An eager update failed. Discard the error and return the current, still-valid resource value
err = nil
}
er.acquiring = false // Indicate that no thread/goroutine is currently acquiring the resource
// Wake up any waiting threads/goroutines since there is a resource they can ALL use
er.cond.L.Unlock()
er.cond.Broadcast()
}
return resource, err // Return the resource this thread/goroutine can use
}
// Expire marks the resource as expired, ensuring it's refreshed on the next call to Get().
func (er *Resource[TResource, TState]) Expire() {
er.cond.L.Lock()
defer er.cond.L.Unlock()
// Reset the expiration as if we never got this resource to begin with
er.expiration = time.Time{}
}
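
A sketch of the intended use (a cached credential refreshed near expiry); the token type and acquire callback are invented for illustration, and the package is only importable from within the SDK module tree:

```go
package example

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/internal/temporal"
)

type token struct{ value string }

// acquireToken is an AcquireResource callback: it returns a fresh value
// plus its expiration. Here the token is fabricated and lives one hour.
func acquireToken(tenant string) (token, time.Time, error) {
	return token{value: "secret-" + tenant}, time.Now().Add(time.Hour), nil
}

var cache = temporal.NewResource(acquireToken)

// getToken returns the cached token, refreshing it eagerly within the
// 5-minute window; concurrent callers share a single refresh.
func getToken() (token, error) {
	return cache.Get("tenant-a")
}
```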

View file

@ -0,0 +1,7 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package uuid

View file

@ -0,0 +1,76 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package uuid
import (
"crypto/rand"
"errors"
"fmt"
"strconv"
)
// The UUID reserved variants.
const (
reservedRFC4122 byte = 0x40
)
// A UUID representation compliant with specification in RFC4122 document.
type UUID [16]byte
// New returns a new UUID using the RFC4122 algorithm.
func New() (UUID, error) {
u := UUID{}
// Set all bits to pseudo-random values.
// NOTE: this takes a process-wide lock
_, err := rand.Read(u[:])
if err != nil {
return u, err
}
u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122)
var version byte = 4
u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4)
return u, nil
}
// String returns the UUID in "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" format.
func (u UUID) String() string {
return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
}
// Parse parses a string formatted as "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
// or "{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}" into a UUID.
func Parse(s string) (UUID, error) {
var uuid UUID
// ensure format
switch len(s) {
case 36:
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 38:
// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
s = s[1:37]
default:
return uuid, errors.New("invalid UUID format")
}
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
return uuid, errors.New("invalid UUID format")
}
// parse chunks
for i, x := range [16]int{
0, 2, 4, 6,
9, 11,
14, 16,
19, 21,
24, 26, 28, 30, 32, 34} {
b, err := strconv.ParseUint(s[x:x+2], 16, 8)
if err != nil {
return uuid, fmt.Errorf("invalid UUID format: %s", err)
}
uuid[i] = byte(b)
}
return uuid, nil
}
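
A round-trip usage sketch (again, the package path is only importable from inside the SDK module tree):

```go
package example

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
)

func roundTrip() error {
	u, err := uuid.New() // random, version 4, RFC 4122 variant
	if err != nil {
		return err
	}
	s := u.String() // "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
	parsed, err := uuid.Parse("{" + s + "}") // braces are accepted too
	if err != nil {
		return err
	}
	fmt.Println(parsed == u) // true: UUID is a comparable [16]byte
	return nil
}
```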

View file

@ -0,0 +1,54 @@
# Release History
## 0.4.1 (2022-05-12)
### Other Changes
* Updated to latest `azcore` and `internal` modules
## 0.4.0 (2022-04-19)
### Breaking Changes
* Fixed Issue #17150: Renaming/refactoring high-level methods.
* Fixed Issue #16972: Constructors should return clients by reference.
* Renamed the options bags to match the response naming convention. The behaviour of the options bags remains the same.
### Bugs Fixed
* Fixed Issue #17515: `SetTags` options bag missing leaseID.
* Fixed Issue #17423: Drop "Type" suffix from `GeoReplicationStatusType`.
* Fixed Issue #17335: Nil pointer exception when passing a nil options bag in the `ListBlobsFlat` API call.
* Fixed Issue #17188: `BlobURLParts` not supporting VersionID.
* Fixed Issue #17152, Issue #17131, Issue #17061: `UploadStreamToBlockBlob` method ignoring the options bag.
* Fixed Issue #16920: Fixing error handling example.
* Fixed Issue #16786: Refactoring of autorest code generation definition and adding necessary transformations.
* Fixed Issue #16679: Response parsing issue in the List Blobs API.
## 0.3.0 (2022-02-09)
### Breaking Changes
* Updated to latest `azcore`. Public surface area is unchanged.
* [#16978](https://github.com/Azure/azure-sdk-for-go/pull/16978): The `DownloadResponse.Body` parameter is
now `*RetryReaderOptions`.
### Bugs Fixed
* Fixed Issue #16193: `azblob.GetSASToken` wrong signed resource.
* Fixed Issue #16223: `HttpRange` does not expose its fields.
* Fixed Issue #16254: Issue passing reader to upload `BlockBlobClient`.
* Fixed Issue #16295: Problem with listing blobs when using `ListBlobsHierarchy()`.
* Fixed Issue #16542: Empty `StorageError` in the Azurite environment.
* Fixed Issue #16679: Unable to access Metadata when listing blobs.
* Fixed Issue #16816: `ContainerClient.GetSASToken` doesn't allow list permission.
* Fixed Issue #16988: Too many arguments in call to `runtime.NewResponseError`.
## 0.2.0 (2021-11-03)
### Breaking Changes
* Clients now have one constructor per authentication method
## 0.1.0 (2021-09-13)
### Features Added
* This is the initial preview release of the `azblob` library

View file

@ -0,0 +1,21 @@
MIT License
Copyright (c) Microsoft Corporation. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE

View file

@ -0,0 +1,397 @@
# Azure Blob Storage SDK for Go
## Introduction
The Microsoft Azure Storage SDK for Go allows you to build applications that take advantage of Azure's scalable cloud
storage. This is the new beta client module for Azure Blob Storage, which follows
our [Azure SDK Design Guidelines for Go](https://azure.github.io/azure-sdk/golang_introduction.html) and replaces the
previous beta [azblob package](https://github.com/azure/azure-storage-blob-go).
## Getting Started
The Azure Blob SDK can access an Azure Storage account.
### Prerequisites
* Go version 1.18 or higher
* You must have an [Azure storage account][azure_storage_account]. If you need to create one, you can use
the [Azure Cloud Shell](https://shell.azure.com/bash) to create one with these commands (replace `my-resource-group`
and `mystorageaccount` with your own unique names):
(Optional) If you want a new resource group to hold the storage account:
```
az group create --name my-resource-group --location westus2
```
Create the storage account:
```
az storage account create --resource-group my-resource-group --name mystorageaccount
```
The storage account's blob endpoint can be queried with:
```
az storage account show -n mystorageaccount -g my-resource-group --query "primaryEndpoints.blob"
```
You can set your account name as an environment variable with:
```bash
# PowerShell
$ENV:AZURE_STORAGE_ACCOUNT_NAME="mystorageaccount"
# bash
export AZURE_STORAGE_ACCOUNT_NAME="mystorageaccount"
```
Query your storage account keys:
```
az storage account keys list --resource-group my-resource-group -n mystorageaccount
```
Output:
```json
[
{
"creationTime": "2022-02-07T17:18:44.088870+00:00",
"keyName": "key1",
"permissions": "FULL",
"value": "..."
},
{
"creationTime": "2022-02-07T17:18:44.088870+00:00",
"keyName": "key2",
"permissions": "FULL",
"value": "..."
}
]
```
```bash
# PowerShell
$ENV:AZURE_STORAGE_ACCOUNT_KEY="<mystorageaccountkey>"
# Bash
export AZURE_STORAGE_ACCOUNT_KEY="<mystorageaccountkey>"
```
> You can obtain your account key from the Azure Portal under the "Access Keys" section on the left-hand pane of your storage account.
#### Create account
* To create a new Storage account, you can use the [Azure Portal][azure_portal_create_account],
[Azure PowerShell][azure_powershell_create_account], or [Azure CLI][azure_cli_create_account].
### Install the package
* Install the Azure Blob Storage client module for Go with `go get`:
```bash
go get github.com/Azure/azure-sdk-for-go/sdk/storage/azblob
```
> Optional: If you are going to use AAD authentication, install the `azidentity` package:
```bash
go get github.com/Azure/azure-sdk-for-go/sdk/azidentity
```
#### Create the client
`azblob` allows you to interact with three types of resources:
* [Azure storage accounts][azure_storage_account].
* [Containers](https://azure.microsoft.com/en-in/overview/what-is-a-container/#overview) within those storage accounts.
* [Blobs](https://azure.microsoft.com/en-in/services/storage/blobs/#overview) (block blobs, page blobs, and append blobs)
within those containers.
Interaction with these resources starts with an instance of a [client](#clients). To create a client object, you will
need the account's blob service endpoint URL and a credential that allows you to access the account. The `endpoint` can
be found on the page for your storage account in the [Azure Portal][azure_portal_account_url] under the "Access Keys"
section or by running the following Azure CLI command:
```bash
# Get the blob service URL for the account
az storage account show -n mystorageaccount -g my-resource-group --query "primaryEndpoints.blob"
```
Once you have the account URL, it can be used to create the service client:
```golang
cred, err := azblob.NewSharedKeyCredential("myAccountName", "myAccountKey")
handle(err)
serviceClient, err := azblob.NewServiceClientWithSharedKey("https://<myAccountName>.blob.core.windows.net/", cred, nil)
handle(err)
```
For more information about blob service URLs and how to configure custom domain names for Azure Storage, check out
the [official documentation][azure_portal_account_url].
#### Types of credentials
The azblob clients support authentication via Shared Key Credential, Connection String, Shared Access Signature, or any
of the `azidentity` types that implement the `azcore.TokenCredential` interface.
##### 1. Creating the client from a shared key
To use an account [shared key][azure_shared_key] (aka account key or access key), provide the key as a string. This can
be found in your storage account in the [Azure Portal][azure_portal_account_url] under the "Access Keys" section or by
running the following Azure CLI command:
```bash
az storage account keys list -g my-resource-group -n mystorageaccount
```
Use Shared Key authentication as the credential parameter to authenticate the client:
```golang
credential, err := azblob.NewSharedKeyCredential("accountName", "accountKey")
handle(err)
serviceClient, err := azblob.NewServiceClientWithSharedKey("https://<myAccountName>.blob.core.windows.net/", credential, nil)
handle(err)
```
##### 2. Creating the client from a connection string
You can also authenticate with a connection string instead of providing the account URL and credential separately.
To do this, pass the connection string to the client's `NewServiceClientFromConnectionString` method. The
connection string can be found in your storage account in the [Azure Portal][azure_portal_account_url] under the "Access
Keys" section or with the following Azure CLI command:
```bash
az storage account show-connection-string -g my-resource-group -n mystorageaccount
```
```golang
connStr := "DefaultEndpointsProtocol=https;AccountName=<myAccountName>;AccountKey=<myAccountKey>;EndpointSuffix=core.windows.net"
serviceClient, err := azblob.NewServiceClientFromConnectionString(connStr, nil)
```
##### 3. Creating the client from a SAS token
To use a [shared access signature (SAS) token][azure_sas_token], provide the token as a string. You can generate a SAS
token from the Azure Portal
under [Shared access signature](https://docs.microsoft.com/rest/api/storageservices/create-service-sas) or use
the `ServiceClient.GetSASToken()` or `ContainerClient.GetSASToken()` methods.
```golang
credential, err := azblob.NewSharedKeyCredential("accountName", "accountKey")
handle(err)
serviceClient, err := azblob.NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), credential, nil)
handle(err)
// Provide the convenience function with relevant info (services, resource types, permissions, and duration)
// The SAS token will be valid from this moment onwards.
accountSAS, err := serviceClient.GetSASToken(AccountSASResourceTypes{Object: true, Service: true, Container: true},
AccountSASPermissions{Read: true, List: true}, AccountSASServices{Blob: true}, time.Now(), time.Now().Add(48*time.Hour))
handle(err)
sasURL := fmt.Sprintf("https://%s.blob.core.windows.net/?%s", accountName, accountSAS)
// The sasURL can be used to authenticate a client without need for a credential
serviceClient, err = NewServiceClientWithNoCredential(sasURL, nil)
handle(err)
```
### Clients
Three different clients are provided to interact with the various components of the Blob Service:
1. **`ServiceClient`**
* Get and set account settings.
* Query, create, and delete containers within the account.
2. **`ContainerClient`**
* Get and set container access settings, properties, and metadata.
* Create, delete, and query blobs within the container.
* `ContainerLeaseClient` to support container lease management.
3. **`BlobClient`**
* `AppendBlobClient`, `BlockBlobClient`, and `PageBlobClient`
* Get and set blob properties.
* Perform CRUD operations on a given blob.
* `BlobLeaseClient` to support blob lease management.
### Example
```go
// Use your storage account's name and key to create a credential object, used to access your account.
// You can obtain these details from the Azure Portal.
accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME")
if !ok {
handle(errors.New("AZURE_STORAGE_ACCOUNT_NAME could not be found"))
}
accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY")
if !ok {
handle(errors.New("AZURE_STORAGE_ACCOUNT_KEY could not be found"))
}
cred, err := NewSharedKeyCredential(accountName, accountKey)
handle(err)
// Open up a service client.
// You'll need to specify a service URL, which for blob endpoints usually takes the form http(s)://<account>.blob.core.windows.net/
service, err := NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil)
handle(err)
// All operations in the Azure Blob Storage SDK for Go operate on a context.Context, allowing you to control cancellation/timeout.
ctx := context.Background() // This example has no expiry.
// This example showcases several common operations to help you get started, such as:
// ===== 1. Creating a container =====
// First, branch off of the service client and create a container client.
container := service.NewContainerClient("mycontainer")
// Then, fire off a create operation on the container client.
// Note that all service-side requests have an options bag attached, allowing you to specify things like metadata, public access types, etc.
// Specifying nil omits all options.
_, err = container.Create(ctx, nil)
handle(err)
// ===== 2. Uploading/downloading a block blob =====
// We'll specify our data up-front, rather than reading a file for simplicity's sake.
data := "Hello world!"
// Branch off of the container into a block blob client
blockBlob := container.NewBlockBlobClient("HelloWorld.txt")
// Upload data to the block blob
_, err = blockBlob.Upload(ctx, NopCloser(strings.NewReader(data)), nil)
handle(err)
// Download the blob's contents and ensure that the download worked properly
get, err := blockBlob.Download(ctx, nil)
handle(err)
// Open a buffer, reader, and then download!
downloadedData := &bytes.Buffer{}
// RetryReaderOptions has a lot of in-depth tuning abilities, but for the sake of simplicity, we'll omit those here.
reader := get.Body(RetryReaderOptions{})
_, err = downloadedData.ReadFrom(reader)
handle(err)
err = reader.Close()
handle(err)
if data != downloadedData.String() {
handle(errors.New("downloaded data doesn't match uploaded data"))
}
// ===== 3. list blobs =====
// ListBlobsFlat returns a pager: call NextPage to fetch each page of results and PageResponse to read it.
// Always check the pager's Err() method after the loop; iteration stops on either exhaustion or the first error.
pager := container.ListBlobsFlat(nil)
for pager.NextPage(ctx) {
resp := pager.PageResponse()
for _, v := range resp.ContainerListBlobFlatSegmentResult.Segment.BlobItems {
fmt.Println(*v.Name)
}
}
if err = pager.Err(); err != nil {
handle(err)
}
// Delete the blob we created earlier.
_, err = blockBlob.Delete(ctx, nil)
handle(err)
// Delete the container we created earlier.
_, err = container.Delete(ctx, nil)
handle(err)
```
## Troubleshooting
### Error Handling
All I/O operations will return an `error` that can be investigated to discover more information about the error. In
addition, you can investigate the raw response of any response object:
```golang
var storageErr *azblob.StorageError
resp, err := serviceClient.CreateContainer(context.Background(), "testcontainername", nil)
if err != nil && errors.As(err, &storageErr) {
// do something with storageErr.Response()
}
```
### Logging
This module uses the classification-based logging implementation in azcore. To turn on logging
set `AZURE_SDK_GO_LOGGING` to `all`.
If you only want to include logs for `azblob`, you must create your own logger and set the log classification
as `LogCredential`.
To obtain more detailed logging, including request/response bodies and header values, make sure to leave the logger as
default or enable the `LogRequest` and/or `LogResponse` classifications. A logger that only includes credential logs
looks like the following:
```golang
import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
// Set log to output to the console
azlog.SetListener(func(cls azlog.Classification, msg string) {
fmt.Println(msg) // printing log out to the console
})
// Includes only requests and responses in credential logs
azlog.SetClassifications(azlog.Request, azlog.Response)
```
> CAUTION: logs from credentials contain sensitive information.
> These logs must be protected to avoid compromising account security.
>
## License
This project is licensed under the MIT License.
## Provide Feedback
If you encounter bugs or have suggestions, please
[open an issue](https://github.com/Azure/azure-sdk-for-go/issues) and assign the `Azure.AzBlob` label.
## Contributing
This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License
Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For
details, visit https://cla.microsoft.com.
When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate
the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to
do this once across all repos using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
<!-- LINKS -->
[azure_subscription]:https://azure.microsoft.com/free/
[azure_storage_account]:https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-portal
[azure_portal_create_account]:https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-portal
[azure_powershell_create_account]:https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-powershell
[azure_cli_create_account]: https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-cli
[azure_cli_account_url]:https://docs.microsoft.com/cli/azure/storage/account?view=azure-cli-latest#az-storage-account-show
[azure_powershell_account_url]:https://docs.microsoft.com/powershell/module/az.storage/get-azstorageaccount?view=azps-4.6.1
[azure_portal_account_url]:https://docs.microsoft.com/azure/storage/common/storage-account-overview#storage-account-endpoints
[azure_sas_token]:https://docs.microsoft.com/azure/storage/common/storage-sas-overview
[azure_shared_key]:https://docs.microsoft.com/rest/api/storageservices/authorize-with-shared-key
[azure_core_ref_docs]:https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore
[azure_core_readme]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azcore/README.md
[blobs_error_codes]: https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-error-codes
[msft_oss_coc]:https://opensource.microsoft.com/codeofconduct/
[msft_oss_coc_faq]:https://opensource.microsoft.com/codeofconduct/faq/
[contact_msft_oss]:mailto:opencode@microsoft.com
[blobs_rest]: https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-rest-api

View file

@ -0,0 +1,171 @@
# Code Generation - Azure Blob SDK for Golang
<!-- autorest --use=@autorest/go@4.0.0-preview.35 https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/storage/data-plane/Microsoft.BlobStorage/preview/2020-10-02/blob.json --file-prefix="zz_generated_" --modelerfour.lenient-model-deduplication --license-header=MICROSOFT_MIT_NO_VERSION --output-folder=generated/ --module=azblob --openapi-type="data-plane" --credential-scope=none -->
```bash
cd swagger
autorest autorest.md
gofmt -w generated/*
```
### Settings
```yaml
go: true
clear-output-folder: false
version: "^3.0.0"
license-header: MICROSOFT_MIT_NO_VERSION
input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/storage/data-plane/Microsoft.BlobStorage/preview/2020-10-02/blob.json"
module: "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
credential-scope: "https://storage.azure.com/.default"
output-folder: internal/
file-prefix: "zz_generated_"
openapi-type: "data-plane"
verbose: true
security: AzureKey
module-version: "0.3.0"
modelerfour:
group-parameters: false
seal-single-value-enum-by-default: true
lenient-model-deduplication: true
export-clients: false
use: "@autorest/go@4.0.0-preview.36"
```
### Fix BlobMetadata
``` yaml
directive:
- from: swagger-document
where: $.definitions
transform: >
delete $.BlobMetadata["properties"];
```
### Don't include container name or blob in path - we have direct URIs.
``` yaml
directive:
- from: swagger-document
where: $["x-ms-paths"]
transform: >
for (const property in $)
{
if (property.includes('/{containerName}/{blob}'))
{
$[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName") && false == param['$ref'].endsWith("#/parameters/Blob"))});
}
else if (property.includes('/{containerName}'))
{
$[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName"))});
}
}
```
### Remove DataLake stuff.
``` yaml
directive:
- from: swagger-document
where: $["x-ms-paths"]
transform: >
for (const property in $)
{
if (property.includes('filesystem'))
{
delete $[property];
}
}
```
### Remove DataLakeStorageError
``` yaml
directive:
- from: swagger-document
where: $.definitions
transform: >
delete $.DataLakeStorageError;
```
### Fix 304s
``` yaml
directive:
- from: swagger-document
where: $["x-ms-paths"]["/{containerName}/{blob}"]
transform: >
$.get.responses["304"] = {
"description": "The condition specified using HTTP conditional header(s) is not met.",
"x-az-response-name": "ConditionNotMetError",
"headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }
};
```
### Fix GeoReplication
``` yaml
directive:
- from: swagger-document
where: $.definitions
transform: >
delete $.GeoReplication.properties.Status["x-ms-enum"];
$.GeoReplication.properties.Status["x-ms-enum"] = {
"name": "BlobGeoReplicationStatus",
"modelAsString": false
};
```
### Fix RehydratePriority
``` yaml
directive:
- from: swagger-document
where: $.definitions
transform: >
delete $.RehydratePriority["x-ms-enum"];
$.RehydratePriority["x-ms-enum"] = {
"name": "RehydratePriority",
"modelAsString": false
};
```
### Fix BlobDeleteType
``` yaml
directive:
- from: swagger-document
where: $.parameters
transform: >
delete $.BlobDeleteType.enum;
$.BlobDeleteType.enum = [
"None",
"Permanent"
];
```
### Fix EncryptionAlgorithm
``` yaml
directive:
- from: swagger-document
where: $.parameters
transform: >
delete $.EncryptionAlgorithm.enum;
$.EncryptionAlgorithm.enum = [
"None",
"AES256"
];
```
### Fix XML string "ObjectReplicationMetadata" to "OrMetadata"
``` yaml
directive:
- from: swagger-document
where: $.definitions
transform: >
$.BlobItemInternal.properties["OrMetadata"] = $.BlobItemInternal.properties["ObjectReplicationMetadata"];
delete $.BlobItemInternal.properties["ObjectReplicationMetadata"];
```

View file

@ -0,0 +1,30 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"errors"
)
type bytesWriter []byte
func newBytesWriter(b []byte) bytesWriter {
return b
}
func (c bytesWriter) WriteAt(b []byte, off int64) (int, error) {
if off >= int64(len(c)) || off < 0 {
return 0, errors.New("offset value is out of range")
}
n := copy(c[int(off):], b)
if n < len(b) {
return n, errors.New("not enough space for all bytes")
}
return n, nil
}
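
The adapter turns a fixed buffer into an `io.WriterAt` so parallel download chunks can land at their offsets; a standalone sketch of the same behavior (the type is unexported, so it is reproduced here for illustration):

```go
package main

import (
	"errors"
	"fmt"
)

type bytesWriter []byte

func (c bytesWriter) WriteAt(b []byte, off int64) (int, error) {
	if off >= int64(len(c)) || off < 0 {
		return 0, errors.New("offset value is out of range")
	}
	if n := copy(c[int(off):], b); n < len(b) {
		return n, errors.New("not enough space for all bytes")
	}
	return len(b), nil
}

func main() {
	buf := make([]byte, 11)
	w := bytesWriter(buf)
	// Out-of-order writes land at their offsets, as parallel chunks do.
	_, _ = w.WriteAt([]byte("world"), 6)
	_, _ = w.WriteAt([]byte("hello "), 0)
	fmt.Println(string(buf)) // hello world
}
```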

View file

@ -0,0 +1,231 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"bytes"
"context"
"encoding/base64"
"encoding/binary"
"errors"
"fmt"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal"
"io"
"sync"
"sync/atomic"
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
)
// blockWriter provides methods to upload blocks that represent a file to a server and commit them.
// This allows us to provide a local implementation that fakes the server for hermetic testing.
type blockWriter interface {
StageBlock(context.Context, string, io.ReadSeekCloser, *BlockBlobStageBlockOptions) (BlockBlobStageBlockResponse, error)
CommitBlockList(context.Context, []string, *BlockBlobCommitBlockListOptions) (BlockBlobCommitBlockListResponse, error)
}
// copyFromReader copies a source io.Reader to blob storage using concurrent uploads.
// TODO(someone): The existing model provides a buffer size and buffer limit as limiting factors. The buffer size is probably
// useless other than needing to be above some number, as the network stack is going to hack up the buffer over some size. The
// max buffers is providing a cap on how much memory we use (by multiplying it times the buffer size) and how many go routines can upload
// at a time. I think having a single max memory dial would be more efficient. We can choose an internal buffer size that works
// well, 4 MiB or 8 MiB, and auto-scale to as many goroutines within the memory limit. This gives a single dial to tweak and we can
// choose a max value for the memory setting based on internal transfers within Azure (which will give us the maximum throughput model).
// We can even provide a utility to dial this number in for customer networks to optimize their copies.
func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o UploadStreamOptions) (BlockBlobCommitBlockListResponse, error) {
if err := o.defaults(); err != nil {
return BlockBlobCommitBlockListResponse{}, err
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
var err error
generatedUuid, err := uuid.New()
if err != nil {
return BlockBlobCommitBlockListResponse{}, err
}
cp := &copier{
ctx: ctx,
cancel: cancel,
reader: from,
to: to,
id: newID(generatedUuid),
o: o,
errCh: make(chan error, 1),
}
// Send all our chunks until we get an error.
for {
if err = cp.sendChunk(); err != nil {
break
}
}
// If the error is not EOF, then we have a problem.
if err != nil && !errors.Is(err, io.EOF) {
return BlockBlobCommitBlockListResponse{}, err
}
// Close out our upload.
if err := cp.close(); err != nil {
return BlockBlobCommitBlockListResponse{}, err
}
return cp.result, nil
}
// copier streams a file via chunks in parallel from a reader representing a file.
// Do not use directly, instead use copyFromReader().
type copier struct {
// ctx holds the context of a copier. Storing a Context in a struct is normally a faux pas, but in this case
// the copier has the lifetime of a function call, so it's fine.
ctx context.Context
cancel context.CancelFunc
// reader is the source to be written to storage.
reader io.Reader
// to is the location we are writing our chunks to.
to blockWriter
// o contains our options for uploading.
o UploadStreamOptions
// id provides the ids for each chunk.
id *id
//// num is the current chunk we are on.
//num int32
//// ch is used to pass the next chunk of data from our reader to one of the writers.
//ch chan copierChunk
// errCh is used to hold the first error from our concurrent writers.
errCh chan error
// wg provides a count of how many writers we are waiting to finish.
wg sync.WaitGroup
// result holds the final result from blob storage after we have submitted all chunks.
result BlockBlobCommitBlockListResponse
}
type copierChunk struct {
buffer []byte
id string
length int
}
// getErr returns an error by priority. First, if a function set an error, it returns that error. Next, if the Context has an error
// it returns that error. Otherwise, it is nil. getErr supports only returning an error once per copier.
func (c *copier) getErr() error {
select {
case err := <-c.errCh:
return err
default:
}
return c.ctx.Err()
}
// sendChunk reads data from our internal reader, creates a chunk, and sends it to be written via a channel.
// sendChunk returns io.EOF when the reader returns an io.EOF or io.ErrUnexpectedEOF.
func (c *copier) sendChunk() error {
if err := c.getErr(); err != nil {
return err
}
buffer := c.o.TransferManager.Get()
if len(buffer) == 0 {
return fmt.Errorf("TransferManager returned a 0 size buffer, this is a bug in the manager")
}
n, err := io.ReadFull(c.reader, buffer)
if n > 0 {
// Some data was read, schedule the write.
id := c.id.next()
c.wg.Add(1)
c.o.TransferManager.Run(
func() {
defer c.wg.Done()
c.write(copierChunk{buffer: buffer, id: id, length: n})
},
)
} else {
// Return the unused buffer to the manager.
c.o.TransferManager.Put(buffer)
}
if err == nil {
return nil
} else if err == io.EOF || err == io.ErrUnexpectedEOF {
return io.EOF
}
if cerr := c.getErr(); cerr != nil {
return cerr
}
return err
}
// write uploads a chunk to blob storage.
func (c *copier) write(chunk copierChunk) {
defer c.o.TransferManager.Put(chunk.buffer)
if err := c.ctx.Err(); err != nil {
return
}
stageBlockOptions := c.o.getStageBlockOptions()
_, err := c.to.StageBlock(c.ctx, chunk.id, internal.NopCloser(bytes.NewReader(chunk.buffer[:chunk.length])), stageBlockOptions)
if err != nil {
c.errCh <- fmt.Errorf("write error: %w", err)
return
}
}
// close commits our blocks to blob storage and closes our writer.
func (c *copier) close() error {
c.wg.Wait()
if err := c.getErr(); err != nil {
return err
}
var err error
commitBlockListOptions := c.o.getCommitBlockListOptions()
c.result, err = c.to.CommitBlockList(c.ctx, c.id.issued(), commitBlockListOptions)
return err
}
// id allows the creation of unique IDs based on UUID4 + an int32. This auto-increments.
type id struct {
u [64]byte
num uint32
all []string
}
// newID constructs a new id.
func newID(uu uuid.UUID) *id {
u := [64]byte{}
copy(u[:], uu[:])
return &id{u: u}
}
// next returns the next ID.
func (id *id) next() string {
defer atomic.AddUint32(&id.num, 1)
binary.BigEndian.PutUint32(id.u[len(uuid.UUID{}):], atomic.LoadUint32(&id.num))
str := base64.StdEncoding.EncodeToString(id.u[:])
id.all = append(id.all, str)
return str
}
// issued returns all ids that have been issued. This returned value shares the internal slice, so it is not safe to modify the return.
// The value is only valid until the next time next() is called.
func (id *id) issued() []string {
return id.all
}
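
A sketch of driving this path through `BlockBlobClient.UploadStream` with an explicit `TransferManager`; `azblob.NewStaticBuffer` and its signature are assumptions based on this module's transfer-manager helpers:

```go
package example

import (
	"context"
	"io"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// uploadStream streams body to blockBlob using a fixed pool of
// 4 MiB buffers and up to 4 concurrent StageBlock calls.
func uploadStream(ctx context.Context, blockBlob *azblob.BlockBlobClient, body io.Reader) error {
	tm, err := azblob.NewStaticBuffer(4*1024*1024, 4) // assumed helper
	if err != nil {
		return err
	}
	defer tm.Close()
	_, err = blockBlob.UploadStream(ctx, body, azblob.UploadStreamOptions{TransferManager: tm})
	return err
}
```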

View file

@ -0,0 +1,28 @@
trigger:
branches:
include:
- main
- feature/*
- hotfix/*
- release/*
paths:
include:
- sdk/storage/azblob
pr:
branches:
include:
- main
- feature/*
- hotfix/*
- release/*
paths:
include:
- sdk/storage/azblob
stages:
- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml
parameters:
ServiceDirectory: 'storage/azblob'
RunLiveTests: true

View file

@ -0,0 +1,39 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
package azblob
import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
type connection struct {
u string
p runtime.Pipeline
}
// newConnection creates an instance of the connection type with the specified endpoint.
// Pass nil to accept the default options; this is the same as passing a zero-value options.
func newConnection(endpoint string, options *azcore.ClientOptions) *connection {
cp := azcore.ClientOptions{}
if options != nil {
cp = *options
}
return &connection{u: endpoint, p: runtime.NewPipeline(moduleName, moduleVersion, runtime.PipelineOptions{}, &cp)}
}
// Endpoint returns the connection's endpoint.
func (c *connection) Endpoint() string {
return c.u
}
// Pipeline returns the connection's pipeline.
func (c *connection) Pipeline() runtime.Pipeline {
return c.p
}

View file

@ -0,0 +1,46 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
// SASVersion is the storage service version used when generating SAS tokens.
var SASVersion = "2019-12-12"
//nolint
const (
// BlockBlobMaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload.
BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB
// BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock.
BlockBlobMaxStageBlockBytes = 4000 * 1024 * 1024 // 4GB
// BlockBlobMaxBlocks indicates the maximum number of blocks allowed in a block blob.
BlockBlobMaxBlocks = 50000
// PageBlobPageBytes indicates the number of bytes in a page (512).
PageBlobPageBytes = 512
// BlobDefaultDownloadBlockSize is the default block size.
BlobDefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB
)
const (
headerAuthorization = "Authorization"
headerXmsDate = "x-ms-date"
headerContentLength = "Content-Length"
headerContentEncoding = "Content-Encoding"
headerContentLanguage = "Content-Language"
headerContentType = "Content-Type"
headerContentMD5 = "Content-MD5"
headerIfModifiedSince = "If-Modified-Since"
headerIfMatch = "If-Match"
headerIfNoneMatch = "If-None-Match"
headerIfUnmodifiedSince = "If-Unmodified-Since"
headerRange = "Range"
)
const (
tokenScope = "https://storage.azure.com/.default"
)

View file

@ -0,0 +1,214 @@
//go:build go1.18
// +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.
/*
Package azblob provides access to Azure Blob Storage.
The azblob package is capable of:
- Creating, deleting, and querying containers in an account
- Creating, deleting, and querying blobs in a container
- Creating Shared Access Signatures for authentication
Types of Resources
The azblob package allows you to interact with three types of resources:
* Azure storage accounts.
* Containers within those storage accounts.
* Blobs (block blobs, page blobs, and append blobs) within those containers.
The Azure Blob Storage (azblob) client library for Go allows you to interact with each of these components through the use of a dedicated client object.
To create a client object, you will need the account's blob service endpoint URL and a credential that allows you to access the account.
Types of Credentials
The clients support different forms of authentication.
The azblob library supports any of the `azcore.TokenCredential` interfaces, authorization via a Connection String,
or authorization with a Shared Access Signature token.
Using a Shared Key
To use an account shared key (aka account key or access key), provide the key as a string.
This can be found in your storage account in the Azure Portal under the "Access Keys" section.
Use the key as the credential parameter to authenticate the client:
accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME")
if !ok {
panic("AZURE_STORAGE_ACCOUNT_NAME could not be found")
}
accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY")
if !ok {
panic("AZURE_STORAGE_ACCOUNT_KEY could not be found")
}
serviceURL := fmt.Sprintf("https://%s.blob.core.windows.net/", accountName)
cred, err := azblob.NewSharedKeyCredential(accountName, accountKey)
handle(err)
serviceClient, err := azblob.NewServiceClientWithSharedKey(serviceURL, cred, nil)
handle(err)
fmt.Println(serviceClient.URL())
Using a Connection String
Depending on your use case and authorization method, you may prefer to initialize a client instance with a connection string instead of providing the account URL and credential separately.
To do this, pass the connection string to the service client's `NewServiceClientFromConnectionString` method.
The connection string can be found in your storage account in the Azure Portal under the "Access Keys" section.
connStr := "DefaultEndpointsProtocol=https;AccountName=<my_account_name>;AccountKey=<my_account_key>;EndpointSuffix=core.windows.net"
serviceClient, err := azblob.NewServiceClientFromConnectionString(connStr, nil)
Using a Shared Access Signature (SAS) Token
To use a shared access signature (SAS) token, provide the token at the end of your service URL.
You can generate a SAS token from the Azure Portal under Shared Access Signature or use the ServiceClient.GetSASToken() method.
accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME")
if !ok {
panic("AZURE_STORAGE_ACCOUNT_NAME could not be found")
}
accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY")
if !ok {
panic("AZURE_STORAGE_ACCOUNT_KEY could not be found")
}
serviceURL := fmt.Sprintf("https://%s.blob.core.windows.net/", accountName)
cred, err := azblob.NewSharedKeyCredential(accountName, accountKey)
handle(err)
serviceClient, err := azblob.NewServiceClientWithSharedKey(serviceURL, cred, nil)
handle(err)
fmt.Println(serviceClient.URL())
// Alternatively, you can create SAS on the fly
resources := azblob.AccountSASResourceTypes{Service: true}
permission := azblob.AccountSASPermissions{Read: true}
start := time.Now()
expiry := start.AddDate(0, 0, 1)
serviceURLWithSAS, err := serviceClient.GetSASURL(resources, permission, start, expiry)
handle(err)
serviceClientWithSAS, err := azblob.NewServiceClientWithNoCredential(serviceURLWithSAS, nil)
handle(err)
fmt.Println(serviceClientWithSAS.URL())
Types of Clients
There are three different clients provided to interact with the various components of the Blob Service:
1. **`ServiceClient`**
* Get and set account settings.
* Query, create, and delete containers within the account.
2. **`ContainerClient`**
* Get and set container access settings, properties, and metadata.
* Create, delete, and query blobs within the container.
* `ContainerLeaseClient` to support container lease management.
3. **`BlobClient`**
* `AppendBlobClient`, `BlockBlobClient`, and `PageBlobClient`
* Get and set blob properties.
* Perform CRUD operations on a given blob.
* `BlobLeaseClient` to support blob lease management.
Examples
// Your account name and key can be obtained from the Azure Portal.
accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME")
if !ok {
panic("AZURE_STORAGE_ACCOUNT_NAME could not be found")
}
accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY")
if !ok {
panic("AZURE_STORAGE_ACCOUNT_KEY could not be found")
}
cred, err := azblob.NewSharedKeyCredential(accountName, accountKey)
handle(err)
// The service URL for blob endpoints is usually in the form: http(s)://<account>.blob.core.windows.net/
serviceClient, err := azblob.NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil)
handle(err)
// ===== 1. Create a container =====
// First, create a container client, and use the Create method to create a new container in your account
containerClient, err := serviceClient.NewContainerClient("testcontainer")
handle(err)
// All APIs accept an options bag struct as a parameter.
// The options bag struct allows you to specify optional parameters such as metadata, public access types, etc.
// If you want to use the default options, pass in nil.
_, err = containerClient.Create(context.TODO(), nil)
handle(err)
// ===== 2. Upload and Download a block blob =====
uploadData := "Hello world!"
// Create a new blockBlobClient from the containerClient
blockBlobClient, err := containerClient.NewBlockBlobClient("HelloWorld.txt")
handle(err)
// Upload data to the block blob
blockBlobUploadOptions := azblob.BlockBlobUploadOptions{
Metadata: map[string]string{"Foo": "Bar"},
TagsMap: map[string]string{"Year": "2022"},
}
_, err = blockBlobClient.Upload(context.TODO(), streaming.NopCloser(strings.NewReader(uploadData)), &blockBlobUploadOptions)
handle(err)
// Download the blob's contents and ensure that the download worked properly
blobDownloadResponse, err := blockBlobClient.Download(context.TODO(), nil)
handle(err)
// Use the bytes.Buffer object to read the downloaded data.
// RetryReaderOptions has a lot of in-depth tuning abilities, but for the sake of simplicity, we'll omit those here.
reader := blobDownloadResponse.Body(nil)
downloadData, err := ioutil.ReadAll(reader)
handle(err)
if string(downloadData) != uploadData {
handle(errors.New("Uploaded data should be same as downloaded data"))
}
if err = reader.Close(); err != nil {
handle(err)
return
}
// ===== 3. List blobs =====
// List methods return a pager object which can be used to iterate over the results of a paging operation.
// Use NextPage(context.Context) to fetch the next page of results.
// PageResponse() can be used to read the results of the current page.
// Always check the Err() method after paging completes to see if an error was returned by the pager. A pager will return either an error or the page of results.
pager := containerClient.ListBlobsFlat(nil)
for pager.NextPage(context.TODO()) {
resp := pager.PageResponse()
for _, v := range resp.Segment.BlobItems {
fmt.Println(*v.Name)
}
}
if err = pager.Err(); err != nil {
handle(err)
}
// Delete the blob.
_, err = blockBlobClient.Delete(context.TODO(), nil)
handle(err)
// Delete the container.
_, err = containerClient.Delete(context.TODO(), nil)
handle(err)
*/
package azblob

View file

@ -0,0 +1,316 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"context"
"encoding/base64"
"io"
"net/http"
"sync"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal"
"bytes"
"errors"
"os"
)
// uploadReaderAtToBlockBlob uploads a buffer in blocks to a block blob.
func (bb *BlockBlobClient) uploadReaderAtToBlockBlob(ctx context.Context, reader io.ReaderAt, readerSize int64, o UploadOption) (*http.Response, error) {
if o.BlockSize == 0 {
// If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error
if readerSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks {
return nil, errors.New("buffer is too large to upload to a block blob")
}
// If bufferSize <= BlockBlobMaxUploadBlobBytes, then Upload should be used with just 1 I/O request
if readerSize <= BlockBlobMaxUploadBlobBytes {
o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified
} else {
o.BlockSize = readerSize / BlockBlobMaxBlocks // buffer / max blocks = block size to use all 50,000 blocks
if o.BlockSize < BlobDefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB
o.BlockSize = BlobDefaultDownloadBlockSize
}
// StageBlock will be called with blockSize blocks and a Parallelism of (BufferSize / BlockSize).
}
}
if readerSize <= BlockBlobMaxUploadBlobBytes {
// If the size can fit in 1 Upload call, do it this way
var body io.ReadSeeker = io.NewSectionReader(reader, 0, readerSize)
if o.Progress != nil {
body = streaming.NewRequestProgress(internal.NopCloser(body), o.Progress)
}
uploadBlockBlobOptions := o.getUploadBlockBlobOptions()
resp, err := bb.Upload(ctx, internal.NopCloser(body), uploadBlockBlobOptions)
return resp.RawResponse, err
}
var numBlocks = uint16(((readerSize - 1) / o.BlockSize) + 1)
blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs
progress := int64(0)
progressLock := &sync.Mutex{}
err := DoBatchTransfer(ctx, BatchTransferOptions{
OperationName: "uploadReaderAtToBlockBlob",
TransferSize: readerSize,
ChunkSize: o.BlockSize,
Parallelism: o.Parallelism,
Operation: func(offset int64, count int64, ctx context.Context) error {
// This function is called once per block.
// It is passed this block's offset within the buffer and its count of bytes
// Prepare to read the proper block/section of the buffer
var body io.ReadSeeker = io.NewSectionReader(reader, offset, count)
blockNum := offset / o.BlockSize
if o.Progress != nil {
blockProgress := int64(0)
body = streaming.NewRequestProgress(internal.NopCloser(body),
func(bytesTransferred int64) {
diff := bytesTransferred - blockProgress
blockProgress = bytesTransferred
progressLock.Lock() // 1 goroutine at a time gets progress report
progress += diff
o.Progress(progress)
progressLock.Unlock()
})
}
// Block IDs are unique values to avoid issues if 2+ clients are uploading blocks
// at the same time causing PutBlockList to get a mix of blocks from all the clients.
generatedUuid, err := uuid.New()
if err != nil {
return err
}
blockIDList[blockNum] = base64.StdEncoding.EncodeToString([]byte(generatedUuid.String()))
stageBlockOptions := o.getStageBlockOptions()
_, err = bb.StageBlock(ctx, blockIDList[blockNum], internal.NopCloser(body), stageBlockOptions)
return err
},
})
if err != nil {
return nil, err
}
// All put blocks were successful, call Put Block List to finalize the blob
commitBlockListOptions := o.getCommitBlockListOptions()
resp, err := bb.CommitBlockList(ctx, blockIDList, commitBlockListOptions)
return resp.RawResponse, err
}
// UploadBuffer uploads a buffer in blocks to a block blob.
func (bb *BlockBlobClient) UploadBuffer(ctx context.Context, b []byte, o UploadOption) (*http.Response, error) {
return bb.uploadReaderAtToBlockBlob(ctx, bytes.NewReader(b), int64(len(b)), o)
}
// UploadFile uploads a file in blocks to a block blob.
func (bb *BlockBlobClient) UploadFile(ctx context.Context, file *os.File, o UploadOption) (*http.Response, error) {
stat, err := file.Stat()
if err != nil {
return nil, err
}
return bb.uploadReaderAtToBlockBlob(ctx, file, stat.Size(), o)
}
// ---------------------------------------------------------------------------------------------------------------------
// UploadStream copies the file held in io.Reader to the Blob at blockBlobClient.
// A Context deadline or cancellation will cause this to error.
func (bb *BlockBlobClient) UploadStream(ctx context.Context, body io.Reader, o UploadStreamOptions) (BlockBlobCommitBlockListResponse, error) {
if err := o.defaults(); err != nil {
return BlockBlobCommitBlockListResponse{}, err
}
// If we used the default manager, we need to close it.
if o.transferMangerNotSet {
defer o.TransferManager.Close()
}
result, err := copyFromReader(ctx, body, bb, o)
if err != nil {
return BlockBlobCommitBlockListResponse{}, err
}
return result, nil
}
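// Editor's sketch (illustrative, not part of the SDK): streaming an arbitrary
// io.Reader with no known length into a block blob. Zero-valued options fall
// back to the default TransferManager, which UploadStream closes when done.
func exampleUploadStream(ctx context.Context, bb *BlockBlobClient, r io.Reader) error {
	_, err := bb.UploadStream(ctx, r, UploadStreamOptions{})
	return err
}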
// ---------------------------------------------------------------------------------------------------------------------
// DownloadToWriterAt downloads an Azure blob to a WriterAt in parallel.
// Offset and count are optional, pass 0 for both to download the entire blob.
func (b *BlobClient) DownloadToWriterAt(ctx context.Context, offset int64, count int64, writer io.WriterAt, o DownloadOptions) error {
if o.BlockSize == 0 {
o.BlockSize = BlobDefaultDownloadBlockSize
}
if count == CountToEnd { // If size not specified, calculate it
// If we don't have the length at all, get it
downloadBlobOptions := o.getDownloadBlobOptions(0, CountToEnd, nil)
dr, err := b.Download(ctx, downloadBlobOptions)
if err != nil {
return err
}
count = *dr.ContentLength - offset
}
if count <= 0 {
// The blob is empty; there is nothing to download.
return nil
}
// Prepare and do parallel download.
progress := int64(0)
progressLock := &sync.Mutex{}
err := DoBatchTransfer(ctx, BatchTransferOptions{
OperationName: "downloadBlobToWriterAt",
TransferSize: count,
ChunkSize: o.BlockSize,
Parallelism: o.Parallelism,
Operation: func(chunkStart int64, count int64, ctx context.Context) error {
downloadBlobOptions := o.getDownloadBlobOptions(chunkStart+offset, count, nil)
dr, err := b.Download(ctx, downloadBlobOptions)
if err != nil {
return err
}
body := dr.Body(&o.RetryReaderOptionsPerBlock)
if o.Progress != nil {
rangeProgress := int64(0)
body = streaming.NewResponseProgress(
body,
func(bytesTransferred int64) {
diff := bytesTransferred - rangeProgress
rangeProgress = bytesTransferred
progressLock.Lock()
progress += diff
o.Progress(progress)
progressLock.Unlock()
})
}
_, err = io.Copy(newSectionWriter(writer, chunkStart, count), body)
if err != nil {
return err
}
err = body.Close()
return err
},
})
if err != nil {
return err
}
return nil
}
// DownloadToBuffer downloads an Azure blob to a buffer in parallel.
// Offset and count are optional, pass 0 for both to download the entire blob.
func (b *BlobClient) DownloadToBuffer(ctx context.Context, offset int64, count int64, _bytes []byte, o DownloadOptions) error {
return b.DownloadToWriterAt(ctx, offset, count, newBytesWriter(_bytes), o)
}
// DownloadToFile downloads an Azure blob to a local file.
// The file is truncated if its size doesn't match the blob's size.
// Offset and count are optional, pass 0 for both to download the entire blob.
func (b *BlobClient) DownloadToFile(ctx context.Context, offset int64, count int64, file *os.File, o DownloadOptions) error {
// 1. Calculate the size of the destination file
var size int64
if count == CountToEnd {
// Try to get Azure blob's size
getBlobPropertiesOptions := o.getBlobPropertiesOptions()
props, err := b.GetProperties(ctx, getBlobPropertiesOptions)
if err != nil {
return err
}
size = *props.ContentLength - offset
} else {
size = count
}
// 2. Compare and try to resize local file's size if it doesn't match Azure blob's size.
stat, err := file.Stat()
if err != nil {
return err
}
if stat.Size() != size {
if err = file.Truncate(size); err != nil {
return err
}
}
if size > 0 {
return b.DownloadToWriterAt(ctx, offset, size, file, o)
} else { // if the blob's size is 0, there is no need to download it
return nil
}
}
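// Editor's sketch (illustrative, not part of the SDK): restoring a whole blob
// into a local file. Offset 0 with CountToEnd downloads the entire blob; the
// Parallelism value is arbitrary.
func exampleDownloadToFile(ctx context.Context, b *BlobClient, path string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	return b.DownloadToFile(ctx, 0, CountToEnd, f, DownloadOptions{Parallelism: 4})
}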
// ---------------------------------------------------------------------------------------------------------------------
// DoBatchTransfer helps to execute operations in a batch manner.
// It can be used to build custom batch operations for scenarios that the SDK does not cover.
func DoBatchTransfer(ctx context.Context, o BatchTransferOptions) error {
if o.ChunkSize == 0 {
return errors.New("ChunkSize cannot be 0")
}
if o.Parallelism == 0 {
o.Parallelism = 5 // default Parallelism
}
// Prepare and do parallel operations.
numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1)
operationChannel := make(chan func() error, o.Parallelism) // Create the channel that releases 'Parallelism' goroutines concurrently
operationResponseChannel := make(chan error, numChunks) // Holds each response
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// Create the goroutines that process each operation (in parallel).
for g := uint16(0); g < o.Parallelism; g++ {
go func() {
for f := range operationChannel {
err := f()
operationResponseChannel <- err
}
}()
}
// Add each chunk's operation to the channel.
for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
curChunkSize := o.ChunkSize
if chunkNum == numChunks-1 { // Last chunk
curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total
}
offset := int64(chunkNum) * o.ChunkSize
operationChannel <- func() error {
return o.Operation(offset, curChunkSize, ctx)
}
}
close(operationChannel)
// Wait for the operations to complete.
var firstErr error = nil
for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
responseError := <-operationResponseChannel
// record the first error (the original error which should cause the other chunks to fail with canceled context)
if responseError != nil && firstErr == nil {
cancel() // As soon as any operation fails, cancel all remaining operation calls
firstErr = responseError
}
}
return firstErr
}
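// Editor's sketch (illustrative, not part of the SDK): driving a custom
// chunked operation through DoBatchTransfer. The Operation body is a stub; a
// real caller would read or write the byte range [offset, offset+count).
func exampleBatchTransfer(ctx context.Context, totalSize int64) error {
	return DoBatchTransfer(ctx, BatchTransferOptions{
		OperationName: "exampleBatchTransfer",
		TransferSize:  totalSize,
		ChunkSize:     4 * 1024 * 1024,
		Parallelism:   4,
		Operation: func(offset int64, count int64, ctx context.Context) error {
			// Returning a non-nil error cancels the remaining chunks via the
			// shared context; the first error is the one reported.
			return nil
		},
	})
}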
// ---------------------------------------------------------------------------------------------------------------------


@@ -0,0 +1,150 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package internal
import (
"bytes"
"context"
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
"strconv"
"time"
)
// CtxWithHTTPHeaderKey is used as a context key for adding/retrieving http.Header.
type CtxWithHTTPHeaderKey struct{}
// CtxWithRetryOptionsKey is used as a context key for adding/retrieving RetryOptions.
type CtxWithRetryOptionsKey struct{}
type nopCloser struct {
io.ReadSeeker
}
func (n nopCloser) Close() error {
return nil
}
// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker.
func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {
return nopCloser{rs}
}
// BodyDownloadPolicyOpValues is the struct containing the per-operation values
type BodyDownloadPolicyOpValues struct {
Skip bool
}
func NewResponseError(inner error, resp *http.Response) error {
return &ResponseError{inner: inner, resp: resp}
}
type ResponseError struct {
inner error
resp *http.Response
}
// Error implements the error interface for type ResponseError.
func (e *ResponseError) Error() string {
return e.inner.Error()
}
// Unwrap returns the inner error.
func (e *ResponseError) Unwrap() error {
return e.inner
}
// RawResponse returns the HTTP response associated with this error.
func (e *ResponseError) RawResponse() *http.Response {
return e.resp
}
// NonRetriable indicates this error is non-transient.
func (e *ResponseError) NonRetriable() {
// marker method
}
// Delay waits for the duration to elapse or the context to be cancelled.
func Delay(ctx context.Context, delay time.Duration) error {
select {
case <-time.After(delay):
return nil
case <-ctx.Done():
return ctx.Err()
}
}
// ErrNoBody is returned if the response didn't contain a body.
var ErrNoBody = errors.New("the response did not contain a body")
// GetJSON reads the response body into a raw JSON object.
// It returns ErrNoBody if there was no content.
func GetJSON(resp *http.Response) (map[string]interface{}, error) {
body, err := ioutil.ReadAll(resp.Body)
defer resp.Body.Close()
if err != nil {
return nil, err
}
if len(body) == 0 {
return nil, ErrNoBody
}
// put the body back so it's available to others
resp.Body = ioutil.NopCloser(bytes.NewReader(body))
// unmarshal the body to get the value
var jsonBody map[string]interface{}
if err = json.Unmarshal(body, &jsonBody); err != nil {
return nil, err
}
return jsonBody, nil
}
const HeaderRetryAfter = "Retry-After"
// RetryAfter returns non-zero if the response contains a Retry-After header value.
func RetryAfter(resp *http.Response) time.Duration {
if resp == nil {
return 0
}
ra := resp.Header.Get(HeaderRetryAfter)
if ra == "" {
return 0
}
// retry-after values are expressed in either number of
// seconds or an HTTP-date indicating when to try again
if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 {
return time.Duration(retryAfter) * time.Second
} else if t, err := time.Parse(time.RFC1123, ra); err == nil {
return time.Until(t)
}
return 0
}
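// Editor's sketch (illustrative, not part of the SDK): honoring a Retry-After
// header before retrying a request, combining the two helpers above.
func exampleHonorRetryAfter(ctx context.Context, resp *http.Response) error {
	if d := RetryAfter(resp); d > 0 {
		return Delay(ctx, d) // sleeps, or returns early if ctx is cancelled
	}
	return nil
}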
// HasStatusCode returns true if the Response's status code is one of the specified values.
func HasStatusCode(resp *http.Response, statusCodes ...int) bool {
if resp == nil {
return false
}
for _, sc := range statusCodes {
if resp.StatusCode == sc {
return true
}
}
return false
}
const defaultScope = "/.default"
// EndpointToScope converts the provided URL endpoint to its default scope.
func EndpointToScope(endpoint string) string {
if endpoint[len(endpoint)-1] != '/' {
endpoint += "/"
}
return endpoint + defaultScope
}


@@ -0,0 +1,53 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"errors"
"io"
)
type sectionWriter struct {
count int64
offset int64
position int64
writerAt io.WriterAt
}
func newSectionWriter(c io.WriterAt, off int64, count int64) *sectionWriter {
return &sectionWriter{
count: count,
offset: off,
writerAt: c,
}
}
func (c *sectionWriter) Write(p []byte) (int, error) {
remaining := c.count - c.position
if remaining <= 0 {
return 0, errors.New("end of section reached")
}
slice := p
if int64(len(slice)) > remaining {
slice = slice[:remaining]
}
n, err := c.writerAt.WriteAt(slice, c.offset+c.position)
c.position += int64(n)
if err != nil {
return n, err
}
if len(p) > n {
return n, errors.New("not enough space for all bytes")
}
return n, nil
}


@@ -0,0 +1,154 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"fmt"
"sync"
)
// TransferManager provides a buffer and thread pool manager for certain transfer options.
// It is undefined behavior if code outside this package calls any of these methods.
type TransferManager interface {
// Get provides a buffer that will be used to read data into and write out to the stream.
// It is guaranteed by this package to not read or write beyond the size of the slice.
Get() []byte
// Put may or may not put the buffer into underlying storage, depending on settings.
// The buffer must not be touched after this has been called.
Put(b []byte) // nolint
// Run will use a goroutine pool entry to run a function. This blocks until a pool
// goroutine becomes available.
Run(func())
// Close shuts down all internal goroutines. This must be called when the TransferManager
// will no longer be used. Not closing it will cause a goroutine leak.
Close()
}
// ---------------------------------------------------------------------------------------------------------------------
type staticBuffer struct {
buffers chan []byte
size int
threadpool chan func()
}
// NewStaticBuffer creates a TransferManager that will use a channel as a circular buffer
// that can hold "max" buffers of "size". The goroutine pool is also sized at max. This
// can be shared between calls if you wish to control maximum memory and concurrency with
// multiple concurrent calls.
func NewStaticBuffer(size, max int) (TransferManager, error) {
if size < 1 || max < 1 {
return nil, fmt.Errorf("cannot be called with size or max set to < 1")
}
if size < _1MiB {
return nil, fmt.Errorf("cannot have size < 1MiB")
}
threadpool := make(chan func(), max)
buffers := make(chan []byte, max)
for i := 0; i < max; i++ {
go func() {
for f := range threadpool {
f()
}
}()
buffers <- make([]byte, size)
}
return staticBuffer{
buffers: buffers,
size: size,
threadpool: threadpool,
}, nil
}
// Get implements TransferManager.Get().
func (s staticBuffer) Get() []byte {
return <-s.buffers
}
// Put implements TransferManager.Put().
func (s staticBuffer) Put(b []byte) { // nolint
select {
case s.buffers <- b:
default: // This shouldn't happen, but just in case they call Put() with their own buffer.
}
}
// Run implements TransferManager.Run().
func (s staticBuffer) Run(f func()) {
s.threadpool <- f
}
// Close implements TransferManager.Close().
func (s staticBuffer) Close() {
close(s.threadpool)
close(s.buffers)
}
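// Editor's sketch (illustrative, not part of the SDK): a shared
// TransferManager that caps memory at four 1 MiB buffers across several
// concurrent uploads. Wiring it into UploadStreamOptions.TransferManager is
// assumed from this SDK version; close it only after the last upload.
func exampleSharedStaticBuffer() (TransferManager, error) {
	return NewStaticBuffer(1024*1024, 4)
}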
// ---------------------------------------------------------------------------------------------------------------------
type syncPool struct {
threadpool chan func()
pool sync.Pool
}
// NewSyncPool creates a TransferManager that will use a sync.Pool
// that can hold a non-capped number of buffers constrained by concurrency. This
// can be shared between calls if you wish to share memory and concurrency.
func NewSyncPool(size, concurrency int) (TransferManager, error) {
if size < 1 || concurrency < 1 {
return nil, fmt.Errorf("cannot be called with size or max set to < 1")
}
if size < _1MiB {
return nil, fmt.Errorf("cannot have size < 1MiB")
}
threadpool := make(chan func(), concurrency)
for i := 0; i < concurrency; i++ {
go func() {
for f := range threadpool {
f()
}
}()
}
return &syncPool{
threadpool: threadpool,
pool: sync.Pool{
New: func() interface{} {
return make([]byte, size)
},
},
}, nil
}
// Get implements TransferManager.Get().
func (s *syncPool) Get() []byte {
return s.pool.Get().([]byte)
}
// Put implements TransferManager.Put().
// nolint
func (s *syncPool) Put(b []byte) {
s.pool.Put(b)
}
// Run implements TransferManager.Run().
func (s *syncPool) Run(f func()) {
s.threadpool <- f
}
// Close implements TransferManager.Close().
func (s *syncPool) Close() {
close(s.threadpool)
}


@@ -0,0 +1,67 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"bytes"
"fmt"
)
// AccessPolicyPermission type simplifies creating the permissions string for a container's access policy.
// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field.
type AccessPolicyPermission struct {
Read, Add, Create, Write, Delete, List bool
}
// String produces the access policy permission string for an Azure Storage container.
// Call this method to set AccessPolicy's Permission field.
func (p AccessPolicyPermission) String() string {
var b bytes.Buffer
if p.Read {
b.WriteRune('r')
}
if p.Add {
b.WriteRune('a')
}
if p.Create {
b.WriteRune('c')
}
if p.Write {
b.WriteRune('w')
}
if p.Delete {
b.WriteRune('d')
}
if p.List {
b.WriteRune('l')
}
return b.String()
}
// Parse initializes the AccessPolicyPermission's fields from a string.
func (p *AccessPolicyPermission) Parse(s string) error {
*p = AccessPolicyPermission{} // Clear the flags
for _, r := range s {
switch r {
case 'r':
p.Read = true
case 'a':
p.Add = true
case 'c':
p.Create = true
case 'w':
p.Write = true
case 'd':
p.Delete = true
case 'l':
p.List = true
default:
return fmt.Errorf("invalid permission: '%v'", r)
}
}
return nil
}
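// Editor's sketch (illustrative, not part of the SDK): a String/Parse
// round-trip. Flags are always emitted in the fixed order "racwdl".
func exampleAccessPolicyPermission() error {
	p := AccessPolicyPermission{Read: true, Write: true, List: true}
	s := p.String() // "rwl"
	var parsed AccessPolicyPermission
	return parsed.Parse(s) // restores the same three flags
}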


@@ -0,0 +1,154 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"context"
"io"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
// AppendBlobClient represents a client to an Azure Storage append blob.
type AppendBlobClient struct {
BlobClient
client *appendBlobClient
}
// NewAppendBlobClient creates an AppendBlobClient with the specified URL, Azure AD credential, and options.
func NewAppendBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*AppendBlobClient, error) {
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil)
conOptions := getConnectionOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
conn := newConnection(blobURL, conOptions)
return &AppendBlobClient{
client: newAppendBlobClient(conn.Endpoint(), conn.Pipeline()),
BlobClient: BlobClient{
client: newBlobClient(conn.Endpoint(), conn.Pipeline()),
},
}, nil
}
// NewAppendBlobClientWithNoCredential creates an AppendBlobClient with the specified URL and options.
func NewAppendBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*AppendBlobClient, error) {
conOptions := getConnectionOptions(options)
conn := newConnection(blobURL, conOptions)
return &AppendBlobClient{
client: newAppendBlobClient(conn.Endpoint(), conn.Pipeline()),
BlobClient: BlobClient{
client: newBlobClient(conn.Endpoint(), conn.Pipeline()),
},
}, nil
}
// NewAppendBlobClientWithSharedKey creates an AppendBlobClient with the specified URL, shared key, and options.
func NewAppendBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*AppendBlobClient, error) {
authPolicy := newSharedKeyCredPolicy(cred)
conOptions := getConnectionOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
conn := newConnection(blobURL, conOptions)
return &AppendBlobClient{
client: newAppendBlobClient(conn.Endpoint(), conn.Pipeline()),
BlobClient: BlobClient{
client: newBlobClient(conn.Endpoint(), conn.Pipeline()),
sharedKey: cred,
},
}, nil
}
// WithSnapshot creates a new AppendBlobClient object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
func (ab *AppendBlobClient) WithSnapshot(snapshot string) (*AppendBlobClient, error) {
p, err := NewBlobURLParts(ab.URL())
if err != nil {
return nil, err
}
p.Snapshot = snapshot
endpoint := p.URL()
pipeline := ab.client.pl
return &AppendBlobClient{
client: newAppendBlobClient(endpoint, pipeline),
BlobClient: BlobClient{
client: newBlobClient(endpoint, pipeline),
sharedKey: ab.sharedKey,
},
}, nil
}
// WithVersionID creates a new AppendBlobClient object identical to the source but with the specified version id.
// Pass "" to remove the versionID returning a URL to the base blob.
func (ab *AppendBlobClient) WithVersionID(versionID string) (*AppendBlobClient, error) {
p, err := NewBlobURLParts(ab.URL())
if err != nil {
return nil, err
}
p.VersionID = versionID
endpoint := p.URL()
pipeline := ab.client.pl
return &AppendBlobClient{
client: newAppendBlobClient(endpoint, pipeline),
BlobClient: BlobClient{
client: newBlobClient(endpoint, pipeline),
sharedKey: ab.sharedKey,
},
}, nil
}
// Create creates a 0-size append blob. Call AppendBlock to append data to an append blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (ab *AppendBlobClient) Create(ctx context.Context, options *AppendBlobCreateOptions) (AppendBlobCreateResponse, error) {
appendBlobAppendBlockOptions, blobHttpHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := options.format()
resp, err := ab.client.Create(ctx, 0, appendBlobAppendBlockOptions, blobHttpHeaders,
leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
return toAppendBlobCreateResponse(resp), handleError(err)
}
// AppendBlock appends a new block of data from the stream to the end of the existing append blob.
// This method returns an error if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block.
func (ab *AppendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeekCloser, options *AppendBlobAppendBlockOptions) (AppendBlobAppendBlockResponse, error) {
count, err := validateSeekableStreamAt0AndGetCount(body)
if err != nil {
return AppendBlobAppendBlockResponse{}, err
}
appendOptions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions := options.format()
resp, err := ab.client.AppendBlock(ctx, count, body, appendOptions, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions)
return toAppendBlobAppendBlockResponse(resp), handleError(err)
}
// AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url.
func (ab *AppendBlobClient) AppendBlockFromURL(ctx context.Context, source string, o *AppendBlobAppendBlockFromURLOptions) (AppendBlobAppendBlockFromURLResponse, error) {
appendBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions := o.format()
// The content length must always be 0 when appending from a URL; the service responds with 400 otherwise.
resp, err := ab.client.AppendBlockFromURL(ctx, source, 0, appendBlockFromURLOptions, cpkInfo, cpkScopeInfo,
leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions)
return toAppendBlobAppendBlockFromURLResponse(resp), handleError(err)
}
// SealAppendBlob seals the append blob, marking it as read-only.
// https://docs.microsoft.com/en-us/rest/api/storageservices/append-blob-seal
func (ab *AppendBlobClient) SealAppendBlob(ctx context.Context, options *AppendBlobSealOptions) (AppendBlobSealResponse, error) {
leaseAccessConditions, modifiedAccessConditions, positionAccessConditions := options.format()
resp, err := ab.client.Seal(ctx, nil, leaseAccessConditions, modifiedAccessConditions, positionAccessConditions)
return toAppendBlobSealResponse(resp), handleError(err)
}
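// Editor's sketch (illustrative, not part of the SDK): creating an append
// blob and appending one block. The body must be an io.ReadSeekCloser
// positioned at 0; callers commonly wrap a bytes.Reader in a no-op closer.
func exampleAppendBlob(ctx context.Context, ab *AppendBlobClient, body io.ReadSeekCloser) error {
	if _, err := ab.Create(ctx, nil); err != nil {
		return err
	}
	_, err := ab.AppendBlock(ctx, body, nil)
	return err
}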


@@ -0,0 +1,278 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"context"
"errors"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
// BlobClient represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
type BlobClient struct {
client *blobClient
sharedKey *SharedKeyCredential
}
// NewBlobClient creates a BlobClient object using the specified URL, Azure AD credential, and options.
func NewBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*BlobClient, error) {
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil)
conOptions := getConnectionOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
conn := newConnection(blobURL, conOptions)
return &BlobClient{
client: newBlobClient(conn.Endpoint(), conn.Pipeline()),
}, nil
}
// NewBlobClientWithNoCredential creates a BlobClient object using the specified URL and options.
func NewBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*BlobClient, error) {
conOptions := getConnectionOptions(options)
conn := newConnection(blobURL, conOptions)
return &BlobClient{
client: newBlobClient(conn.Endpoint(), conn.Pipeline()),
}, nil
}
// NewBlobClientWithSharedKey creates a BlobClient object using the specified URL, shared key, and options.
func NewBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*BlobClient, error) {
authPolicy := newSharedKeyCredPolicy(cred)
conOptions := getConnectionOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
conn := newConnection(blobURL, conOptions)
return &BlobClient{
client: newBlobClient(blobURL, conn.Pipeline()),
sharedKey: cred,
}, nil
}
// NewBlobClientFromConnectionString creates a BlobClient from a connection string.
//nolint
func NewBlobClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*BlobClient, error) {
containerClient, err := NewContainerClientFromConnectionString(connectionString, containerName, options)
if err != nil {
return nil, err
}
return containerClient.NewBlobClient(blobName)
}
// URL returns the URL endpoint used by the BlobClient object.
func (b *BlobClient) URL() string {
return b.client.endpoint
}
// WithSnapshot creates a new BlobClient object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
func (b *BlobClient) WithSnapshot(snapshot string) (*BlobClient, error) {
p, err := NewBlobURLParts(b.URL())
if err != nil {
return nil, err
}
p.Snapshot = snapshot
pipeline := b.client.pl
return &BlobClient{
client: newBlobClient(p.URL(), pipeline),
sharedKey: b.sharedKey,
}, nil
}
// WithVersionID creates a new BlobClient object identical to the source but with the specified version id.
// Pass "" to remove the versionID returning a URL to the base blob.
func (b *BlobClient) WithVersionID(versionID string) (*BlobClient, error) {
p, err := NewBlobURLParts(b.URL())
if err != nil {
return nil, err
}
p.VersionID = versionID
pipeline := b.client.pl
return &BlobClient{
client: newBlobClient(p.URL(), pipeline),
sharedKey: b.sharedKey,
}, nil
}
// Download reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
func (b *BlobClient) Download(ctx context.Context, options *BlobDownloadOptions) (BlobDownloadResponse, error) {
o, lease, cpk, accessConditions := options.format()
dr, err := b.client.Download(ctx, o, lease, cpk, accessConditions)
if err != nil {
return BlobDownloadResponse{}, handleError(err)
}
offset := int64(0)
count := int64(CountToEnd)
if options != nil && options.Offset != nil {
offset = *options.Offset
}
if options != nil && options.Count != nil {
count = *options.Count
}
eTag := ""
if dr.ETag != nil {
eTag = *dr.ETag
}
return BlobDownloadResponse{
b: b,
blobClientDownloadResponse: dr,
ctx: ctx,
getInfo: HTTPGetterInfo{Offset: offset, Count: count, ETag: eTag},
ObjectReplicationRules: deserializeORSPolicies(dr.ObjectReplicationRules),
}, err
}
// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
// Note that deleting a blob also deletes all its snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
func (b *BlobClient) Delete(ctx context.Context, o *BlobDeleteOptions) (BlobDeleteResponse, error) {
basics, leaseInfo, accessConditions := o.format()
resp, err := b.client.Delete(ctx, basics, leaseInfo, accessConditions)
return toBlobDeleteResponse(resp), handleError(err)
}
// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob.
func (b *BlobClient) Undelete(ctx context.Context, o *BlobUndeleteOptions) (BlobUndeleteResponse, error) {
undeleteOptions := o.format()
resp, err := b.client.Undelete(ctx, undeleteOptions)
return toBlobUndeleteResponse(resp), handleError(err)
}
// SetTier operation sets the tier on a blob. The operation is allowed on a page
// blob in a premium storage account and on a block blob in a blob storage account (locally
// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and
// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation
// does not update the blob's ETag.
// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
func (b *BlobClient) SetTier(ctx context.Context, tier AccessTier, options *BlobSetTierOptions) (BlobSetTierResponse, error) {
basics, lease, accessConditions := options.format()
resp, err := b.client.SetTier(ctx, tier, basics, lease, accessConditions)
return toBlobSetTierResponse(resp), handleError(err)
}
// GetProperties returns the blob's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
func (b *BlobClient) GetProperties(ctx context.Context, options *BlobGetPropertiesOptions) (BlobGetPropertiesResponse, error) {
basics, lease, cpk, access := options.format()
resp, err := b.client.GetProperties(ctx, basics, lease, cpk, access)
return toGetBlobPropertiesResponse(resp), handleError(err)
}
// SetHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (b *BlobClient) SetHTTPHeaders(ctx context.Context, blobHttpHeaders BlobHTTPHeaders, options *BlobSetHTTPHeadersOptions) (BlobSetHTTPHeadersResponse, error) {
basics, lease, access := options.format()
resp, err := b.client.SetHTTPHeaders(ctx, basics, &blobHttpHeaders, lease, access)
return toBlobSetHTTPHeadersResponse(resp), handleError(err)
}
// SetMetadata changes a blob's metadata.
// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
func (b *BlobClient) SetMetadata(ctx context.Context, metadata map[string]string, options *BlobSetMetadataOptions) (BlobSetMetadataResponse, error) {
basics := blobClientSetMetadataOptions{
Metadata: metadata,
}
lease, cpk, cpkScope, access := options.format()
resp, err := b.client.SetMetadata(ctx, &basics, lease, cpk, cpkScope, access)
return toBlobSetMetadataResponse(resp), handleError(err)
}
// CreateSnapshot creates a read-only snapshot of a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob.
func (b *BlobClient) CreateSnapshot(ctx context.Context, options *BlobCreateSnapshotOptions) (BlobCreateSnapshotResponse, error) {
// CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter
// because checking this would be a performance hit for a VERY unusual path, and we don't think the common case should suffer this
// performance hit.
basics, cpk, cpkScope, access, lease := options.format()
resp, err := b.client.CreateSnapshot(ctx, basics, cpk, cpkScope, access, lease)
return toBlobCreateSnapshotResponse(resp), handleError(err)
}
// StartCopyFromURL copies the data at the source URL to a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
func (b *BlobClient) StartCopyFromURL(ctx context.Context, copySource string, options *BlobStartCopyOptions) (BlobStartCopyFromURLResponse, error) {
basics, srcAccess, destAccess, lease := options.format()
resp, err := b.client.StartCopyFromURL(ctx, copySource, basics, srcAccess, destAccess, lease)
return toBlobStartCopyFromURLResponse(resp), handleError(err)
}
// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob.
func (b *BlobClient) AbortCopyFromURL(ctx context.Context, copyID string, options *BlobAbortCopyOptions) (BlobAbortCopyFromURLResponse, error) {
basics, lease := options.format()
resp, err := b.client.AbortCopyFromURL(ctx, copyID, basics, lease)
return toBlobAbortCopyFromURLResponse(resp), handleError(err)
}
// SetTags sets tags on a blob or a specific blob version, but not on a snapshot.
// Each call to this operation replaces all existing tags attached to the blob.
// To remove all tags from the blob, call this operation with no tags set.
// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags
func (b *BlobClient) SetTags(ctx context.Context, options *BlobSetTagsOptions) (BlobSetTagsResponse, error) {
blobSetTagsOptions, modifiedAccessConditions, leaseAccessConditions := options.format()
resp, err := b.client.SetTags(ctx, blobSetTagsOptions, modifiedAccessConditions, leaseAccessConditions)
return toBlobSetTagsResponse(resp), handleError(err)
}
// GetTags returns the tags of a blob, a specific blob version, or a snapshot.
// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags
func (b *BlobClient) GetTags(ctx context.Context, options *BlobGetTagsOptions) (BlobGetTagsResponse, error) {
blobGetTagsOptions, modifiedAccessConditions, leaseAccessConditions := options.format()
resp, err := b.client.GetTags(ctx, blobGetTagsOptions, modifiedAccessConditions, leaseAccessConditions)
return toBlobGetTagsResponse(resp), handleError(err)
}
// GetSASToken is a convenience method for generating a SAS token for the currently pointed at blob.
// It can only be used if the credential supplied during creation was a SharedKeyCredential.
func (b *BlobClient) GetSASToken(permissions BlobSASPermissions, start time.Time, expiry time.Time) (SASQueryParameters, error) {
urlParts, _ := NewBlobURLParts(b.URL())
t, err := time.Parse(SnapshotTimeFormat, urlParts.Snapshot)
if err != nil {
t = time.Time{}
}
if b.sharedKey == nil {
return SASQueryParameters{}, errors.New("credential is not a SharedKeyCredential. SAS can only be signed with a SharedKeyCredential")
}
return BlobSASSignatureValues{
ContainerName: urlParts.ContainerName,
BlobName: urlParts.BlobName,
SnapshotTime: t,
Version: SASVersion,
Permissions: permissions.String(),
StartTime: start.UTC(),
ExpiryTime: expiry.UTC(),
}.NewSASQueryParameters(b.sharedKey)
}
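// Editor's sketch (illustrative, not part of the SDK): building a read-only
// blob URL valid for one hour. This only works when the client was created
// with a SharedKeyCredential; Encode is assumed to serialize the token.
func exampleReadOnlySASURL(b *BlobClient) (string, error) {
	start := time.Now().UTC()
	qp, err := b.GetSASToken(BlobSASPermissions{Read: true}, start, start.Add(time.Hour))
	if err != nil {
		return "", err
	}
	return b.URL() + "?" + qp.Encode(), nil
}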


@@ -0,0 +1,98 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"context"
"errors"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
)
// BlobLeaseClient represents a lease client for a blob.
type BlobLeaseClient struct {
BlobClient
leaseID *string
}
// NewBlobLeaseClient is the constructor for BlobLeaseClient.
func (b *BlobClient) NewBlobLeaseClient(leaseID *string) (*BlobLeaseClient, error) {
if leaseID == nil {
generatedUuid, err := uuid.New()
if err != nil {
return nil, err
}
leaseID = to.Ptr(generatedUuid.String())
}
return &BlobLeaseClient{
BlobClient: *b,
leaseID: leaseID,
}, nil
}
// AcquireLease acquires a lease on the blob for write and delete operations.
// The lease Duration must be between 15 and 60 seconds, or infinite (-1).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (blc *BlobLeaseClient) AcquireLease(ctx context.Context, options *BlobAcquireLeaseOptions) (BlobAcquireLeaseResponse, error) {
blobAcquireLeaseOptions, modifiedAccessConditions := options.format()
blobAcquireLeaseOptions.ProposedLeaseID = blc.leaseID
resp, err := blc.client.AcquireLease(ctx, &blobAcquireLeaseOptions, modifiedAccessConditions)
return toBlobAcquireLeaseResponse(resp), handleError(err)
}
// BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1)
// constant to break a fixed-Duration lease when it expires or an infinite lease immediately.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (blc *BlobLeaseClient) BreakLease(ctx context.Context, options *BlobBreakLeaseOptions) (BlobBreakLeaseResponse, error) {
blobBreakLeaseOptions, modifiedAccessConditions := options.format()
resp, err := blc.client.BreakLease(ctx, blobBreakLeaseOptions, modifiedAccessConditions)
return toBlobBreakLeaseResponse(resp), handleError(err)
}
// ChangeLease changes the blob's lease ID.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (blc *BlobLeaseClient) ChangeLease(ctx context.Context, options *BlobChangeLeaseOptions) (BlobChangeLeaseResponse, error) {
if blc.leaseID == nil {
return BlobChangeLeaseResponse{}, errors.New("leaseID cannot be nil")
}
proposedLeaseID, changeLeaseOptions, modifiedAccessConditions, err := options.format()
if err != nil {
return BlobChangeLeaseResponse{}, err
}
resp, err := blc.client.ChangeLease(ctx, *blc.leaseID, *proposedLeaseID, changeLeaseOptions, modifiedAccessConditions)
// If lease has been changed successfully, set the leaseID in client
if err == nil {
blc.leaseID = proposedLeaseID
}
return toBlobChangeLeaseResponse(resp), handleError(err)
}
// RenewLease renews the blob's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (blc *BlobLeaseClient) RenewLease(ctx context.Context, options *BlobRenewLeaseOptions) (BlobRenewLeaseResponse, error) {
if blc.leaseID == nil {
return BlobRenewLeaseResponse{}, errors.New("leaseID cannot be nil")
}
renewLeaseBlobOptions, modifiedAccessConditions := options.format()
resp, err := blc.client.RenewLease(ctx, *blc.leaseID, renewLeaseBlobOptions, modifiedAccessConditions)
return toBlobRenewLeaseResponse(resp), handleError(err)
}
// ReleaseLease releases the blob's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (blc *BlobLeaseClient) ReleaseLease(ctx context.Context, options *ReleaseLeaseBlobOptions) (BlobReleaseLeaseResponse, error) {
if blc.leaseID == nil {
return BlobReleaseLeaseResponse{}, errors.New("leaseID cannot be nil")
}
renewLeaseBlobOptions, modifiedAccessConditions := options.format()
resp, err := blc.client.ReleaseLease(ctx, *blc.leaseID, renewLeaseBlobOptions, modifiedAccessConditions)
return toBlobReleaseLeaseResponse(resp), handleError(err)
}
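// Editor's sketch (illustrative, not part of the SDK): acquiring and
// releasing a short lease. A nil leaseID lets the client generate one; the
// Duration option field name is assumed for this SDK version.
func exampleBlobLease(ctx context.Context, b *BlobClient) error {
	blc, err := b.NewBlobLeaseClient(nil)
	if err != nil {
		return err
	}
	duration := int32(15) // seconds; must be 15-60, or -1 for infinite
	if _, err = blc.AcquireLease(ctx, &BlobAcquireLeaseOptions{Duration: &duration}); err != nil {
		return err
	}
	_, err = blc.ReleaseLease(ctx, nil)
	return err
}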


@@ -0,0 +1,201 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"context"
"io"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)
// BlockBlobClient defines a set of operations applicable to block blobs.
type BlockBlobClient struct {
BlobClient
client *blockBlobClient
}
// NewBlockBlobClient creates a BlockBlobClient object using the specified URL, Azure AD credential, and options.
func NewBlockBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*BlockBlobClient, error) {
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil)
conOptions := getConnectionOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
conn := newConnection(blobURL, conOptions)
bClient := newBlobClient(conn.Endpoint(), conn.Pipeline())
return &BlockBlobClient{
client: newBlockBlobClient(bClient.endpoint, bClient.pl),
BlobClient: BlobClient{
client: bClient,
},
}, nil
}
// NewBlockBlobClientWithNoCredential creates a BlockBlobClient object using the specified URL and options.
func NewBlockBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*BlockBlobClient, error) {
conOptions := getConnectionOptions(options)
conn := newConnection(blobURL, conOptions)
bClient := newBlobClient(conn.Endpoint(), conn.Pipeline())
return &BlockBlobClient{
client: newBlockBlobClient(bClient.endpoint, bClient.pl),
BlobClient: BlobClient{
client: bClient,
},
}, nil
}
// NewBlockBlobClientWithSharedKey creates a BlockBlobClient object using the specified URL, shared key, and options.
func NewBlockBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*BlockBlobClient, error) {
authPolicy := newSharedKeyCredPolicy(cred)
conOptions := getConnectionOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
conn := newConnection(blobURL, conOptions)
bClient := newBlobClient(conn.Endpoint(), conn.Pipeline())
return &BlockBlobClient{
client: newBlockBlobClient(bClient.endpoint, bClient.pl),
BlobClient: BlobClient{
client: bClient,
sharedKey: cred,
},
}, nil
}
// WithSnapshot creates a new BlockBlobClient object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
func (bb *BlockBlobClient) WithSnapshot(snapshot string) (*BlockBlobClient, error) {
p, err := NewBlobURLParts(bb.URL())
if err != nil {
return nil, err
}
p.Snapshot = snapshot
endpoint := p.URL()
bClient := newBlobClient(endpoint, bb.client.pl)
return &BlockBlobClient{
client: newBlockBlobClient(bClient.endpoint, bClient.pl),
BlobClient: BlobClient{
client: bClient,
sharedKey: bb.sharedKey,
},
}, nil
}
// WithVersionID creates a new BlockBlobClient object identical to the source but with the specified version id.
// Pass "" to remove the versionID returning a URL to the base blob.
func (bb *BlockBlobClient) WithVersionID(versionID string) (*BlockBlobClient, error) {
p, err := NewBlobURLParts(bb.URL())
if err != nil {
return nil, err
}
p.VersionID = versionID
endpoint := p.URL()
bClient := newBlobClient(endpoint, bb.client.pl)
return &BlockBlobClient{
client: newBlockBlobClient(bClient.endpoint, bClient.pl),
BlobClient: BlobClient{
client: bClient,
sharedKey: bb.sharedKey,
},
}, nil
}
// Upload creates a new block blob or overwrites an existing block blob.
// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not
// supported with Upload; the content of the existing blob is overwritten with the new content. To
// perform a partial update of a block blob, use StageBlock and CommitBlockList.
// This method returns an error if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (bb *BlockBlobClient) Upload(ctx context.Context, body io.ReadSeekCloser, options *BlockBlobUploadOptions) (BlockBlobUploadResponse, error) {
count, err := validateSeekableStreamAt0AndGetCount(body)
if err != nil {
return BlockBlobUploadResponse{}, err
}
basics, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions := options.format()
resp, err := bb.client.Upload(ctx, count, body, basics, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions)
return toBlockBlobUploadResponse(resp), handleError(err)
}
// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
func (bb *BlockBlobClient) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeekCloser,
options *BlockBlobStageBlockOptions) (BlockBlobStageBlockResponse, error) {
count, err := validateSeekableStreamAt0AndGetCount(body)
if err != nil {
return BlockBlobStageBlockResponse{}, err
}
stageBlockOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo := options.format()
resp, err := bb.client.StageBlock(ctx, base64BlockID, count, body, stageBlockOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo)
return toBlockBlobStageBlockResponse(resp), handleError(err)
}
// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// If count is CountToEnd (0), then data is read from specified offset to the end.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url.
func (bb *BlockBlobClient) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL string,
contentLength int64, options *BlockBlobStageBlockFromURLOptions) (BlockBlobStageBlockFromURLResponse, error) {
stageBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions := options.format()
resp, err := bb.client.StageBlockFromURL(ctx, base64BlockID, contentLength, sourceURL, stageBlockFromURLOptions,
cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions)
return toBlockBlobStageBlockFromURLResponse(resp), handleError(err)
}
// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob.
// In order to be written as part of a blob, a block must have been successfully written
// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob
// by uploading only those blocks that have changed, then committing the new and existing
// blocks together. Any blocks not specified in the block list are permanently deleted.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
func (bb *BlockBlobClient) CommitBlockList(ctx context.Context, base64BlockIDs []string, options *BlockBlobCommitBlockListOptions) (BlockBlobCommitBlockListResponse, error) {
// this is a code smell in the generated code
blockIds := make([]*string, len(base64BlockIDs))
for k, v := range base64BlockIDs {
blockIds[k] = to.Ptr(v)
}
blockLookupList := BlockLookupList{Latest: blockIds}
commitOptions, headers, leaseAccess, cpkInfo, cpkScope, modifiedAccess := options.format()
resp, err := bb.client.CommitBlockList(ctx, blockLookupList, commitOptions, headers, leaseAccess, cpkInfo, cpkScope, modifiedAccess)
return toBlockBlobCommitBlockListResponse(resp), handleError(err)
}
// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list.
func (bb *BlockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, options *BlockBlobGetBlockListOptions) (BlockBlobGetBlockListResponse, error) {
o, lac, mac := options.format()
resp, err := bb.client.GetBlockList(ctx, listType, o, lac, mac)
return toBlockBlobGetBlockListResponse(resp), handleError(err)
}
// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url.
func (bb *BlockBlobClient) CopyFromURL(ctx context.Context, source string, options *BlockBlobCopyFromURLOptions) (BlockBlobCopyFromURLResponse, error) {
copyOptions, smac, mac, lac := options.format()
resp, err := bb.BlobClient.client.CopyFromURL(ctx, source, copyOptions, smac, mac, lac)
return toBlockBlobCopyFromURLResponse(resp), handleError(err)
}
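// Editor's sketch (illustrative, not part of the SDK): the two-step
// StageBlock + CommitBlockList flow for a single block. base64ID must be a
// base64-encoded block ID; body must be seekable and positioned at 0.
func exampleStagedUpload(ctx context.Context, bb *BlockBlobClient, base64ID string, body io.ReadSeekCloser) error {
	if _, err := bb.StageBlock(ctx, base64ID, body, nil); err != nil {
		return err
	}
	_, err := bb.CommitBlockList(ctx, []string{base64ID}, nil)
	return err
}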


@@ -0,0 +1,88 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"errors"
"fmt"
"strings"
)
var errConnectionString = errors.New("connection string is either blank or malformed. The expected connection string " +
"should contain key value pairs separated by semicolons. For example 'DefaultEndpointsProtocol=https;AccountName=<accountName>;" +
"AccountKey=<accountKey>;EndpointSuffix=core.windows.net'")
// convertConnStrToMap converts a connection string (in format key1=value1;key2=value2;key3=value3;) into a map of key-value pairs
func convertConnStrToMap(connStr string) (map[string]string, error) {
ret := make(map[string]string)
connStr = strings.TrimRight(connStr, ";")
splitString := strings.Split(connStr, ";")
if len(splitString) == 0 {
return ret, errConnectionString
}
for _, stringPart := range splitString {
parts := strings.SplitN(stringPart, "=", 2)
if len(parts) != 2 {
return ret, errConnectionString
}
ret[parts[0]] = parts[1]
}
return ret, nil
}
// parseConnectionString parses a connection string into a service URL and a SharedKeyCredential or a service url with the
// SharedAccessSignature combined.
func parseConnectionString(connectionString string) (string, *SharedKeyCredential, error) {
var serviceURL string
var cred *SharedKeyCredential
defaultScheme := "https"
defaultSuffix := "core.windows.net"
connStrMap, err := convertConnStrToMap(connectionString)
if err != nil {
return "", nil, err
}
accountName, ok := connStrMap["AccountName"]
if !ok {
return "", nil, errConnectionString
}
accountKey, ok := connStrMap["AccountKey"]
if !ok {
sharedAccessSignature, ok := connStrMap["SharedAccessSignature"]
if !ok {
return "", nil, errConnectionString
}
return fmt.Sprintf("%v://%v.blob.%v/?%v", defaultScheme, accountName, defaultSuffix, sharedAccessSignature), nil, nil
}
protocol, ok := connStrMap["DefaultEndpointsProtocol"]
if !ok {
protocol = defaultScheme
}
suffix, ok := connStrMap["EndpointSuffix"]
if !ok {
suffix = defaultSuffix
}
blobEndpoint, ok := connStrMap["BlobEndpoint"]
if ok {
cred, err = NewSharedKeyCredential(accountName, accountKey)
return blobEndpoint, cred, err
}
serviceURL = fmt.Sprintf("%v://%v.blob.%v", protocol, accountName, suffix)
cred, err = NewSharedKeyCredential(accountName, accountKey)
if err != nil {
return "", nil, err
}
return serviceURL, cred, nil
}
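// Editor's sketch (illustrative, not part of the SDK): parsing a shared-key
// connection string. The account name and key below are fabricated
// placeholders (the key is merely valid base64).
func exampleParseConnectionString() (string, error) {
	connStr := "DefaultEndpointsProtocol=https;AccountName=myaccount;" +
		"AccountKey=bXlhY2NvdW50a2V5;EndpointSuffix=core.windows.net"
	serviceURL, cred, err := parseConnectionString(connStr)
	if err != nil {
		return "", err
	}
	_ = cred // pass to NewBlobClientWithSharedKey and friends
	return serviceURL, nil // "https://myaccount.blob.core.windows.net"
}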


@@ -0,0 +1,253 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"context"
"errors"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
// ContainerClient represents a URL to the Azure Storage container allowing you to manipulate its blobs.
type ContainerClient struct {
client *containerClient
sharedKey *SharedKeyCredential
}
// URL returns the URL endpoint used by the ContainerClient object.
func (c *ContainerClient) URL() string {
return c.client.endpoint
}
// NewContainerClient creates a ContainerClient object using the specified URL, Azure AD credential, and options.
func NewContainerClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*ContainerClient, error) {
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil)
conOptions := getConnectionOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
conn := newConnection(containerURL, conOptions)
return &ContainerClient{
client: newContainerClient(conn.Endpoint(), conn.Pipeline()),
}, nil
}
// NewContainerClientWithNoCredential creates a ContainerClient object using the specified URL and options.
func NewContainerClientWithNoCredential(containerURL string, options *ClientOptions) (*ContainerClient, error) {
conOptions := getConnectionOptions(options)
conn := newConnection(containerURL, conOptions)
return &ContainerClient{
client: newContainerClient(conn.Endpoint(), conn.Pipeline()),
}, nil
}
// NewContainerClientWithSharedKey creates a ContainerClient object using the specified URL, shared key, and options.
func NewContainerClientWithSharedKey(containerURL string, cred *SharedKeyCredential, options *ClientOptions) (*ContainerClient, error) {
authPolicy := newSharedKeyCredPolicy(cred)
conOptions := getConnectionOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
conn := newConnection(containerURL, conOptions)
return &ContainerClient{
client: newContainerClient(conn.Endpoint(), conn.Pipeline()),
sharedKey: cred,
}, nil
}
// NewContainerClientFromConnectionString creates a ContainerClient object using the connection string of an account.
func NewContainerClientFromConnectionString(connectionString string, containerName string, options *ClientOptions) (*ContainerClient, error) {
svcClient, err := NewServiceClientFromConnectionString(connectionString, options)
if err != nil {
return nil, err
}
return svcClient.NewContainerClient(containerName)
}
// NewBlobClient creates a new BlobClient object by concatenating blobName to the end of
// ContainerClient's URL. The new BlobClient uses the same request policy pipeline as the ContainerClient.
// To change the pipeline, create the BlobClient and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewBlobClient instead of calling this object's
// NewBlobClient method.
func (c *ContainerClient) NewBlobClient(blobName string) (*BlobClient, error) {
blobURL := appendToURLPath(c.URL(), blobName)
return &BlobClient{
client: newBlobClient(blobURL, c.client.pl),
sharedKey: c.sharedKey,
}, nil
}
// NewAppendBlobClient creates a new AppendBlobURL object by concatenating blobName to the end of
// ContainerClient's URL. The new AppendBlobURL uses the same request policy pipeline as the ContainerClient.
// To change the pipeline, create the AppendBlobURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewAppendBlobClient instead of calling this object's
// NewAppendBlobClient method.
func (c *ContainerClient) NewAppendBlobClient(blobName string) (*AppendBlobClient, error) {
blobURL := appendToURLPath(c.URL(), blobName)
return &AppendBlobClient{
BlobClient: BlobClient{
client: newBlobClient(blobURL, c.client.pl),
sharedKey: c.sharedKey,
},
client: newAppendBlobClient(blobURL, c.client.pl),
}, nil
}
// NewBlockBlobClient creates a new BlockBlobClient object by concatenating blobName to the end of
// ContainerClient's URL. The new BlockBlobClient uses the same request policy pipeline as the ContainerClient.
// To change the pipeline, create the BlockBlobClient and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewBlockBlobClient instead of calling this object's
// NewBlockBlobClient method.
func (c *ContainerClient) NewBlockBlobClient(blobName string) (*BlockBlobClient, error) {
blobURL := appendToURLPath(c.URL(), blobName)
return &BlockBlobClient{
BlobClient: BlobClient{
client: newBlobClient(blobURL, c.client.pl),
sharedKey: c.sharedKey,
},
client: newBlockBlobClient(blobURL, c.client.pl),
}, nil
}
// NewPageBlobClient creates a new PageBlobURL object by concatenating blobName to the end of ContainerClient's URL. The new PageBlobURL uses the same request policy pipeline as the ContainerClient.
// To change the pipeline, create the PageBlobURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewPageBlobClient instead of calling this object's
// NewPageBlobClient method.
func (c *ContainerClient) NewPageBlobClient(blobName string) (*PageBlobClient, error) {
blobURL := appendToURLPath(c.URL(), blobName)
return &PageBlobClient{
BlobClient: BlobClient{
client: newBlobClient(blobURL, c.client.pl),
sharedKey: c.sharedKey,
},
client: newPageBlobClient(blobURL, c.client.pl),
}, nil
}
// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container.
func (c *ContainerClient) Create(ctx context.Context, options *ContainerCreateOptions) (ContainerCreateResponse, error) {
basics, cpkInfo := options.format()
resp, err := c.client.Create(ctx, basics, cpkInfo)
return toContainerCreateResponse(resp), handleError(err)
}
// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container.
func (c *ContainerClient) Delete(ctx context.Context, o *ContainerDeleteOptions) (ContainerDeleteResponse, error) {
basics, leaseInfo, accessConditions := o.format()
resp, err := c.client.Delete(ctx, basics, leaseInfo, accessConditions)
return toContainerDeleteResponse(resp), handleError(err)
}
// GetProperties returns the container's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata.
func (c *ContainerClient) GetProperties(ctx context.Context, o *ContainerGetPropertiesOptions) (ContainerGetPropertiesResponse, error) {
	// NOTE: GetProperties returns the metadata AND the properties, so a separate GetMetadata
	// method is not exposed; this keeps the API simpler.
	// The optional parameters are nil, as they were in track 1.5.
options, leaseAccess := o.format()
resp, err := c.client.GetProperties(ctx, options, leaseAccess)
return toContainerGetPropertiesResponse(resp), handleError(err)
}
// SetMetadata sets the container's metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata.
func (c *ContainerClient) SetMetadata(ctx context.Context, o *ContainerSetMetadataOptions) (ContainerSetMetadataResponse, error) {
metadataOptions, lac, mac := o.format()
resp, err := c.client.SetMetadata(ctx, metadataOptions, lac, mac)
return toContainerSetMetadataResponse(resp), handleError(err)
}
// GetAccessPolicy returns the container's access policy. The access policy indicates whether container's blobs may be accessed publicly.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl.
func (c *ContainerClient) GetAccessPolicy(ctx context.Context, o *ContainerGetAccessPolicyOptions) (ContainerGetAccessPolicyResponse, error) {
options, ac := o.format()
resp, err := c.client.GetAccessPolicy(ctx, options, ac)
return toContainerGetAccessPolicyResponse(resp), handleError(err)
}
// SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl.
func (c *ContainerClient) SetAccessPolicy(ctx context.Context, o *ContainerSetAccessPolicyOptions) (ContainerSetAccessPolicyResponse, error) {
accessPolicy, mac, lac := o.format()
resp, err := c.client.SetAccessPolicy(ctx, accessPolicy, mac, lac)
return toContainerSetAccessPolicyResponse(resp), handleError(err)
}
// ListBlobsFlat returns a pager for blobs starting from the specified Marker. Use an empty
// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
func (c *ContainerClient) ListBlobsFlat(o *ContainerListBlobsFlatOptions) *ContainerListBlobFlatPager {
listOptions := o.format()
pager := c.client.ListBlobFlatSegment(listOptions)
// override the advancer
pager.advancer = func(ctx context.Context, response containerClientListBlobFlatSegmentResponse) (*policy.Request, error) {
listOptions.Marker = response.NextMarker
return c.client.listBlobFlatSegmentCreateRequest(ctx, listOptions)
}
return toContainerListBlobFlatSegmentPager(pager)
}
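// Illustrative sketch (not part of the vendored file): draining the pager returned by
// ListBlobsFlat from caller code. NextPage/PageResponse/Err is the pre-1.0 pager protocol
// used by this SDK version; the container client, ctx, and the Segment.BlobItems field
// path are assumptions for illustration.
//
//    pager := container.ListBlobsFlat(nil) // nil options list everything
//    for pager.NextPage(ctx) {
//        resp := pager.PageResponse()
//        for _, item := range resp.Segment.BlobItems {
//            fmt.Println(*item.Name)
//        }
//    }
//    if err := pager.Err(); err != nil {
//        // handle the listing error
//    }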
// ListBlobsHierarchy returns a pager for blobs starting from the specified Marker, grouping results by the given delimiter.
// Use an empty Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
func (c *ContainerClient) ListBlobsHierarchy(delimiter string, o *ContainerListBlobsHierarchyOptions) *ContainerListBlobHierarchyPager {
listOptions := o.format()
pager := c.client.ListBlobHierarchySegment(delimiter, listOptions)
// override the advancer
pager.advancer = func(ctx context.Context, response containerClientListBlobHierarchySegmentResponse) (*policy.Request, error) {
listOptions.Marker = response.NextMarker
return c.client.listBlobHierarchySegmentCreateRequest(ctx, delimiter, listOptions)
}
return toContainerListBlobHierarchySegmentPager(pager)
}
// GetSASURL is a convenience method for generating a SAS token for the container this client points to.
// It can only be used if the credential supplied during creation was a SharedKeyCredential.
func (c *ContainerClient) GetSASURL(permissions ContainerSASPermissions, start time.Time, expiry time.Time) (string, error) {
if c.sharedKey == nil {
return "", errors.New("SAS can only be signed with a SharedKeyCredential")
}
urlParts, err := NewBlobURLParts(c.URL())
if err != nil {
return "", err
}
// Containers do not have snapshots or versions.
urlParts.SAS, err = BlobSASSignatureValues{
ContainerName: urlParts.ContainerName,
Permissions: permissions.String(),
StartTime: start.UTC(),
ExpiryTime: expiry.UTC(),
}.NewSASQueryParameters(c.sharedKey)
return urlParts.URL(), err
}
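// Illustrative sketch (not part of the vendored file): minting a time-limited, read-only
// container URL from caller code with GetSASURL. The account, key, and container URL are
// placeholders; NewSharedKeyCredential and NewContainerClientWithSharedKey are the
// shared-key constructors this package exposes.
//
//    cred, _ := azblob.NewSharedKeyCredential("<account>", "<base64-key>")
//    container, _ := azblob.NewContainerClientWithSharedKey(
//        "https://<account>.blob.core.windows.net/<container>", cred, nil)
//    sasURL, err := container.GetSASURL(
//        azblob.ContainerSASPermissions{Read: true, List: true},
//        time.Now().UTC(), time.Now().UTC().Add(48*time.Hour))
//    if err != nil {
//        // handle error
//    }
//    fmt.Println(sasURL) // shareable URL, valid for 48 hours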

View file

@ -0,0 +1,102 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"context"
"errors"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
)
// ContainerLeaseClient represents a client to the container's lease operations.
type ContainerLeaseClient struct {
ContainerClient
leaseID *string
}
// NewContainerLeaseClient creates a ContainerLeaseClient. If leaseID is nil, a random lease ID is generated.
func (c *ContainerClient) NewContainerLeaseClient(leaseID *string) (*ContainerLeaseClient, error) {
if leaseID == nil {
generatedUuid, err := uuid.New()
if err != nil {
return nil, err
}
leaseID = to.Ptr(generatedUuid.String())
}
return &ContainerLeaseClient{
ContainerClient: *c,
leaseID: leaseID,
}, nil
}
// AcquireLease acquires a lease on the container for delete operations. The lease Duration must be between 15 and 60 seconds, or infinite (-1).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (clc *ContainerLeaseClient) AcquireLease(ctx context.Context, options *ContainerAcquireLeaseOptions) (ContainerAcquireLeaseResponse, error) {
containerAcquireLeaseOptions, modifiedAccessConditions := options.format()
containerAcquireLeaseOptions.ProposedLeaseID = clc.leaseID
resp, err := clc.client.AcquireLease(ctx, &containerAcquireLeaseOptions, modifiedAccessConditions)
if err == nil && resp.LeaseID != nil {
clc.leaseID = resp.LeaseID
}
return toContainerAcquireLeaseResponse(resp), handleError(err)
}
// BreakLease breaks the container's previously-acquired lease (if it exists).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (clc *ContainerLeaseClient) BreakLease(ctx context.Context, options *ContainerBreakLeaseOptions) (ContainerBreakLeaseResponse, error) {
containerBreakLeaseOptions, modifiedAccessConditions := options.format()
resp, err := clc.client.BreakLease(ctx, containerBreakLeaseOptions, modifiedAccessConditions)
return toContainerBreakLeaseResponse(resp), handleError(err)
}
// ChangeLease changes the container's lease ID.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (clc *ContainerLeaseClient) ChangeLease(ctx context.Context, options *ContainerChangeLeaseOptions) (ContainerChangeLeaseResponse, error) {
if clc.leaseID == nil {
return ContainerChangeLeaseResponse{}, errors.New("leaseID cannot be nil")
}
proposedLeaseID, changeLeaseOptions, modifiedAccessConditions, err := options.format()
if err != nil {
return ContainerChangeLeaseResponse{}, err
}
resp, err := clc.client.ChangeLease(ctx, *clc.leaseID, *proposedLeaseID, changeLeaseOptions, modifiedAccessConditions)
if err == nil && resp.LeaseID != nil {
clc.leaseID = resp.LeaseID
}
return toContainerChangeLeaseResponse(resp), handleError(err)
}
// ReleaseLease releases the container's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (clc *ContainerLeaseClient) ReleaseLease(ctx context.Context, options *ContainerReleaseLeaseOptions) (ContainerReleaseLeaseResponse, error) {
if clc.leaseID == nil {
return ContainerReleaseLeaseResponse{}, errors.New("leaseID cannot be nil")
}
containerReleaseLeaseOptions, modifiedAccessConditions := options.format()
resp, err := clc.client.ReleaseLease(ctx, *clc.leaseID, containerReleaseLeaseOptions, modifiedAccessConditions)
return toContainerReleaseLeaseResponse(resp), handleError(err)
}
// RenewLease renews the container's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (clc *ContainerLeaseClient) RenewLease(ctx context.Context, options *ContainerRenewLeaseOptions) (ContainerRenewLeaseResponse, error) {
if clc.leaseID == nil {
return ContainerRenewLeaseResponse{}, errors.New("leaseID cannot be nil")
}
renewLeaseBlobOptions, modifiedAccessConditions := options.format()
resp, err := clc.client.RenewLease(ctx, *clc.leaseID, renewLeaseBlobOptions, modifiedAccessConditions)
if err == nil && resp.LeaseID != nil {
clc.leaseID = resp.LeaseID
}
return toContainerRenewLeaseResponse(resp), handleError(err)
}
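// Illustrative sketch (not part of the vendored file): guarding a destructive operation
// with a container lease from caller code. Passing nil lets NewContainerLeaseClient pick a
// random lease ID; the Duration option field is an assumption for this SDK version.
//
//    leaseClient, err := container.NewContainerLeaseClient(nil)
//    if err != nil {
//        // handle error
//    }
//    _, err = leaseClient.AcquireLease(ctx, &azblob.ContainerAcquireLeaseOptions{
//        Duration: to.Ptr(int32(60)), // 15-60 seconds, or -1 for infinite
//    })
//    if err != nil {
//        // handle error
//    }
//    defer leaseClient.ReleaseLease(ctx, nil)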

View file

@ -0,0 +1,261 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"context"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"io"
"net/url"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
// PageBlobClient represents a client to an Azure Storage page blob.
type PageBlobClient struct {
BlobClient
client *pageBlobClient
}
// NewPageBlobClient creates a PageBlobClient object using the specified URL, Azure AD credential, and options.
// Example of blobURL: https://<your_storage_account>.blob.core.windows.net/<container>/<blob>
func NewPageBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*PageBlobClient, error) {
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil)
conOptions := getConnectionOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
conn := newConnection(blobURL, conOptions)
return &PageBlobClient{
client: newPageBlobClient(conn.Endpoint(), conn.Pipeline()),
BlobClient: BlobClient{
client: newBlobClient(conn.Endpoint(), conn.Pipeline()),
},
}, nil
}
// NewPageBlobClientWithNoCredential creates a PageBlobClient object using the specified URL and options.
// Example of blobURL: https://<your_storage_account>.blob.core.windows.net/<container>/<blob>?<SAS token>
func NewPageBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*PageBlobClient, error) {
conOptions := getConnectionOptions(options)
conn := newConnection(blobURL, conOptions)
return &PageBlobClient{
client: newPageBlobClient(conn.Endpoint(), conn.Pipeline()),
BlobClient: BlobClient{
client: newBlobClient(conn.Endpoint(), conn.Pipeline()),
},
}, nil
}
// NewPageBlobClientWithSharedKey creates a PageBlobClient object using the specified URL, shared key, and options.
// Example of blobURL: https://<your_storage_account>.blob.core.windows.net/<container>/<blob>
func NewPageBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*PageBlobClient, error) {
authPolicy := newSharedKeyCredPolicy(cred)
conOptions := getConnectionOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
conn := newConnection(blobURL, conOptions)
return &PageBlobClient{
client: newPageBlobClient(conn.Endpoint(), conn.Pipeline()),
BlobClient: BlobClient{
client: newBlobClient(conn.Endpoint(), conn.Pipeline()),
sharedKey: cred,
},
}, nil
}
// WithSnapshot creates a new PageBlobClient object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
func (pb *PageBlobClient) WithSnapshot(snapshot string) (*PageBlobClient, error) {
p, err := NewBlobURLParts(pb.URL())
if err != nil {
return nil, err
}
p.Snapshot = snapshot
endpoint := p.URL()
pipeline := pb.client.pl
return &PageBlobClient{
client: newPageBlobClient(endpoint, pipeline),
BlobClient: BlobClient{
client: newBlobClient(endpoint, pipeline),
sharedKey: pb.sharedKey,
},
}, nil
}
// WithVersionID creates a new PageBlobClient object identical to the source but with the specified version ID.
// Pass "" to remove the version returning a URL to the base blob.
func (pb *PageBlobClient) WithVersionID(versionID string) (*PageBlobClient, error) {
p, err := NewBlobURLParts(pb.URL())
if err != nil {
return nil, err
}
p.VersionID = versionID
endpoint := p.URL()
pipeline := pb.client.pl
return &PageBlobClient{
client: newPageBlobClient(endpoint, pipeline),
BlobClient: BlobClient{
client: newBlobClient(endpoint, pipeline),
sharedKey: pb.sharedKey,
},
}, nil
}
// Create creates a page blob of the specified length. Call UploadPages to upload data to a page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (pb *PageBlobClient) Create(ctx context.Context, size int64, o *PageBlobCreateOptions) (PageBlobCreateResponse, error) {
createOptions, HTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := o.format()
resp, err := pb.client.Create(ctx, 0, size, createOptions, HTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
return toPageBlobCreateResponse(resp), handleError(err)
}
// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes.
// This method returns an error if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
func (pb *PageBlobClient) UploadPages(ctx context.Context, body io.ReadSeekCloser, options *PageBlobUploadPagesOptions) (PageBlobUploadPagesResponse, error) {
count, err := validateSeekableStreamAt0AndGetCount(body)
if err != nil {
return PageBlobUploadPagesResponse{}, err
}
uploadPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format()
resp, err := pb.client.UploadPages(ctx, count, body, uploadPagesOptions, leaseAccessConditions,
cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions)
return toPageBlobUploadPagesResponse(resp), handleError(err)
}
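// Illustrative sketch (not part of the vendored file): creating a page blob and writing
// its first page from caller code. streaming.NopCloser (from azcore/streaming) adapts a
// bytes.Reader into the io.ReadSeekCloser this method requires; the PageRange option
// field name is an assumption for this SDK version. Offsets and sizes must be multiples
// of 512.
//
//    _, err := pageBlob.Create(ctx, 4096, nil)
//    if err != nil {
//        // handle error
//    }
//    body := streaming.NopCloser(bytes.NewReader(make([]byte, 512)))
//    _, err = pageBlob.UploadPages(ctx, body, &azblob.PageBlobUploadPagesOptions{
//        PageRange: &azblob.HttpRange{Offset: 0, Count: 512}, // assumed field name
//    })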
// UploadPagesFromURL copies 1 or more pages from a source URL to the page blob.
// The sourceOffset specifies the start offset of source data to copy from.
// The destOffset specifies the start offset in the destination page blob at which data will be written.
// The count must be a multiple of 512 bytes.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url.
func (pb *PageBlobClient) UploadPagesFromURL(ctx context.Context, source string, sourceOffset, destOffset, count int64,
options *PageBlobUploadPagesFromURLOptions) (PageBlobUploadPagesFromURLResponse, error) {
uploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions := options.format()
resp, err := pb.client.UploadPagesFromURL(ctx, source, rangeToString(sourceOffset, count), 0,
rangeToString(destOffset, count), uploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions,
sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions)
return toPageBlobUploadPagesFromURLResponse(resp), handleError(err)
}
// ClearPages frees the specified pages from the page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
func (pb *PageBlobClient) ClearPages(ctx context.Context, pageRange HttpRange, options *PageBlobClearPagesOptions) (PageBlobClearPagesResponse, error) {
clearOptions := &pageBlobClientClearPagesOptions{
Range: pageRange.format(),
}
leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format()
resp, err := pb.client.ClearPages(ctx, 0, clearOptions, leaseAccessConditions, cpkInfo,
cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions)
return toPageBlobClearPagesResponse(resp), handleError(err)
}
// GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
func (pb *PageBlobClient) GetPageRanges(options *PageBlobGetPageRangesOptions) *PageBlobGetPageRangesPager {
getPageRangesOptions, leaseAccessConditions, modifiedAccessConditions := options.format()
pageBlobGetPageRangesPager := pb.client.GetPageRanges(getPageRangesOptions, leaseAccessConditions, modifiedAccessConditions)
// override the advancer
pageBlobGetPageRangesPager.advancer = func(ctx context.Context, response pageBlobClientGetPageRangesResponse) (*policy.Request, error) {
getPageRangesOptions.Marker = response.NextMarker
req, err := pb.client.getPageRangesCreateRequest(ctx, getPageRangesOptions, leaseAccessConditions, modifiedAccessConditions)
if err != nil {
return nil, handleError(err)
}
queryValues, err := url.ParseQuery(req.Raw().URL.RawQuery)
if err != nil {
return nil, handleError(err)
}
req.Raw().URL.RawQuery = queryValues.Encode()
return req, nil
}
return toPageBlobGetPageRangesPager(pageBlobGetPageRangesPager)
}
// GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
func (pb *PageBlobClient) GetPageRangesDiff(options *PageBlobGetPageRangesDiffOptions) *PageBlobGetPageRangesDiffPager {
getPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions := options.format()
getPageRangesDiffPager := pb.client.GetPageRangesDiff(getPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions)
// override the advancer
getPageRangesDiffPager.advancer = func(ctx context.Context, response pageBlobClientGetPageRangesDiffResponse) (*policy.Request, error) {
getPageRangesDiffOptions.Marker = response.NextMarker
req, err := pb.client.getPageRangesDiffCreateRequest(ctx, getPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions)
if err != nil {
return nil, handleError(err)
}
queryValues, err := url.ParseQuery(req.Raw().URL.RawQuery)
if err != nil {
return nil, handleError(err)
}
req.Raw().URL.RawQuery = queryValues.Encode()
return req, nil
}
return toPageBlobGetPageRangesDiffPager(getPageRangesDiffPager)
}
// Resize resizes the page blob to the specified size (which must be a multiple of 512).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (pb *PageBlobClient) Resize(ctx context.Context, size int64, options *PageBlobResizeOptions) (PageBlobResizeResponse, error) {
resizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := options.format()
resp, err := pb.client.Resize(ctx, size, resizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
return toPageBlobResizeResponse(resp), handleError(err)
}
// UpdateSequenceNumber sets the page blob's sequence number.
func (pb *PageBlobClient) UpdateSequenceNumber(ctx context.Context, options *PageBlobUpdateSequenceNumberOptions) (PageBlobUpdateSequenceNumberResponse, error) {
actionType, updateOptions, lac, mac := options.format()
resp, err := pb.client.UpdateSequenceNumber(ctx, *actionType, updateOptions, lac, mac)
return toPageBlobUpdateSequenceNumberResponse(resp), handleError(err)
}
// StartCopyIncremental begins an operation to start an incremental copy from one page blob's snapshot to this page blob.
// The snapshot is copied such that only the differential changes between the previously copied snapshot and this one are transferred to the destination.
// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and
// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots.
func (pb *PageBlobClient) StartCopyIncremental(ctx context.Context, copySource string, prevSnapshot string, options *PageBlobCopyIncrementalOptions) (PageBlobCopyIncrementalResponse, error) {
copySourceURL, err := url.Parse(copySource)
if err != nil {
return PageBlobCopyIncrementalResponse{}, err
}
queryParams := copySourceURL.Query()
queryParams.Set("snapshot", prevSnapshot)
copySourceURL.RawQuery = queryParams.Encode()
pageBlobCopyIncrementalOptions, modifiedAccessConditions := options.format()
resp, err := pb.client.CopyIncremental(ctx, copySourceURL.String(), pageBlobCopyIncrementalOptions, modifiedAccessConditions)
return toPageBlobCopyIncrementalResponse(resp), handleError(err)
}

View file

@ -0,0 +1,184 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"net"
"net/url"
"strings"
)
const (
snapshot = "snapshot"
versionId = "versionid"
SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00"
)
// BlobURLParts object represents the components that make up an Azure Storage Container/Blob URL. You parse an
// existing URL into its parts by calling NewBlobURLParts(). You construct a URL from parts by calling URL().
// NOTE: Changing any SAS-related field requires computing a new SAS signature.
type BlobURLParts struct {
Scheme string // Ex: "https://"
Host string // Ex: "account.blob.core.windows.net", "10.132.141.33", "10.132.141.33:80"
IPEndpointStyleInfo IPEndpointStyleInfo
ContainerName string // "" if no container
BlobName string // "" if no blob
Snapshot string // "" if not a snapshot
SAS SASQueryParameters
UnparsedParams string
VersionID string // "" if not versioning enabled
}
// IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator.
// Ex: "https://10.132.141.33/accountname/containername"
type IPEndpointStyleInfo struct {
AccountName string // "" if not using IP endpoint style
}
// isIPEndpointStyle checks whether the URL's host is an IP address; in that case the storage account endpoint is composed as:
// http(s)://IP(:port)/storageaccount/container/...
// Like url.URL's Host property, host may be either host or host:port.
func isIPEndpointStyle(host string) bool {
if host == "" {
return false
}
if h, _, err := net.SplitHostPort(host); err == nil {
host = h
}
// For IPv6, SplitHostPort may fail because it cannot find a port.
// In this case, eliminate the '[' and ']' in the URL.
// For details about IPv6 URL, please refer to https://tools.ietf.org/html/rfc2732
if host[0] == '[' && host[len(host)-1] == ']' {
host = host[1 : len(host)-1]
}
return net.ParseIP(host) != nil
}
// NewBlobURLParts parses a URL initializing BlobURLParts' fields including any SAS-related & snapshot query parameters. Any other
// query parameters remain in the UnparsedParams field. This method overwrites all fields in the BlobURLParts object.
func NewBlobURLParts(u string) (BlobURLParts, error) {
uri, err := url.Parse(u)
if err != nil {
return BlobURLParts{}, err
}
up := BlobURLParts{
Scheme: uri.Scheme,
Host: uri.Host,
}
// Find the container & blob names (if any)
if uri.Path != "" {
path := uri.Path
if path[0] == '/' {
path = path[1:] // If path starts with a slash, remove it
}
if isIPEndpointStyle(up.Host) {
if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no container name or blob
up.IPEndpointStyleInfo.AccountName = path
path = "" // No ContainerName present in the URL so path should be empty
} else {
up.IPEndpointStyleInfo.AccountName = path[:accountEndIndex] // The account name is the part between the slashes
path = path[accountEndIndex+1:] // path refers to portion after the account name now (container & blob names)
}
}
containerEndIndex := strings.Index(path, "/") // Find the next slash (if it exists)
if containerEndIndex == -1 { // Slash not found; path has container name & no blob name
up.ContainerName = path
} else {
up.ContainerName = path[:containerEndIndex] // The container name is the part between the slashes
up.BlobName = path[containerEndIndex+1:] // The blob name is after the container slash
}
}
// Convert the query parameters to a case-sensitive map & trim whitespace
paramsMap := uri.Query()
up.Snapshot = "" // Assume no snapshot
if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get(snapshot); ok {
up.Snapshot = snapshotStr[0]
// If we recognized the query parameter, remove it from the map
delete(paramsMap, snapshot)
}
up.VersionID = "" // Assume no versionID
if versionIDs, ok := caseInsensitiveValues(paramsMap).Get(versionId); ok {
up.VersionID = versionIDs[0]
// If we recognized the query parameter, remove it from the map
delete(paramsMap, versionId) // delete "versionid" from paramsMap
delete(paramsMap, "versionId") // delete "versionId" from paramsMap
}
up.SAS = newSASQueryParameters(paramsMap, true)
up.UnparsedParams = paramsMap.Encode()
return up, nil
}
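// Illustrative sketch (not part of the vendored file): round-tripping a blob URL through
// BlobURLParts from caller code to change one component without string surgery.
//
//    parts, err := azblob.NewBlobURLParts(
//        "https://<account>.blob.core.windows.net/<container>/dir/blob.txt?versionid=abc")
//    if err != nil {
//        // handle error
//    }
//    fmt.Println(parts.ContainerName, parts.BlobName, parts.VersionID)
//    parts.VersionID = "" // point back at the base blob
//    fmt.Println(parts.URL())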
type caseInsensitiveValues url.Values // map[string][]string
func (values caseInsensitiveValues) Get(key string) ([]string, bool) {
key = strings.ToLower(key)
for k, v := range values {
if strings.ToLower(k) == key {
return v, true
}
}
return []string{}, false
}
// URL returns a URL object whose fields are initialized from the BlobURLParts fields. The URL's RawQuery
// field contains the SAS, snapshot, and unparsed query parameters.
func (up BlobURLParts) URL() string {
path := ""
if isIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" {
path += "/" + up.IPEndpointStyleInfo.AccountName
}
// Concatenate container & blob names (if they exist)
if up.ContainerName != "" {
path += "/" + up.ContainerName
if up.BlobName != "" {
path += "/" + up.BlobName
}
}
rawQuery := up.UnparsedParams
// If no snapshot is initially provided, fill it in from the SAS query properties to help the user
if up.Snapshot == "" && !up.SAS.snapshotTime.IsZero() {
up.Snapshot = up.SAS.snapshotTime.Format(SnapshotTimeFormat)
}
// Concatenate blob version id query parameter (if it exists)
if up.VersionID != "" {
if len(rawQuery) > 0 {
rawQuery += "&"
}
rawQuery += versionId + "=" + up.VersionID
}
// Concatenate blob snapshot query parameter (if it exists)
if up.Snapshot != "" {
if len(rawQuery) > 0 {
rawQuery += "&"
}
rawQuery += snapshot + "=" + up.Snapshot
}
sas := up.SAS.Encode()
if sas != "" {
if len(rawQuery) > 0 {
rawQuery += "&"
}
rawQuery += sas
}
u := url.URL{
Scheme: up.Scheme,
Host: up.Host,
Path: path,
RawQuery: rawQuery,
}
return u.String()
}

View file

@ -0,0 +1,17 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import "net/http"
// ResponseError wraps an error returned by the service.
type ResponseError interface {
Error() string
Unwrap() error
RawResponse() *http.Response
NonRetriable()
}

View file

@ -0,0 +1,35 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
// GetHTTPHeaders returns the user-modifiable properties for this blob.
func (bgpr BlobGetPropertiesResponse) GetHTTPHeaders() BlobHTTPHeaders {
return BlobHTTPHeaders{
BlobContentType: bgpr.ContentType,
BlobContentEncoding: bgpr.ContentEncoding,
BlobContentLanguage: bgpr.ContentLanguage,
BlobContentDisposition: bgpr.ContentDisposition,
BlobCacheControl: bgpr.CacheControl,
BlobContentMD5: bgpr.ContentMD5,
}
}
///////////////////////////////////////////////////////////////////////////////
// GetHTTPHeaders returns the user-modifiable properties for this blob.
func (r BlobDownloadResponse) GetHTTPHeaders() BlobHTTPHeaders {
return BlobHTTPHeaders{
BlobContentType: r.ContentType,
BlobContentEncoding: r.ContentEncoding,
BlobContentLanguage: r.ContentLanguage,
BlobContentDisposition: r.ContentDisposition,
BlobCacheControl: r.CacheControl,
BlobContentMD5: r.ContentMD5,
}
}
///////////////////////////////////////////////////////////////////////////////

View file

@ -0,0 +1,194 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"context"
"io"
"net"
"net/http"
"strings"
"sync"
)
const CountToEnd = 0
// HTTPGetter is a function type that refers to a method that performs an HTTP GET operation.
type HTTPGetter func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error)
// HTTPGetterInfo is passed to an HTTPGetter function passing it parameters
// that should be used to make an HTTP GET request.
type HTTPGetterInfo struct {
// Offset specifies the start offset that should be used when
// creating the HTTP GET request's Range header
Offset int64
// Count specifies the count of bytes that should be used to calculate
// the end offset when creating the HTTP GET request's Range header
Count int64
// ETag specifies the resource's etag that should be used when creating
// the HTTP GET request's If-Match header
ETag string
}
// FailedReadNotifier is a function type that represents the notification function called when a read fails
type FailedReadNotifier func(failureCount int, lastError error, offset int64, count int64, willRetry bool)
// RetryReaderOptions contains properties which can help to decide when to do retry.
type RetryReaderOptions struct {
// MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made
// while reading from a RetryReader. A value of zero means that no additional HTTP
// GET requests will be made.
MaxRetryRequests int
doInjectError bool
doInjectErrorRound int
injectedError error
// NotifyFailedRead is called, if non-nil, after any failure to read. Expected usage is diagnostic logging.
NotifyFailedRead FailedReadNotifier
// TreatEarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default,
// retryReader has the following special behaviour: closing the response body before it is all read is treated as a
// retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the
// read is too slow, caller may want to force a retry in the hope that the retry will be quicker). If
// TreatEarlyCloseAsError is true, then retryReader's special behaviour is suppressed, and "read on closed body" is instead
// treated as a fatal (non-retryable) error.
// Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens
// from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors
// which will be retried.
TreatEarlyCloseAsError bool
CpkInfo *CpkInfo
CpkScopeInfo *CpkScopeInfo
}
// retryReader implements the io.ReadCloser interface.
// retryReader reads from a response body; if a retriable network error occurs while
// reading, it retries (per the RetryReaderOptions) by executing the user-defined getter
// to obtain a new response, and continues the overall read from the new response body.
type retryReader struct {
ctx context.Context
info HTTPGetterInfo
countWasBounded bool
o RetryReaderOptions
getter HTTPGetter
// we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response
responseMu *sync.Mutex
response *http.Response
}
// NewRetryReader creates a retry reader.
func NewRetryReader(ctx context.Context, initialResponse *http.Response,
info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser {
return &retryReader{
ctx: ctx,
getter: getter,
info: info,
countWasBounded: info.Count != CountToEnd,
response: initialResponse,
responseMu: &sync.Mutex{},
o: o}
}
func (s *retryReader) setResponse(r *http.Response) {
s.responseMu.Lock()
defer s.responseMu.Unlock()
s.response = r
}
func (s *retryReader) Read(p []byte) (n int, err error) {
for try := 0; ; try++ {
// fmt.Println(try) // Uncomment for debugging.
if s.countWasBounded && s.info.Count == CountToEnd {
// User specified an original count and the remaining bytes are 0, return 0, EOF
return 0, io.EOF
}
s.responseMu.Lock()
resp := s.response
s.responseMu.Unlock()
if resp == nil { // We don't have a response stream to read from, try to get one.
newResponse, err := s.getter(s.ctx, s.info)
if err != nil {
return 0, err
}
// Successful GET; this is the network stream we'll read from.
s.setResponse(newResponse)
resp = newResponse
}
n, err := resp.Body.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running)
// Injection mechanism for testing.
if s.o.doInjectError && try == s.o.doInjectErrorRound {
if s.o.injectedError != nil {
err = s.o.injectedError
} else {
err = &net.DNSError{IsTemporary: true}
}
}
// We successfully read data or end EOF.
if err == nil || err == io.EOF {
s.info.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future
if s.info.Count != CountToEnd {
s.info.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future
}
return n, err // Return the result to the caller
}
_ = s.Close()
s.setResponse(nil) // Our stream is no longer good
// Check the retry count and error code, and decide whether to retry.
retriesExhausted := try >= s.o.MaxRetryRequests
_, isNetError := err.(net.Error)
isUnexpectedEOF := err == io.ErrUnexpectedEOF
willRetry := (isNetError || isUnexpectedEOF || s.wasRetryableEarlyClose(err)) && !retriesExhausted
// Notify, for logging purposes, of any failures
if s.o.NotifyFailedRead != nil {
failureCount := try + 1 // because try is zero-based
s.o.NotifyFailedRead(failureCount, err, s.info.Offset, s.info.Count, willRetry)
}
if willRetry {
continue
// Loop around and try to get and read from new stream.
}
return n, err // Not retryable, or retries exhausted, so just return
}
}
// By default, we allow early Closing, from another concurrent goroutine, to be used to force a retry
// Is this safe, to close early from another goroutine? Early close ultimately ends up calling
// net.Conn.Close, and that is documented as "Any blocked Read or Write operations will be unblocked and return errors"
// which is exactly the behaviour we want.
// NOTE: if the caller has forced an early Close from a separate goroutine (separate from the Read)
// then there are two different types of error that may happen - either the one we check for here,
// or a net.Error (due to closure of connection). Which one happens depends on timing. We only need this routine
// to check for one, since the other is a net.Error, which our main Read retry loop is already handling.
func (s *retryReader) wasRetryableEarlyClose(err error) bool {
if s.o.TreatEarlyCloseAsError {
return false // user wants all early closes to be errors, and so not retryable
}
// unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text
return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage)
}
const ReadOnClosedBodyMessage = "read on closed response body"
func (s *retryReader) Close() error {
s.responseMu.Lock()
defer s.responseMu.Unlock()
if s.response != nil && s.response.Body != nil {
return s.response.Body.Close()
}
return nil
}
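// Illustrative sketch (not part of the vendored file): a RetryReaderOptions value that
// retries transient read failures and logs each one. Wiring it in through the download
// response (e.g. resp.Body(&opts)) is an assumption about this SDK version.
//
//    opts := azblob.RetryReaderOptions{
//        MaxRetryRequests: 3, // up to 3 additional GETs after the initial response
//        NotifyFailedRead: func(failureCount int, lastError error, offset, count int64, willRetry bool) {
//            log.Printf("read failure %d at offset %d: %v (will retry: %v)",
//                failureCount, offset, lastError, willRetry)
//        },
//    }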

View file

@ -0,0 +1,243 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"bytes"
"errors"
"fmt"
"strings"
"time"
)
// AccountSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas
type AccountSASSignatureValues struct {
Version string `param:"sv"` // If not specified, this defaults to SASVersion
Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants
StartTime time.Time `param:"st"` // Not specified if IsZero
ExpiryTime time.Time `param:"se"` // Not specified if IsZero
Permissions string `param:"sp"` // Create by initializing an AccountSASPermissions and then call String()
IPRange IPRange `param:"sip"`
Services string `param:"ss"` // Create by initializing AccountSASServices and then call String()
ResourceTypes string `param:"srt"` // Create by initializing AccountSASResourceTypes and then call String()
}
// Sign uses an account's shared key credential to sign this signature values to produce
// the proper SAS query parameters.
func (v AccountSASSignatureValues) Sign(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) {
// https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS
if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" {
return SASQueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType")
}
if v.Version == "" {
v.Version = SASVersion
}
perms := &AccountSASPermissions{}
if err := perms.Parse(v.Permissions); err != nil {
return SASQueryParameters{}, err
}
v.Permissions = perms.String()
startTime, expiryTime, _ := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, time.Time{})
stringToSign := strings.Join([]string{
sharedKeyCredential.AccountName(),
v.Permissions,
v.Services,
v.ResourceTypes,
startTime,
expiryTime,
v.IPRange.String(),
string(v.Protocol),
v.Version,
""}, // That right, the account SAS requires a terminating extra newline
"\n")
signature, err := sharedKeyCredential.ComputeHMACSHA256(stringToSign)
if err != nil {
return SASQueryParameters{}, err
}
p := SASQueryParameters{
// Common SAS parameters
version: v.Version,
protocol: v.Protocol,
startTime: v.StartTime,
expiryTime: v.ExpiryTime,
permissions: v.Permissions,
ipRange: v.IPRange,
// Account-specific SAS parameters
services: v.Services,
resourceTypes: v.ResourceTypes,
// Calculated SAS signature
signature: signature,
}
return p, nil
}
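// Illustrative sketch (not part of the vendored file): signing an account SAS from caller
// code that can read and list blobs for one hour; cred is assumed to come from
// NewSharedKeyCredential.
//
//    sasParams, err := azblob.AccountSASSignatureValues{
//        Protocol:      azblob.SASProtocolHTTPS,
//        ExpiryTime:    time.Now().UTC().Add(time.Hour),
//        Permissions:   azblob.AccountSASPermissions{Read: true, List: true}.String(),
//        Services:      azblob.AccountSASServices{Blob: true}.String(),
//        ResourceTypes: azblob.AccountSASResourceTypes{Container: true, Object: true}.String(),
//    }.Sign(cred)
//    if err != nil {
//        // handle error
//    }
//    queryString := sasParams.Encode() // append to the account URL after '?'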
// AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field.
type AccountSASPermissions struct {
Read, Write, Delete, DeletePreviousVersion, List, Add, Create, Update, Process, Tag, FilterByTags bool
}
// String produces the SAS permissions string for an Azure Storage account.
// Call this method to set AccountSASSignatureValues's Permissions field.
func (p AccountSASPermissions) String() string {
var buffer bytes.Buffer
if p.Read {
buffer.WriteRune('r')
}
if p.Write {
buffer.WriteRune('w')
}
if p.Delete {
buffer.WriteRune('d')
}
if p.DeletePreviousVersion {
buffer.WriteRune('x')
}
if p.List {
buffer.WriteRune('l')
}
if p.Add {
buffer.WriteRune('a')
}
if p.Create {
buffer.WriteRune('c')
}
if p.Update {
buffer.WriteRune('u')
}
if p.Process {
buffer.WriteRune('p')
}
if p.Tag {
buffer.WriteRune('t')
}
if p.FilterByTags {
buffer.WriteRune('f')
}
return buffer.String()
}
// Parse initializes the AccountSASPermissions's fields from a string.
func (p *AccountSASPermissions) Parse(s string) error {
*p = AccountSASPermissions{} // Clear out the flags
for _, r := range s {
switch r {
case 'r':
p.Read = true
case 'w':
p.Write = true
case 'd':
p.Delete = true
case 'l':
p.List = true
case 'a':
p.Add = true
case 'c':
p.Create = true
case 'u':
p.Update = true
case 'p':
p.Process = true
case 'x':
p.DeletePreviousVersion = true
case 't':
p.Tag = true
case 'f':
p.FilterByTags = true
default:
return fmt.Errorf("invalid permission character: '%v'", r)
}
}
return nil
}
// AccountSASServices type simplifies creating the services string for an Azure Storage Account SAS.
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Services field.
type AccountSASServices struct {
Blob, Queue, File bool
}
// String produces the SAS services string for an Azure Storage account.
// Call this method to set AccountSASSignatureValues's Services field.
func (s AccountSASServices) String() string {
var buffer bytes.Buffer
if s.Blob {
buffer.WriteRune('b')
}
if s.Queue {
buffer.WriteRune('q')
}
if s.File {
buffer.WriteRune('f')
}
return buffer.String()
}
// Parse initializes the AccountSASServices' fields from a string.
func (s *AccountSASServices) Parse(str string) error {
*s = AccountSASServices{} // Clear out the flags
for _, r := range str {
switch r {
case 'b':
s.Blob = true
case 'q':
s.Queue = true
case 'f':
s.File = true
default:
return fmt.Errorf("invalid service character: '%v'", r)
}
}
return nil
}
// AccountSASResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS.
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's ResourceTypes field.
type AccountSASResourceTypes struct {
Service, Container, Object bool
}
// String produces the SAS resource types string for an Azure Storage account.
// Call this method to set AccountSASSignatureValues's ResourceTypes field.
func (rt AccountSASResourceTypes) String() string {
var buffer bytes.Buffer
if rt.Service {
buffer.WriteRune('s')
}
if rt.Container {
buffer.WriteRune('c')
}
if rt.Object {
buffer.WriteRune('o')
}
return buffer.String()
}
// Parse initializes the AccountSASResourceTypes' fields from a string.
func (rt *AccountSASResourceTypes) Parse(s string) error {
*rt = AccountSASResourceTypes{} // Clear out the flags
for _, r := range s {
switch r {
case 's':
rt.Service = true
case 'c':
rt.Container = true
case 'o':
rt.Object = true
default:
return fmt.Errorf("invalid resource type: '%v'", r)
}
}
return nil
}

View file

@ -0,0 +1,427 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"errors"
"net"
"net/url"
"strings"
"time"
)
// SASProtocol indicates the protocol(s) permitted for a SAS: https, or https and http.
type SASProtocol string
const (
// SASProtocolHTTPS can be specified for a SAS protocol
SASProtocolHTTPS SASProtocol = "https"
// SASProtocolHTTPSandHTTP can be specified for a SAS protocol
//SASProtocolHTTPSandHTTP SASProtocol = "https,http"
)
// FormatTimesForSASSigning converts a time.Time to a snapshotTimeFormat string suitable for a
// SASField's StartTime or ExpiryTime fields. Returns "" if value.IsZero().
func FormatTimesForSASSigning(startTime, expiryTime, snapshotTime time.Time) (string, string, string) {
ss := ""
if !startTime.IsZero() {
ss = formatSASTimeWithDefaultFormat(&startTime)
}
se := ""
if !expiryTime.IsZero() {
se = formatSASTimeWithDefaultFormat(&expiryTime)
}
sh := ""
if !snapshotTime.IsZero() {
sh = snapshotTime.Format(SnapshotTimeFormat)
}
return ss, se, sh
}
// SASTimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time.
const SASTimeFormat = "2006-01-02T15:04:05Z" //"2017-07-27T00:00:00Z" // ISO 8601
var SASTimeFormats = []string{"2006-01-02T15:04:05.0000000Z", SASTimeFormat, "2006-01-02T15:04Z", "2006-01-02"} // ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details.
// formatSASTimeWithDefaultFormat format time with ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ".
func formatSASTimeWithDefaultFormat(t *time.Time) string {
return formatSASTime(t, SASTimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
}
// formatSASTime format time with given format, use ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ" by default.
func formatSASTime(t *time.Time, format string) string {
if format != "" {
return t.Format(format)
}
return t.Format(SASTimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
}
// parseSASTimeString tries to parse a SAS time string against the known ISO 8601 formats.
func parseSASTimeString(val string) (t time.Time, timeFormat string, err error) {
for _, sasTimeFormat := range SASTimeFormats {
t, err = time.Parse(sasTimeFormat, val)
if err == nil {
timeFormat = sasTimeFormat
break
}
}
if err != nil {
err = errors.New("fail to parse time with IOS 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details")
}
return
}
// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
// SASQueryParameters object represents the components that make up an Azure Storage SAS' query parameters.
// You parse a map of query parameters into its fields by calling newSASQueryParameters(). You add the components
// to a query parameter map by calling addToValues().
// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type.
// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues).
type SASQueryParameters struct {
// All members are immutable or values so copies of this struct are goroutine-safe.
version string `param:"sv"`
services string `param:"ss"`
resourceTypes string `param:"srt"`
protocol SASProtocol `param:"spr"`
startTime time.Time `param:"st"`
expiryTime time.Time `param:"se"`
snapshotTime time.Time `param:"snapshot"`
ipRange IPRange `param:"sip"`
identifier string `param:"si"`
resource string `param:"sr"`
permissions string `param:"sp"`
signature string `param:"sig"`
cacheControl string `param:"rscc"`
contentDisposition string `param:"rscd"`
contentEncoding string `param:"rsce"`
contentLanguage string `param:"rscl"`
contentType string `param:"rsct"`
signedOid string `param:"skoid"`
signedTid string `param:"sktid"`
signedStart time.Time `param:"skt"`
signedService string `param:"sks"`
signedExpiry time.Time `param:"ske"`
signedVersion string `param:"skv"`
signedDirectoryDepth string `param:"sdd"`
preauthorizedAgentObjectId string `param:"saoid"`
agentObjectId string `param:"suoid"`
correlationId string `param:"scid"`
// private member used for startTime and expiryTime formatting.
stTimeFormat string
seTimeFormat string
}
// PreauthorizedAgentObjectId returns preauthorizedAgentObjectId
func (p *SASQueryParameters) PreauthorizedAgentObjectId() string {
return p.preauthorizedAgentObjectId
}
// AgentObjectId returns agentObjectId
func (p *SASQueryParameters) AgentObjectId() string {
return p.agentObjectId
}
// SignedCorrelationId returns signedCorrelationId
func (p *SASQueryParameters) SignedCorrelationId() string {
return p.correlationId
}
// SignedTid returns signedTid
func (p *SASQueryParameters) SignedTid() string {
return p.signedTid
}
// SignedStart returns signedStart
func (p *SASQueryParameters) SignedStart() time.Time {
return p.signedStart
}
// SignedExpiry returns signedExpiry
func (p *SASQueryParameters) SignedExpiry() time.Time {
return p.signedExpiry
}
// SignedService returns signedService
func (p *SASQueryParameters) SignedService() string {
return p.signedService
}
// SignedVersion returns signedVersion
func (p *SASQueryParameters) SignedVersion() string {
return p.signedVersion
}
// SnapshotTime returns snapshotTime
func (p *SASQueryParameters) SnapshotTime() time.Time {
return p.snapshotTime
}
// Version returns version
func (p *SASQueryParameters) Version() string {
return p.version
}
// Services returns services
func (p *SASQueryParameters) Services() string {
return p.services
}
// ResourceTypes returns resourceTypes
func (p *SASQueryParameters) ResourceTypes() string {
return p.resourceTypes
}
// Protocol returns protocol
func (p *SASQueryParameters) Protocol() SASProtocol {
return p.protocol
}
// StartTime returns startTime
func (p *SASQueryParameters) StartTime() time.Time {
return p.startTime
}
// ExpiryTime returns expiryTime
func (p *SASQueryParameters) ExpiryTime() time.Time {
return p.expiryTime
}
// IPRange returns ipRange
func (p *SASQueryParameters) IPRange() IPRange {
return p.ipRange
}
// Identifier returns identifier
func (p *SASQueryParameters) Identifier() string {
return p.identifier
}
// Resource returns resource
func (p *SASQueryParameters) Resource() string {
return p.resource
}
// Permissions returns permissions
func (p *SASQueryParameters) Permissions() string {
return p.permissions
}
// Signature returns signature
func (p *SASQueryParameters) Signature() string {
return p.signature
}
// CacheControl returns cacheControl
func (p *SASQueryParameters) CacheControl() string {
return p.cacheControl
}
// ContentDisposition returns contentDisposition
func (p *SASQueryParameters) ContentDisposition() string {
return p.contentDisposition
}
// ContentEncoding returns contentEncoding
func (p *SASQueryParameters) ContentEncoding() string {
return p.contentEncoding
}
// ContentLanguage returns contentLanguage
func (p *SASQueryParameters) ContentLanguage() string {
return p.contentLanguage
}
// ContentType returns contentType
func (p *SASQueryParameters) ContentType() string {
return p.contentType
}
// SignedDirectoryDepth returns signedDirectoryDepth
func (p *SASQueryParameters) SignedDirectoryDepth() string {
return p.signedDirectoryDepth
}
// IPRange represents a SAS IP range's start IP and (optionally) end IP.
type IPRange struct {
Start net.IP // Not specified if length = 0
End net.IP // Not specified if length = 0
}
// String returns a string representation of an IPRange.
func (ipr *IPRange) String() string {
if len(ipr.Start) == 0 {
return ""
}
start := ipr.Start.String()
if len(ipr.End) == 0 {
return start
}
return start + "-" + ipr.End.String()
}
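// Illustrative sketch (not part of the vendored file): the two string forms an IPRange
// can take, as consumed by the sip SAS parameter.
//
//    single := azblob.IPRange{Start: net.ParseIP("168.1.5.65")}
//    fmt.Println(single.String()) // "168.1.5.65"
//    span := azblob.IPRange{
//        Start: net.ParseIP("168.1.5.60"),
//        End:   net.ParseIP("168.1.5.70"),
//    }
//    fmt.Println(span.String()) // "168.1.5.60-168.1.5.70"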
// NewSASQueryParameters creates and initializes a SASQueryParameters object based on the
// query parameter map's passed-in values. If deleteSASParametersFromValues is true,
// all SAS-related query parameters are removed from the passed-in map. If
// deleteSASParametersFromValues is false, the passed-in map is unaltered.
func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool) SASQueryParameters {
p := SASQueryParameters{}
for k, v := range values {
val := v[0]
isSASKey := true
switch strings.ToLower(k) {
case "sv":
p.version = val
case "ss":
p.services = val
case "srt":
p.resourceTypes = val
case "spr":
p.protocol = SASProtocol(val)
case "snapshot":
p.snapshotTime, _ = time.Parse(SnapshotTimeFormat, val)
case "st":
p.startTime, p.stTimeFormat, _ = parseSASTimeString(val)
case "se":
p.expiryTime, p.seTimeFormat, _ = parseSASTimeString(val)
case "sip":
dashIndex := strings.Index(val, "-")
if dashIndex == -1 {
p.ipRange.Start = net.ParseIP(val)
} else {
p.ipRange.Start = net.ParseIP(val[:dashIndex])
p.ipRange.End = net.ParseIP(val[dashIndex+1:])
}
case "si":
p.identifier = val
case "sr":
p.resource = val
case "sp":
p.permissions = val
case "sig":
p.signature = val
case "rscc":
p.cacheControl = val
case "rscd":
p.contentDisposition = val
case "rsce":
p.contentEncoding = val
case "rscl":
p.contentLanguage = val
case "rsct":
p.contentType = val
case "skoid":
p.signedOid = val
case "sktid":
p.signedTid = val
case "skt":
p.signedStart, _ = time.Parse(SASTimeFormat, val)
case "ske":
p.signedExpiry, _ = time.Parse(SASTimeFormat, val)
case "sks":
p.signedService = val
case "skv":
p.signedVersion = val
case "sdd":
p.signedDirectoryDepth = val
case "saoid":
p.preauthorizedAgentObjectId = val
case "suoid":
p.agentObjectId = val
case "scid":
p.correlationId = val
default:
isSASKey = false // We didn't recognize the query parameter
}
if isSASKey && deleteSASParametersFromValues {
delete(values, k)
}
}
return p
}
// addToValues adds the SAS components to the specified query parameters map.
func (p *SASQueryParameters) addToValues(v url.Values) url.Values {
if p.version != "" {
v.Add("sv", p.version)
}
if p.services != "" {
v.Add("ss", p.services)
}
if p.resourceTypes != "" {
v.Add("srt", p.resourceTypes)
}
if p.protocol != "" {
v.Add("spr", string(p.protocol))
}
if !p.startTime.IsZero() {
v.Add("st", formatSASTime(&(p.startTime), p.stTimeFormat))
}
if !p.expiryTime.IsZero() {
v.Add("se", formatSASTime(&(p.expiryTime), p.seTimeFormat))
}
if len(p.ipRange.Start) > 0 {
v.Add("sip", p.ipRange.String())
}
if p.identifier != "" {
v.Add("si", p.identifier)
}
if p.resource != "" {
v.Add("sr", p.resource)
}
if p.permissions != "" {
v.Add("sp", p.permissions)
}
if p.signedOid != "" {
v.Add("skoid", p.signedOid)
v.Add("sktid", p.signedTid)
v.Add("skt", p.signedStart.Format(SASTimeFormat))
v.Add("ske", p.signedExpiry.Format(SASTimeFormat))
v.Add("sks", p.signedService)
v.Add("skv", p.signedVersion)
}
if p.signature != "" {
v.Add("sig", p.signature)
}
if p.cacheControl != "" {
v.Add("rscc", p.cacheControl)
}
if p.contentDisposition != "" {
v.Add("rscd", p.contentDisposition)
}
if p.contentEncoding != "" {
v.Add("rsce", p.contentEncoding)
}
if p.contentLanguage != "" {
v.Add("rscl", p.contentLanguage)
}
if p.contentType != "" {
v.Add("rsct", p.contentType)
}
if p.signedDirectoryDepth != "" {
v.Add("sdd", p.signedDirectoryDepth)
}
if p.preauthorizedAgentObjectId != "" {
v.Add("saoid", p.preauthorizedAgentObjectId)
}
if p.agentObjectId != "" {
v.Add("suoid", p.agentObjectId)
}
if p.correlationId != "" {
v.Add("scid", p.correlationId)
}
return v
}
// Encode encodes the SAS query parameters into URL encoded form sorted by key.
func (p *SASQueryParameters) Encode() string {
v := url.Values{}
p.addToValues(v)
return v.Encode()
}

View file

@ -0,0 +1,365 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"bytes"
"fmt"
"strings"
"time"
)
// BlobSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas
type BlobSASSignatureValues struct {
Version string `param:"sv"` // If not specified, this defaults to SASVersion
Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants
StartTime time.Time `param:"st"` // Not specified if IsZero
ExpiryTime time.Time `param:"se"` // Not specified if IsZero
SnapshotTime time.Time
Permissions string `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String()
IPRange IPRange `param:"sip"`
Identifier string `param:"si"`
ContainerName string
BlobName string // Use "" to create a Container SAS
Directory string // Not empty for a directory SAS (i.e. sr=d)
CacheControl string // rscc
ContentDisposition string // rscd
ContentEncoding string // rsce
ContentLanguage string // rscl
ContentType string // rsct
BlobVersion string // sr=bv
PreauthorizedAgentObjectId string
AgentObjectId string
CorrelationId string
}
func getDirectoryDepth(path string) string {
if path == "" {
return ""
}
return fmt.Sprint(strings.Count(path, "/") + 1)
}
// NewSASQueryParameters uses an account's SharedKeyCredential to sign these signature values and produce
// the proper SAS query parameters.
func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) {
resource := "c"
if sharedKeyCredential == nil {
return SASQueryParameters{}, fmt.Errorf("cannot sign SAS query without Shared Key Credential")
}
if !v.SnapshotTime.IsZero() {
resource = "bs"
// Make sure the permission characters are in the correct order
perms := &BlobSASPermissions{}
if err := perms.Parse(v.Permissions); err != nil {
return SASQueryParameters{}, err
}
v.Permissions = perms.String()
} else if v.BlobVersion != "" {
resource = "bv"
// Make sure the permission characters are in the correct order
perms := &BlobSASPermissions{}
if err := perms.Parse(v.Permissions); err != nil {
return SASQueryParameters{}, err
}
v.Permissions = perms.String()
} else if v.Directory != "" {
resource = "d"
v.BlobName = ""
perms := &BlobSASPermissions{}
if err := perms.Parse(v.Permissions); err != nil {
return SASQueryParameters{}, err
}
v.Permissions = perms.String()
} else if v.BlobName == "" {
// Make sure the permission characters are in the correct order
perms := &ContainerSASPermissions{}
if err := perms.Parse(v.Permissions); err != nil {
return SASQueryParameters{}, err
}
v.Permissions = perms.String()
} else {
resource = "b"
// Make sure the permission characters are in the correct order
perms := &BlobSASPermissions{}
if err := perms.Parse(v.Permissions); err != nil {
return SASQueryParameters{}, err
}
v.Permissions = perms.String()
}
if v.Version == "" {
v.Version = SASVersion
}
startTime, expiryTime, snapshotTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime)
signedIdentifier := v.Identifier
//udk := sharedKeyCredential.getUDKParams()
//
//if udk != nil {
// udkStart, udkExpiry, _ := FormatTimesForSASSigning(udk.SignedStart, udk.SignedExpiry, time.Time{})
// //I don't like this answer to combining the functions
// //But because signedIdentifier and the user delegation key strings share a place, this is an _OK_ way to do it.
// signedIdentifier = strings.Join([]string{
// udk.SignedOid,
// udk.SignedTid,
// udkStart,
// udkExpiry,
// udk.SignedService,
// udk.SignedVersion,
// v.PreauthorizedAgentObjectId,
// v.AgentObjectId,
// v.CorrelationId,
// }, "\n")
//}
// String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
stringToSign := strings.Join([]string{
v.Permissions,
startTime,
expiryTime,
getCanonicalName(sharedKeyCredential.AccountName(), v.ContainerName, v.BlobName, v.Directory),
signedIdentifier,
v.IPRange.String(),
string(v.Protocol),
v.Version,
resource,
snapshotTime, // signed timestamp
v.CacheControl, // rscc
v.ContentDisposition, // rscd
v.ContentEncoding, // rsce
v.ContentLanguage, // rscl
v.ContentType}, // rsct
"\n")
signature := ""
signature, err := sharedKeyCredential.ComputeHMACSHA256(stringToSign)
if err != nil {
return SASQueryParameters{}, err
}
p := SASQueryParameters{
// Common SAS parameters
version: v.Version,
protocol: v.Protocol,
startTime: v.StartTime,
expiryTime: v.ExpiryTime,
permissions: v.Permissions,
ipRange: v.IPRange,
// Container/Blob-specific SAS parameters
resource: resource,
identifier: v.Identifier,
cacheControl: v.CacheControl,
contentDisposition: v.ContentDisposition,
contentEncoding: v.ContentEncoding,
contentLanguage: v.ContentLanguage,
contentType: v.ContentType,
snapshotTime: v.SnapshotTime,
signedDirectoryDepth: getDirectoryDepth(v.Directory),
preauthorizedAgentObjectId: v.PreauthorizedAgentObjectId,
agentObjectId: v.AgentObjectId,
correlationId: v.CorrelationId,
// Calculated SAS signature
signature: signature,
}
////User delegation SAS specific parameters
//if udk != nil {
// p.signedOid = udk.SignedOid
// p.signedTid = udk.SignedTid
// p.signedStart = udk.SignedStart
// p.signedExpiry = udk.SignedExpiry
// p.signedService = udk.SignedService
// p.signedVersion = udk.SignedVersion
//}
return p, nil
}
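// Usage sketch (hypothetical account, key, container and blob names; these are
// placeholders, not part of this package):
//
//	cred, _ := NewSharedKeyCredential("myaccount", "<base64-account-key>")
//	sasValues := BlobSASSignatureValues{
//		Protocol:      SASProtocolHTTPS,
//		ExpiryTime:    time.Now().UTC().Add(time.Hour),
//		Permissions:   BlobSASPermissions{Read: true}.String(),
//		ContainerName: "backups",
//		BlobName:      "snapshot.bin",
//	}
//	qp, err := sasValues.NewSASQueryParameters(cred)
//	if err == nil {
//		_ = qp.Encode() // append to the blob URL as its query string
//	}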
// getCanonicalName computes the canonical name for a container or blob resource for SAS signing.
func getCanonicalName(account string, containerName string, blobName string, directoryName string) string {
// Container: "/blob/account/containername"
// Blob: "/blob/account/containername/blobname"
elements := []string{"/blob/", account, "/", containerName}
if blobName != "" {
elements = append(elements, "/", strings.Replace(blobName, "\\", "/", -1))
} else if directoryName != "" {
elements = append(elements, "/", directoryName)
}
return strings.Join(elements, "")
}
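// For example (illustrative): getCanonicalName("myaccount", "backups", "snapshot.bin", "")
// returns "/blob/myaccount/backups/snapshot.bin".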
// ContainerSASPermissions type simplifies creating the permissions string for an Azure Storage container SAS.
// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
// All permissions descriptions can be found here: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-directory-container-or-blob
type ContainerSASPermissions struct {
Read, Add, Create, Write, Delete, DeletePreviousVersion, List, Tag bool
Execute, ModifyOwnership, ModifyPermissions bool // Hierarchical Namespace only
}
// String produces the SAS permissions string for an Azure Storage container.
// Call this method to set BlobSASSignatureValues's Permissions field.
func (p ContainerSASPermissions) String() string {
var b bytes.Buffer
if p.Read {
b.WriteRune('r')
}
if p.Add {
b.WriteRune('a')
}
if p.Create {
b.WriteRune('c')
}
if p.Write {
b.WriteRune('w')
}
if p.Delete {
b.WriteRune('d')
}
if p.DeletePreviousVersion {
b.WriteRune('x')
}
if p.List {
b.WriteRune('l')
}
if p.Tag {
b.WriteRune('t')
}
if p.Execute {
b.WriteRune('e')
}
if p.ModifyOwnership {
b.WriteRune('o')
}
if p.ModifyPermissions {
b.WriteRune('p')
}
return b.String()
}
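// For example (illustrative): ContainerSASPermissions{Read: true, List: true}.String()
// returns "rl", already in the order the service expects.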
// Parse initializes the ContainerSASPermissions's fields from a string.
func (p *ContainerSASPermissions) Parse(s string) error {
*p = ContainerSASPermissions{} // Clear the flags
for _, r := range s {
switch r {
case 'r':
p.Read = true
case 'a':
p.Add = true
case 'c':
p.Create = true
case 'w':
p.Write = true
case 'd':
p.Delete = true
case 'x':
p.DeletePreviousVersion = true
case 'l':
p.List = true
case 't':
p.Tag = true
case 'e':
p.Execute = true
case 'o':
p.ModifyOwnership = true
case 'p':
p.ModifyPermissions = true
default:
return fmt.Errorf("invalid permission: '%v'", r)
}
}
return nil
}
// BlobSASPermissions type simplifies creating the permissions string for an Azure Storage blob SAS.
// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
type BlobSASPermissions struct {
Read, Add, Create, Write, Delete, DeletePreviousVersion, Tag, List, Move, Execute, Ownership, Permissions bool
}
// String produces the SAS permissions string for an Azure Storage blob.
// Call this method to set BlobSASSignatureValues's Permissions field.
func (p BlobSASPermissions) String() string {
var b bytes.Buffer
if p.Read {
b.WriteRune('r')
}
if p.Add {
b.WriteRune('a')
}
if p.Create {
b.WriteRune('c')
}
if p.Write {
b.WriteRune('w')
}
if p.Delete {
b.WriteRune('d')
}
if p.DeletePreviousVersion {
b.WriteRune('x')
}
if p.Tag {
b.WriteRune('t')
}
if p.List {
b.WriteRune('l')
}
if p.Move {
b.WriteRune('m')
}
if p.Execute {
b.WriteRune('e')
}
if p.Ownership {
b.WriteRune('o')
}
if p.Permissions {
b.WriteRune('p')
}
return b.String()
}
// Parse initializes the BlobSASPermissions's fields from a string.
func (p *BlobSASPermissions) Parse(s string) error {
*p = BlobSASPermissions{} // Clear the flags
for _, r := range s {
switch r {
case 'r':
p.Read = true
case 'a':
p.Add = true
case 'c':
p.Create = true
case 'w':
p.Write = true
case 'd':
p.Delete = true
case 'x':
p.DeletePreviousVersion = true
case 't':
p.Tag = true
case 'l':
p.List = true
case 'm':
p.Move = true
case 'e':
p.Execute = true
case 'o':
p.Ownership = true
case 'p':
p.Permissions = true
default:
return fmt.Errorf("invalid permission: '%v'", r)
}
}
return nil
}
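// Round-trip sketch (illustrative):
//
//	var p BlobSASPermissions
//	if err := p.Parse("wrd"); err == nil {
//		_ = p.String() // "rwd": flags are re-emitted in canonical order
//	}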

View file

@ -0,0 +1,266 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"context"
"errors"
"net/url"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
//nolint
const (
// ContainerNameRoot is the special Azure Storage name used to identify a storage account's root container.
ContainerNameRoot = "$root"
// ContainerNameLogs is the special Azure Storage name used to identify a storage account's logs container.
ContainerNameLogs = "$logs"
)
// ServiceClient represents a URL to the Azure Blob Storage service allowing you to manipulate blob containers.
type ServiceClient struct {
client *serviceClient
sharedKey *SharedKeyCredential
}
// URL returns the URL endpoint used by the ServiceClient object.
func (s ServiceClient) URL() string {
return s.client.endpoint
}
// NewServiceClient creates a ServiceClient object using the specified URL, Azure AD credential, and options.
// Example of serviceURL: https://<your_storage_account>.blob.core.windows.net
func NewServiceClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*ServiceClient, error) {
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil)
conOptions := getConnectionOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
conn := newConnection(serviceURL, conOptions)
return &ServiceClient{
client: newServiceClient(conn.Endpoint(), conn.Pipeline()),
}, nil
}
// NewServiceClientWithNoCredential creates a ServiceClient object using the specified URL and options.
// Example of serviceURL: https://<your_storage_account>.blob.core.windows.net?<SAS token>
func NewServiceClientWithNoCredential(serviceURL string, options *ClientOptions) (*ServiceClient, error) {
conOptions := getConnectionOptions(options)
conn := newConnection(serviceURL, conOptions)
return &ServiceClient{
client: newServiceClient(conn.Endpoint(), conn.Pipeline()),
}, nil
}
// NewServiceClientWithSharedKey creates a ServiceClient object using the specified URL, shared key, and options.
// Example of serviceURL: https://<your_storage_account>.blob.core.windows.net
func NewServiceClientWithSharedKey(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*ServiceClient, error) {
authPolicy := newSharedKeyCredPolicy(cred)
conOptions := getConnectionOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
conn := newConnection(serviceURL, conOptions)
return &ServiceClient{
client: newServiceClient(conn.Endpoint(), conn.Pipeline()),
sharedKey: cred,
}, nil
}
// NewServiceClientFromConnectionString creates a service client from the given connection string.
//nolint
func NewServiceClientFromConnectionString(connectionString string, options *ClientOptions) (*ServiceClient, error) {
endpoint, credential, err := parseConnectionString(connectionString)
if err != nil {
return nil, err
}
return NewServiceClientWithSharedKey(endpoint, credential, options)
}
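// Usage sketch (the connection string below is a placeholder, not a real credential):
//
//	svc, err := NewServiceClientFromConnectionString(
//		"DefaultEndpointsProtocol=https;AccountName=myaccount;AccountKey=<base64-key>;EndpointSuffix=core.windows.net",
//		nil)
//	if err != nil {
//		// handle the error
//	}
//	_ = svc.URL()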
// NewContainerClient creates a new ContainerClient object by concatenating containerName to the end of
// ServiceClient's URL. The new ContainerClient uses the same request policy pipeline as the ServiceClient.
// To change the pipeline, create the ContainerClient and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewContainerClient instead of calling this object's
// NewContainerClient method.
func (s *ServiceClient) NewContainerClient(containerName string) (*ContainerClient, error) {
containerURL := appendToURLPath(s.client.endpoint, containerName)
return &ContainerClient{
client: newContainerClient(containerURL, s.client.pl),
sharedKey: s.sharedKey,
}, nil
}
// CreateContainer is a lifecycle method that creates a new container under the specified account.
// If the container with the same name already exists, a ResourceExistsError will be raised.
// This method returns a client with which to interact with the newly created container.
func (s *ServiceClient) CreateContainer(ctx context.Context, containerName string, options *ContainerCreateOptions) (ContainerCreateResponse, error) {
containerClient, err := s.NewContainerClient(containerName)
if err != nil {
return ContainerCreateResponse{}, err
}
containerCreateResp, err := containerClient.Create(ctx, options)
return containerCreateResp, err
}
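// Usage sketch (assumes svc is a *ServiceClient; "backups" is a hypothetical container name):
//
//	_, err := svc.CreateContainer(context.TODO(), "backups", nil)
//	if err != nil {
//		// a ResourceExistsError here means the container already exists
//	}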
// DeleteContainer is a lifecycle method that marks the specified container for deletion.
// The container and any blobs contained within it are later deleted during garbage collection.
// If the container is not found, a ResourceNotFoundError will be raised.
func (s *ServiceClient) DeleteContainer(ctx context.Context, containerName string, options *ContainerDeleteOptions) (ContainerDeleteResponse, error) {
containerClient, _ := s.NewContainerClient(containerName)
containerDeleteResp, err := containerClient.Delete(ctx, options)
return containerDeleteResp, err
}
// appendToURLPath appends a string to the end of a URL's path (prefixing the string with a '/' if required)
func appendToURLPath(u string, name string) string {
// e.g. "https://ms.com/a/b/?k1=v1&k2=v2#f"
// When you call url.Parse() this is what you'll get:
// Scheme: "https"
// Opaque: ""
// User: nil
// Host: "ms.com"
// Path: "/a/b/" This should start with a / and it might or might not have a trailing slash
// RawPath: ""
// ForceQuery: false
// RawQuery: "k1=v1&k2=v2"
// Fragment: "f"
uri, _ := url.Parse(u)
if len(uri.Path) == 0 || uri.Path[len(uri.Path)-1] != '/' {
uri.Path += "/" // Append "/" to end before appending name
}
uri.Path += name
return uri.String()
}
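// For example (illustrative):
//
//	appendToURLPath("https://myaccount.blob.core.windows.net", "backups")
//
// returns "https://myaccount.blob.core.windows.net/backups"; any query string or
// fragment on the input URL is preserved.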
// GetAccountInfo provides account-level information.
func (s *ServiceClient) GetAccountInfo(ctx context.Context, o *ServiceGetAccountInfoOptions) (ServiceGetAccountInfoResponse, error) {
getAccountInfoOptions := o.format()
resp, err := s.client.GetAccountInfo(ctx, getAccountInfoOptions)
return toServiceGetAccountInfoResponse(resp), handleError(err)
}
// ListContainers operation returns a pager of the containers under the specified account.
// Use an empty Marker to start enumeration from the beginning. Container names are returned in lexicographic order.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-containers2.
func (s *ServiceClient) ListContainers(o *ListContainersOptions) *ServiceListContainersSegmentPager {
listOptions := o.format()
pager := s.client.ListContainersSegment(listOptions)
//TODO: .Err()?
//// override the generated advancer, which is incorrect
//if pager.Err() != nil {
// return pager
//}
pager.advancer = func(ctx context.Context, response serviceClientListContainersSegmentResponse) (*policy.Request, error) {
if response.ListContainersSegmentResponse.NextMarker == nil {
return nil, handleError(errors.New("unexpected missing NextMarker"))
}
req, err := s.client.listContainersSegmentCreateRequest(ctx, listOptions)
if err != nil {
return nil, handleError(err)
}
queryValues, _ := url.ParseQuery(req.Raw().URL.RawQuery)
queryValues.Set("marker", *response.ListContainersSegmentResponse.NextMarker)
req.Raw().URL.RawQuery = queryValues.Encode()
return req, nil
}
return toServiceListContainersSegmentPager(*pager)
}
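// Usage sketch (assumes svc is a *ServiceClient; the pager follows the usual azcore convention):
//
//	pager := svc.ListContainers(nil)
//	for pager.NextPage(context.TODO()) {
//		resp := pager.PageResponse()
//		_ = resp // inspect the containers in this page
//	}
//	if err := pager.Err(); err != nil {
//		// handle the error
//	}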
// GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics
// and CORS (Cross-Origin Resource Sharing) rules.
func (s *ServiceClient) GetProperties(ctx context.Context, o *ServiceGetPropertiesOptions) (ServiceGetPropertiesResponse, error) {
getPropertiesOptions := o.format()
resp, err := s.client.GetProperties(ctx, getPropertiesOptions)
return toServiceGetPropertiesResponse(resp), handleError(err)
}
// SetProperties sets the properties of a storage account's Blob service, including Azure Storage Analytics.
// If an element (e.g. analytics_logging) is left as None, the existing settings on the service for that functionality are preserved.
func (s *ServiceClient) SetProperties(ctx context.Context, o *ServiceSetPropertiesOptions) (ServiceSetPropertiesResponse, error) {
properties, setPropertiesOptions := o.format()
resp, err := s.client.SetProperties(ctx, properties, setPropertiesOptions)
return toServiceSetPropertiesResponse(resp), handleError(err)
}
// GetStatistics retrieves statistics related to replication for the Blob service.
// It is only available when read-access geo-redundant replication is enabled for the storage account.
// With geo-redundant replication, Azure Storage maintains durable copies of your
// data in two locations. In both locations, Azure Storage constantly maintains
// multiple healthy replicas of your data. The location where you read,
// create, update, or delete data is the primary storage account location.
// The primary location exists in the region you choose at the time you
// create an account via the Azure Management Azure classic portal, for
// example, North Central US. The location to which your data is replicated
// is the secondary location. The secondary location is automatically
// determined based on the location of the primary; it is in a second data
// center that resides in the same region as the primary location. Read-only
// access is available from the secondary location, if read-access geo-redundant
// replication is enabled for your storage account.
func (s *ServiceClient) GetStatistics(ctx context.Context, o *ServiceGetStatisticsOptions) (ServiceGetStatisticsResponse, error) {
getStatisticsOptions := o.format()
resp, err := s.client.GetStatistics(ctx, getStatisticsOptions)
return toServiceGetStatisticsResponse(resp), handleError(err)
}
// CanGetAccountSASToken reports whether the ServiceClient holds a shared key and can therefore sign an account SAS token.
func (s *ServiceClient) CanGetAccountSASToken() bool {
return s.sharedKey != nil
}
// GetSASURL is a convenience method for generating a SAS token for the currently pointed at account.
// It can only be used if the credential supplied during creation was a SharedKeyCredential.
// This validity can be checked with CanGetAccountSASToken().
func (s *ServiceClient) GetSASURL(resources AccountSASResourceTypes, permissions AccountSASPermissions, start time.Time, expiry time.Time) (string, error) {
if s.sharedKey == nil {
return "", errors.New("SAS can only be signed with a SharedKeyCredential")
}
qps, err := AccountSASSignatureValues{
Version: SASVersion,
Protocol: SASProtocolHTTPS,
Permissions: permissions.String(),
Services: "b",
ResourceTypes: resources.String(),
StartTime: start.UTC(),
ExpiryTime: expiry.UTC(),
}.Sign(s.sharedKey)
if err != nil {
return "", err
}
endpoint := s.URL()
if !strings.HasSuffix(endpoint, "/") {
endpoint += "/"
}
endpoint += "?" + qps.Encode()
return endpoint, nil
}
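// Usage sketch (hypothetical scopes; svc must have been created with NewServiceClientWithSharedKey):
//
//	sasURL, err := svc.GetSASURL(
//		AccountSASResourceTypes{Container: true, Object: true},
//		AccountSASPermissions{Read: true, List: true},
//		time.Now().UTC(),
//		time.Now().UTC().Add(48*time.Hour))
//	if err == nil {
//		_ = sasURL // the service URL with the signed query string appended
//	}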
// FindBlobsByTags operation finds all blobs in the storage account whose tags match a given search expression.
// Filter blobs searches across all containers within a storage account but can be scoped within the expression to a single container.
// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags
// eg. "dog='germanshepherd' and penguin='emperorpenguin'"
// To specify a container, eg. "@container=containerName and Name = C"
func (s *ServiceClient) FindBlobsByTags(ctx context.Context, o *ServiceFilterBlobsOptions) (ServiceFilterBlobsResponse, error) {
// TODO: Use pager here? Missing support from zz_generated_pagers.go
serviceFilterBlobsOptions := o.pointer()
resp, err := s.client.FilterBlobs(ctx, serviceFilterBlobsOptions)
return toServiceFilterBlobsResponse(resp), err
}

View file

@ -0,0 +1,197 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"fmt"
"net/http"
"net/url"
"sort"
"strings"
"sync/atomic"
"time"
azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
)
// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
// storage account's name and either its primary or secondary key.
func NewSharedKeyCredential(accountName string, accountKey string) (*SharedKeyCredential, error) {
c := SharedKeyCredential{accountName: accountName}
if err := c.SetAccountKey(accountKey); err != nil {
return nil, err
}
return &c, nil
}
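// Usage sketch (placeholder values, not real credentials):
//
//	cred, err := NewSharedKeyCredential("myaccount", os.Getenv("AZURE_STORAGE_KEY"))
//	if err == nil {
//		_ = cred.AccountName() // "myaccount"
//	}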
// SharedKeyCredential contains an account's name and its primary or secondary key.
// It is immutable making it shareable and goroutine-safe.
type SharedKeyCredential struct {
// Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only
accountName string
accountKey atomic.Value // []byte
}
// AccountName returns the Storage account's name.
func (c *SharedKeyCredential) AccountName() string {
return c.accountName
}
// SetAccountKey replaces the existing account key with the specified account key.
func (c *SharedKeyCredential) SetAccountKey(accountKey string) error {
_bytes, err := base64.StdEncoding.DecodeString(accountKey)
if err != nil {
return fmt.Errorf("decode account key: %w", err)
}
c.accountKey.Store(_bytes)
return nil
}
// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS.
func (c *SharedKeyCredential) ComputeHMACSHA256(message string) (string, error) {
h := hmac.New(sha256.New, c.accountKey.Load().([]byte))
_, err := h.Write([]byte(message))
return base64.StdEncoding.EncodeToString(h.Sum(nil)), err
}
func (c *SharedKeyCredential) buildStringToSign(req *http.Request) (string, error) {
// https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services
headers := req.Header
contentLength := headers.Get(headerContentLength)
if contentLength == "0" {
contentLength = ""
}
canonicalizedResource, err := c.buildCanonicalizedResource(req.URL)
if err != nil {
return "", err
}
stringToSign := strings.Join([]string{
req.Method,
headers.Get(headerContentEncoding),
headers.Get(headerContentLanguage),
contentLength,
headers.Get(headerContentMD5),
headers.Get(headerContentType),
"", // Empty date because x-ms-date is expected (as per web page above)
headers.Get(headerIfModifiedSince),
headers.Get(headerIfMatch),
headers.Get(headerIfNoneMatch),
headers.Get(headerIfUnmodifiedSince),
headers.Get(headerRange),
c.buildCanonicalizedHeader(headers),
canonicalizedResource,
}, "\n")
return stringToSign, nil
}
func (c *SharedKeyCredential) buildCanonicalizedHeader(headers http.Header) string {
cm := map[string][]string{}
for k, v := range headers {
headerName := strings.TrimSpace(strings.ToLower(k))
if strings.HasPrefix(headerName, "x-ms-") {
cm[headerName] = v // NOTE: the value must not have any whitespace around it.
}
}
if len(cm) == 0 {
return ""
}
keys := make([]string, 0, len(cm))
for key := range cm {
keys = append(keys, key)
}
sort.Strings(keys)
ch := bytes.NewBufferString("")
for i, key := range keys {
if i > 0 {
ch.WriteRune('\n')
}
ch.WriteString(key)
ch.WriteRune(':')
ch.WriteString(strings.Join(cm[key], ","))
}
return ch.String()
}
func (c *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, error) {
// https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services
cr := bytes.NewBufferString("/")
cr.WriteString(c.accountName)
if len(u.Path) > 0 {
// Any portion of the CanonicalizedResource string that is derived from
// the resource's URI should be encoded exactly as it is in the URI.
// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
cr.WriteString(u.EscapedPath())
} else {
// a slash is required to indicate the root path
cr.WriteString("/")
}
// params maps each query parameter name to its list of values
params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values
if err != nil {
return "", fmt.Errorf("failed to parse query params: %w", err)
}
if len(params) > 0 { // There is at least 1 query parameter
var paramNames []string // We use this to sort the parameter key names
for paramName := range params {
paramNames = append(paramNames, paramName) // paramNames must be lowercase
}
sort.Strings(paramNames)
for _, paramName := range paramNames {
paramValues := params[paramName]
sort.Strings(paramValues)
// Join the sorted key values separated by ','
// Then prepend "keyName:"; then add this string to the buffer
cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ","))
}
}
return cr.String(), nil
}
type sharedKeyCredPolicy struct {
cred *SharedKeyCredential
}
func newSharedKeyCredPolicy(cred *SharedKeyCredential) *sharedKeyCredPolicy {
return &sharedKeyCredPolicy{cred: cred}
}
func (s *sharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) {
if d := req.Raw().Header.Get(headerXmsDate); d == "" {
req.Raw().Header.Set(headerXmsDate, time.Now().UTC().Format(http.TimeFormat))
}
stringToSign, err := s.cred.buildStringToSign(req.Raw())
if err != nil {
return nil, err
}
signature, err := s.cred.ComputeHMACSHA256(stringToSign)
if err != nil {
return nil, err
}
authHeader := strings.Join([]string{"SharedKey ", s.cred.AccountName(), ":", signature}, "")
req.Raw().Header.Set(headerAuthorization, authHeader)
response, err := req.Next()
if err != nil && response != nil && response.StatusCode == http.StatusForbidden {
// Service failed to authenticate request, log it
log.Write(azlog.EventResponse, "===== HTTP Forbidden status, String-to-NewSASQueryParameters:\n"+stringToSign+"\n===============================\n")
}
return response, err
}

View file

@ -0,0 +1,236 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"bytes"
"encoding/xml"
"errors"
"fmt"
"net/http"
"sort"
"strings"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
// InternalError is an internal error type that all errors get wrapped in.
type InternalError struct {
cause error
}
// Error returns the wrapped StorageError's message when present, and marks the error as internal otherwise.
func (e *InternalError) Error() string {
if errors.Is(e.cause, StorageError{}) {
return e.cause.Error()
}
return fmt.Sprintf("===== INTERNAL ERROR =====\n%s", e.cause.Error())
}
// Is reports whether err is an *InternalError.
func (e *InternalError) Is(err error) bool {
_, ok := err.(*InternalError)
return ok
}
// As attempts to set target to this *InternalError, falling back to the wrapped cause.
func (e *InternalError) As(target interface{}) bool {
nt, ok := target.(**InternalError)
if ok {
*nt = e
return ok
}
//goland:noinspection GoErrorsAs
return errors.As(e.cause, target)
}
// StorageError is the internal struct that replaces the generated StorageError.
// TL;DR: This implements xml.Unmarshaler, and when the original StorageError is substituted, this unmarshaler kicks in.
// This handles the description and details. defunkifyStorageError handles the response, cause, and service code.
type StorageError struct {
response *http.Response
description string
ErrorCode StorageErrorCode
details map[string]string
}
func handleError(err error) error {
if err == nil {
return nil
}
var respErr *azcore.ResponseError
if errors.As(err, &respErr) {
return &InternalError{responseErrorToStorageError(respErr)}
}
return &InternalError{err}
}
// responseErrorToStorageError converts an *azcore.ResponseError to a *StorageError or, failing that, an *InternalError
func responseErrorToStorageError(responseError *azcore.ResponseError) error {
var storageError StorageError
body, err := runtime.Payload(responseError.RawResponse)
if err != nil {
goto Default
}
if len(body) > 0 {
if err := xml.Unmarshal(body, &storageError); err != nil {
goto Default
}
}
storageError.response = responseError.RawResponse
storageError.ErrorCode = StorageErrorCode(responseError.RawResponse.Header.Get("x-ms-error-code"))
if code, ok := storageError.details["Code"]; ok {
storageError.ErrorCode = StorageErrorCode(code)
delete(storageError.details, "Code")
}
return &storageError
Default:
return &InternalError{
cause: responseError,
}
}
// StatusCode returns the HTTP status code of the error response. The caller may examine this value but should not modify it.
func (e *StorageError) StatusCode() int {
return e.response.StatusCode
}
// Error implements the error interface's Error method to return a string representation of the error.
func (e StorageError) Error() string {
b := &bytes.Buffer{}
if e.response != nil {
_, _ = fmt.Fprintf(b, "===== RESPONSE ERROR (ErrorCode=%s) =====\n", e.ErrorCode)
_, _ = fmt.Fprintf(b, "Description=%s, Details: ", e.description)
if len(e.details) == 0 {
b.WriteString("(none)\n")
} else {
b.WriteRune('\n')
keys := make([]string, 0, len(e.details))
// Alphabetize the details
for k := range e.details {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
_, _ = fmt.Fprintf(b, " %s: %+v\n", k, e.details[k])
}
}
// req := azcore.Request{Request: e.response.Request}.Copy() // Make a copy of the response's request
// TODO: Come Here Mohit Adele
//writeRequestWithResponse(b, &azcore.Request{Request: e.response.Request}, e.response)
}
return b.String()
///azcore.writeRequestWithResponse(b, prepareRequestForLogging(req), e.response, nil)
// return e.ErrorNode.Error(b.String())
}
// Is checks if err can be cast as StorageError
func (e StorageError) Is(err error) bool {
_, ok := err.(StorageError)
_, ok2 := err.(*StorageError)
return ok || ok2
}
// Response returns StorageError.response
func (e StorageError) Response() *http.Response {
return e.response
}
//nolint
func writeRequestWithResponse(b *bytes.Buffer, request *policy.Request, response *http.Response) {
// Write the request into the buffer.
_, _ = fmt.Fprint(b, " "+request.Raw().Method+" "+request.Raw().URL.String()+"\n")
writeHeader(b, request.Raw().Header)
if response != nil {
_, _ = fmt.Fprintln(b, " --------------------------------------------------------------------------------")
_, _ = fmt.Fprint(b, " RESPONSE Status: "+response.Status+"\n")
writeHeader(b, response.Header)
}
}
// writeHeader appends an HTTP request's or response's header into a Buffer.
//nolint
func writeHeader(b *bytes.Buffer, header map[string][]string) {
if len(header) == 0 {
b.WriteString(" (no headers)\n")
return
}
keys := make([]string, 0, len(header))
// Alphabetize the headers
for k := range header {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
// Redact the value of any Authorization header to prevent security information from persisting in logs
value := interface{}("REDACTED")
if !strings.EqualFold(k, "Authorization") {
value = header[k]
}
_, _ = fmt.Fprintf(b, " %s: %+v\n", k, value)
}
}
// Temporary returns true if the error occurred due to a temporary condition (including an HTTP status of 500, 502 or 503).
func (e *StorageError) Temporary() bool {
if e.response != nil {
if (e.response.StatusCode == http.StatusInternalServerError) || (e.response.StatusCode == http.StatusServiceUnavailable) || (e.response.StatusCode == http.StatusBadGateway) {
return true
}
}
return false
}
// UnmarshalXML performs custom unmarshalling of XML-formatted Azure storage request errors.
//nolint
func (e *StorageError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {
tokName := ""
var t xml.Token
for t, err = d.Token(); err == nil; t, err = d.Token() {
switch tt := t.(type) {
case xml.StartElement:
tokName = tt.Name.Local
case xml.EndElement:
tokName = ""
case xml.CharData:
switch tokName {
case "":
continue
case "Message":
e.description = string(tt)
default:
if e.details == nil {
e.details = map[string]string{}
}
e.details[tokName] = string(tt)
}
}
}
return nil
}

View file

@ -0,0 +1,107 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"errors"
"fmt"
"io"
"strconv"
)
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Raw converts PageRange into primitive start, end integers of type int64
func (pr *PageRange) Raw() (start, end int64) {
if pr.Start != nil {
start = *pr.Start
}
if pr.End != nil {
end = *pr.End
}
return
}
// HttpRange defines a range of bytes within an HTTP resource, starting at offset and
// ending at offset+count. A zero-value HttpRange indicates the entire resource. An HttpRange
// with a non-zero offset and a zero count indicates the range from the offset to the resource's end.
type HttpRange struct {
Offset int64
Count int64
}
// NewHttpRange constructs an HttpRange covering count bytes starting at offset.
func NewHttpRange(offset, count int64) *HttpRange {
return &HttpRange{Offset: offset, Count: count}
}
func (r *HttpRange) format() *string {
if r == nil || (r.Offset == 0 && r.Count == 0) { // Do common case first for performance
return nil // No specified range
}
endOffset := "" // if count == CountToEnd (0)
if r.Count > 0 {
endOffset = strconv.FormatInt((r.Offset+r.Count)-1, 10)
}
dataRange := fmt.Sprintf("bytes=%v-%s", r.Offset, endOffset)
return &dataRange
}
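// For example (illustrative): (&HttpRange{Offset: 0, Count: 1024}).format() yields
// "bytes=0-1023", while a zero-value HttpRange yields nil (no Range header is sent).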
func getSourceRange(offset, count *int64) *string {
if offset == nil && count == nil {
return nil
}
newOffset := int64(0)
newCount := int64(CountToEnd)
if offset != nil {
newOffset = *offset
}
if count != nil {
newCount = *count
}
return (&HttpRange{Offset: newOffset, Count: newCount}).format()
}
func validateSeekableStreamAt0AndGetCount(body io.ReadSeeker) (int64, error) {
if body == nil { // nil bodies are "logically" seekable to 0 and are 0 bytes long
return 0, nil
}
err := validateSeekableStreamAt0(body)
if err != nil {
return 0, err
}
count, err := body.Seek(0, io.SeekEnd)
if err != nil {
return 0, errors.New("body stream must be seekable")
}
_, err = body.Seek(0, io.SeekStart)
if err != nil {
return 0, err
}
return count, nil
}
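// Sketch (illustrative): a bytes.Reader positioned at 0 validates successfully and
// reports its full length, with the read position restored to the start afterwards:
//
//	body := bytes.NewReader([]byte("payload"))
//	n, err := validateSeekableStreamAt0AndGetCount(body) // n == 7, err == nil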
// validateSeekableStreamAt0 returns an error if body is not a valid seekable stream positioned at 0.
func validateSeekableStreamAt0(body io.ReadSeeker) error {
if body == nil { // nil bodies are "logically" seekable to 0
return nil
}
if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil {
// Help detect programmer error
if err != nil {
return errors.New("body stream must be seekable")
}
return errors.New("body stream must be set to position 0")
}
return nil
}

View file

@ -0,0 +1,43 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
const (
// ETagNone represents an empty entity tag.
ETagNone = ""
// ETagAny matches any entity tag.
ETagAny = "*"
)
// ContainerAccessConditions identifies container-specific access conditions which you optionally set.
type ContainerAccessConditions struct {
ModifiedAccessConditions *ModifiedAccessConditions
LeaseAccessConditions *LeaseAccessConditions
}
func (ac *ContainerAccessConditions) format() (*ModifiedAccessConditions, *LeaseAccessConditions) {
if ac == nil {
return nil, nil
}
return ac.ModifiedAccessConditions, ac.LeaseAccessConditions
}
// BlobAccessConditions identifies blob-specific access conditions which you optionally set.
type BlobAccessConditions struct {
LeaseAccessConditions *LeaseAccessConditions
ModifiedAccessConditions *ModifiedAccessConditions
}
func (ac *BlobAccessConditions) format() (*LeaseAccessConditions, *ModifiedAccessConditions) {
if ac == nil {
return nil, nil
}
return ac.LeaseAccessConditions, ac.ModifiedAccessConditions
}

View file

@ -0,0 +1,184 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import "time"
// ---------------------------------------------------------------------------------------------------------------------
// AppendBlobCreateOptions provides a set of configurations for the Create Append Blob operation
type AppendBlobCreateOptions struct {
// Specifies the date time when the blob's immutability policy is set to expire.
ImmutabilityPolicyExpiry *time.Time
// Specifies the immutability policy mode to set on the blob.
ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
// Specifies whether a legal hold should be set on the blob.
LegalHold *bool
BlobAccessConditions *BlobAccessConditions
HTTPHeaders *BlobHTTPHeaders
CpkInfo *CpkInfo
CpkScopeInfo *CpkScopeInfo
// Optional. Used to set blob tags in various blob operations.
TagsMap map[string]string
// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
// operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs
// are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source
// blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers.
// See Naming and Referencing Containers, Blobs, and Metadata for more information.
Metadata map[string]string
}
func (o *AppendBlobCreateOptions) format() (*appendBlobClientCreateOptions, *BlobHTTPHeaders, *LeaseAccessConditions,
*CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) {
if o == nil {
return nil, nil, nil, nil, nil, nil
}
options := appendBlobClientCreateOptions{
BlobTagsString: serializeBlobTagsToStrPtr(o.TagsMap),
Metadata: o.Metadata,
ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry,
ImmutabilityPolicyMode: o.ImmutabilityPolicyMode,
LegalHold: o.LegalHold,
}
leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
return &options, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions
}
// AppendBlobCreateResponse contains the response from method AppendBlobClient.Create.
type AppendBlobCreateResponse struct {
appendBlobClientCreateResponse
}
func toAppendBlobCreateResponse(resp appendBlobClientCreateResponse) AppendBlobCreateResponse {
return AppendBlobCreateResponse{resp}
}
// ---------------------------------------------------------------------------------------------------------------------
// AppendBlobAppendBlockOptions provides a set of configurations for the AppendBlock operation
type AppendBlobAppendBlockOptions struct {
// Specify the transactional crc64 for the body, to be validated by the service.
TransactionalContentCRC64 []byte
// Specify the transactional md5 for the body, to be validated by the service.
TransactionalContentMD5 []byte
AppendPositionAccessConditions *AppendPositionAccessConditions
CpkInfo *CpkInfo
CpkScopeInfo *CpkScopeInfo
BlobAccessConditions *BlobAccessConditions
}
func (o *AppendBlobAppendBlockOptions) format() (*appendBlobClientAppendBlockOptions, *AppendPositionAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions, *LeaseAccessConditions) {
if o == nil {
return nil, nil, nil, nil, nil, nil
}
options := &appendBlobClientAppendBlockOptions{
TransactionalContentCRC64: o.TransactionalContentCRC64,
TransactionalContentMD5: o.TransactionalContentMD5,
}
leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
return options, o.AppendPositionAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions, leaseAccessConditions
}
// AppendBlobAppendBlockResponse contains the response from method AppendBlobClient.AppendBlock.
type AppendBlobAppendBlockResponse struct {
appendBlobClientAppendBlockResponse
}
func toAppendBlobAppendBlockResponse(resp appendBlobClientAppendBlockResponse) AppendBlobAppendBlockResponse {
return AppendBlobAppendBlockResponse{resp}
}
// ---------------------------------------------------------------------------------------------------------------------
// AppendBlobAppendBlockFromURLOptions provides a set of configurations for the AppendBlockFromURL operation
type AppendBlobAppendBlockFromURLOptions struct {
// Specify the md5 calculated for the range of bytes that must be read from the copy source.
SourceContentMD5 []byte
// Specify the crc64 calculated for the range of bytes that must be read from the copy source.
SourceContentCRC64 []byte
// Specify the transactional md5 for the body, to be validated by the service.
TransactionalContentMD5 []byte
AppendPositionAccessConditions *AppendPositionAccessConditions
CpkInfo *CpkInfo
CpkScopeInfo *CpkScopeInfo
SourceModifiedAccessConditions *SourceModifiedAccessConditions
BlobAccessConditions *BlobAccessConditions
// Optional: read only a particular range of the copy source.
Offset *int64
Count *int64
}
func (o *AppendBlobAppendBlockFromURLOptions) format() (*appendBlobClientAppendBlockFromURLOptions, *CpkInfo, *CpkScopeInfo, *LeaseAccessConditions, *AppendPositionAccessConditions, *ModifiedAccessConditions, *SourceModifiedAccessConditions) {
if o == nil {
return nil, nil, nil, nil, nil, nil, nil
}
options := &appendBlobClientAppendBlockFromURLOptions{
SourceRange: getSourceRange(o.Offset, o.Count),
SourceContentMD5: o.SourceContentMD5,
SourceContentcrc64: o.SourceContentCRC64,
TransactionalContentMD5: o.TransactionalContentMD5,
}
leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
return options, o.CpkInfo, o.CpkScopeInfo, leaseAccessConditions, o.AppendPositionAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions
}
// AppendBlobAppendBlockFromURLResponse contains the response from method AppendBlobClient.AppendBlockFromURL.
type AppendBlobAppendBlockFromURLResponse struct {
appendBlobClientAppendBlockFromURLResponse
}
func toAppendBlobAppendBlockFromURLResponse(resp appendBlobClientAppendBlockFromURLResponse) AppendBlobAppendBlockFromURLResponse {
return AppendBlobAppendBlockFromURLResponse{resp}
}
// ---------------------------------------------------------------------------------------------------------------------
// AppendBlobSealOptions provides a set of configurations for the SealAppendBlob operation
type AppendBlobSealOptions struct {
BlobAccessConditions *BlobAccessConditions
AppendPositionAccessConditions *AppendPositionAccessConditions
}
func (o *AppendBlobSealOptions) format() (leaseAccessConditions *LeaseAccessConditions,
modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) {
if o == nil {
return nil, nil, nil
}
// Propagate the configured access conditions instead of returning bare zero values.
leaseAccessConditions, modifiedAccessConditions = o.BlobAccessConditions.format()
return leaseAccessConditions, modifiedAccessConditions, o.AppendPositionAccessConditions
}
// AppendBlobSealResponse contains the response from method AppendBlobClient.Seal.
type AppendBlobSealResponse struct {
appendBlobClientSealResponse
}
func toAppendBlobSealResponse(resp appendBlobClientSealResponse) AppendBlobSealResponse {
return AppendBlobSealResponse{resp}
}
// ---------------------------------------------------------------------------------------------------------------------

View file

@ -0,0 +1,478 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"context"
"io"
"net/http"
"time"
)
// ---------------------------------------------------------------------------------------------------------------------
// BlobDownloadOptions provides a set of configurations for the blob Download operation
type BlobDownloadOptions struct {
// When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the
// range is less than or equal to 4 MB in size.
RangeGetContentMD5 *bool
// Optional: read only a particular range of the blob.
Offset *int64
Count *int64
BlobAccessConditions *BlobAccessConditions
CpkInfo *CpkInfo
CpkScopeInfo *CpkScopeInfo
}
func (o *BlobDownloadOptions) format() (*blobClientDownloadOptions, *LeaseAccessConditions, *CpkInfo, *ModifiedAccessConditions) {
if o == nil {
return nil, nil, nil, nil
}
offset := int64(0)
count := int64(CountToEnd)
if o.Offset != nil {
offset = *o.Offset
}
if o.Count != nil {
count = *o.Count
}
basics := blobClientDownloadOptions{
RangeGetContentMD5: o.RangeGetContentMD5,
Range: (&HttpRange{Offset: offset, Count: count}).format(),
}
leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
return &basics, leaseAccessConditions, o.CpkInfo, modifiedAccessConditions
}
// BlobDownloadResponse wraps AutoRest generated BlobDownloadResponse and helps to provide info for retry.
type BlobDownloadResponse struct {
blobClientDownloadResponse
ctx context.Context
b *BlobClient
getInfo HTTPGetterInfo
ObjectReplicationRules []ObjectReplicationPolicy
}
// Body constructs new RetryReader stream for reading data. If a connection fails
// while reading, it will make additional requests to reestablish a connection and
// continue reading. Specifying RetryReaderOptions with MaxRetryRequests set to 0
// (the default) returns the original response body, and no retries will be performed.
// Pass in nil for options to accept the default options.
func (r *BlobDownloadResponse) Body(options *RetryReaderOptions) io.ReadCloser {
if options == nil {
options = &RetryReaderOptions{}
}
if options.MaxRetryRequests == 0 { // No additional retries
return r.RawResponse.Body
}
return NewRetryReader(r.ctx, r.RawResponse, r.getInfo, *options,
func(ctx context.Context, getInfo HTTPGetterInfo) (*http.Response, error) {
accessConditions := &BlobAccessConditions{
ModifiedAccessConditions: &ModifiedAccessConditions{IfMatch: &getInfo.ETag},
}
options := BlobDownloadOptions{
Offset: &getInfo.Offset,
Count: &getInfo.Count,
BlobAccessConditions: accessConditions,
CpkInfo: options.CpkInfo,
//CpkScopeInfo: o.CpkScopeInfo,
}
resp, err := r.b.Download(ctx, &options)
if err != nil {
return nil, err
}
return resp.RawResponse, err
},
)
}
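// Usage sketch (assumes blobClient is a *BlobClient for an existing blob):
//
//	resp, err := blobClient.Download(context.TODO(), nil)
//	if err == nil {
//		body := resp.Body(&RetryReaderOptions{MaxRetryRequests: 3})
//		defer body.Close()
//		data, readErr := io.ReadAll(body)
//		_, _ = data, readErr
//	}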
// ---------------------------------------------------------------------------------------------------------------------
// BlobDeleteOptions provides a set of configurations for the blob Delete operation
type BlobDeleteOptions struct {
// Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob
// and all of its snapshots. only: Delete only the blob's snapshots and not the blob itself
DeleteSnapshots *DeleteSnapshotsOptionType
BlobAccessConditions *BlobAccessConditions
}
func (o *BlobDeleteOptions) format() (*blobClientDeleteOptions, *LeaseAccessConditions, *ModifiedAccessConditions) {
if o == nil {
return nil, nil, nil
}
basics := blobClientDeleteOptions{
DeleteSnapshots: o.DeleteSnapshots,
}
if o.BlobAccessConditions == nil {
return &basics, nil, nil
}
return &basics, o.BlobAccessConditions.LeaseAccessConditions, o.BlobAccessConditions.ModifiedAccessConditions
}
// BlobDeleteResponse contains the response from method BlobClient.Delete.
type BlobDeleteResponse struct {
blobClientDeleteResponse
}
func toBlobDeleteResponse(resp blobClientDeleteResponse) BlobDeleteResponse {
return BlobDeleteResponse{resp}
}
// ---------------------------------------------------------------------------------------------------------------------
// BlobUndeleteOptions provides a set of configurations for the Blob Undelete operation
type BlobUndeleteOptions struct {
}
func (o *BlobUndeleteOptions) format() *blobClientUndeleteOptions {
return nil
}
// BlobUndeleteResponse contains the response from method BlobClient.Undelete.
type BlobUndeleteResponse struct {
blobClientUndeleteResponse
}
func toBlobUndeleteResponse(resp blobClientUndeleteResponse) BlobUndeleteResponse {
return BlobUndeleteResponse{resp}
}
// ---------------------------------------------------------------------------------------------------------------------
// BlobSetTierOptions provides a set of configurations for the blob SetTier operation
type BlobSetTierOptions struct {
// Optional: Indicates the priority with which to rehydrate an archived blob.
RehydratePriority *RehydratePriority
LeaseAccessConditions *LeaseAccessConditions
ModifiedAccessConditions *ModifiedAccessConditions
}
func (o *BlobSetTierOptions) format() (*blobClientSetTierOptions, *LeaseAccessConditions, *ModifiedAccessConditions) {
if o == nil {
return nil, nil, nil
}
basics := blobClientSetTierOptions{RehydratePriority: o.RehydratePriority}
return &basics, o.LeaseAccessConditions, o.ModifiedAccessConditions
}
// BlobSetTierResponse contains the response from method BlobClient.SetTier.
type BlobSetTierResponse struct {
blobClientSetTierResponse
}
func toBlobSetTierResponse(resp blobClientSetTierResponse) BlobSetTierResponse {
return BlobSetTierResponse{resp}
}
// ---------------------------------------------------------------------------------------------------------------------
// BlobGetPropertiesOptions provides a set of configurations for the blob GetProperties operation
type BlobGetPropertiesOptions struct {
BlobAccessConditions *BlobAccessConditions
CpkInfo *CpkInfo
}
func (o *BlobGetPropertiesOptions) format() (blobClientGetPropertiesOptions *blobClientGetPropertiesOptions,
leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) {
if o == nil {
return nil, nil, nil, nil
}
leaseAccessConditions, modifiedAccessConditions = o.BlobAccessConditions.format()
return nil, leaseAccessConditions, o.CpkInfo, modifiedAccessConditions
}
// ObjectReplicationRules holds the ID and replication status of a single object replication rule
type ObjectReplicationRules struct {
RuleId string
Status string
}
// ObjectReplicationPolicy holds the deserialized object replication attributes: a policy ID and its rules
type ObjectReplicationPolicy struct {
PolicyId *string
Rules *[]ObjectReplicationRules
}
// BlobGetPropertiesResponse reformats the GetPropertiesResponse object for easy consumption
type BlobGetPropertiesResponse struct {
blobClientGetPropertiesResponse
// deserialized attributes
ObjectReplicationRules []ObjectReplicationPolicy
}
func toGetBlobPropertiesResponse(resp blobClientGetPropertiesResponse) BlobGetPropertiesResponse {
getResp := BlobGetPropertiesResponse{
blobClientGetPropertiesResponse: resp,
ObjectReplicationRules: deserializeORSPolicies(resp.ObjectReplicationRules),
}
return getResp
}
// ---------------------------------------------------------------------------------------------------------------------
// BlobSetHTTPHeadersOptions provides a set of configurations for the blob SetHTTPHeaders operation
type BlobSetHTTPHeadersOptions struct {
LeaseAccessConditions *LeaseAccessConditions
ModifiedAccessConditions *ModifiedAccessConditions
}
func (o *BlobSetHTTPHeadersOptions) format() (*blobClientSetHTTPHeadersOptions, *LeaseAccessConditions, *ModifiedAccessConditions) {
if o == nil {
return nil, nil, nil
}
return nil, o.LeaseAccessConditions, o.ModifiedAccessConditions
}
// BlobSetHTTPHeadersResponse contains the response from method BlobClient.SetHTTPHeaders.
type BlobSetHTTPHeadersResponse struct {
blobClientSetHTTPHeadersResponse
}
func toBlobSetHTTPHeadersResponse(resp blobClientSetHTTPHeadersResponse) BlobSetHTTPHeadersResponse {
return BlobSetHTTPHeadersResponse{resp}
}
// ---------------------------------------------------------------------------------------------------------------------
// BlobSetMetadataOptions provides a set of configurations for the blob SetMetadata operation
type BlobSetMetadataOptions struct {
LeaseAccessConditions *LeaseAccessConditions
CpkInfo *CpkInfo
CpkScopeInfo *CpkScopeInfo
ModifiedAccessConditions *ModifiedAccessConditions
}
func (o *BlobSetMetadataOptions) format() (leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo,
cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) {
if o == nil {
return nil, nil, nil, nil
}
return o.LeaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.ModifiedAccessConditions
}
// BlobSetMetadataResponse contains the response from method BlobClient.SetMetadata.
type BlobSetMetadataResponse struct {
blobClientSetMetadataResponse
}
func toBlobSetMetadataResponse(resp blobClientSetMetadataResponse) BlobSetMetadataResponse {
return BlobSetMetadataResponse{resp}
}
// ---------------------------------------------------------------------------------------------------------------------
// BlobCreateSnapshotOptions provides a set of configurations for the blob CreateSnapshot operation
type BlobCreateSnapshotOptions struct {
Metadata map[string]string
LeaseAccessConditions *LeaseAccessConditions
CpkInfo *CpkInfo
CpkScopeInfo *CpkScopeInfo
ModifiedAccessConditions *ModifiedAccessConditions
}
func (o *BlobCreateSnapshotOptions) format() (blobSetMetadataOptions *blobClientCreateSnapshotOptions, cpkInfo *CpkInfo,
cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) {
if o == nil {
return nil, nil, nil, nil, nil
}
basics := blobClientCreateSnapshotOptions{
Metadata: o.Metadata,
}
return &basics, o.CpkInfo, o.CpkScopeInfo, o.ModifiedAccessConditions, o.LeaseAccessConditions
}
// BlobCreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot
type BlobCreateSnapshotResponse struct {
blobClientCreateSnapshotResponse
}
func toBlobCreateSnapshotResponse(resp blobClientCreateSnapshotResponse) BlobCreateSnapshotResponse {
return BlobCreateSnapshotResponse{resp}
}
// ---------------------------------------------------------------------------------------------------------------------
// BlobStartCopyOptions provides a set of configurations for the blob StartCopyFromURL operation
type BlobStartCopyOptions struct {
// Specifies the date time when the blob's immutability policy is set to expire.
ImmutabilityPolicyExpiry *time.Time
// Specifies the immutability policy mode to set on the blob.
ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
// Specifies whether a legal hold should be set on the blob.
LegalHold *bool
// Optional. Used to set blob tags in various blob operations.
TagsMap map[string]string
// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
// operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs
// are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source
// blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers.
// See Naming and Referencing Containers, Blobs, and Metadata for more information.
Metadata map[string]string
// Optional: Indicates the priority with which to rehydrate an archived blob.
RehydratePriority *RehydratePriority
// Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer.
SealBlob *bool
// Optional. Indicates the tier to be set on the blob.
Tier *AccessTier
SourceModifiedAccessConditions *SourceModifiedAccessConditions
ModifiedAccessConditions *ModifiedAccessConditions
LeaseAccessConditions *LeaseAccessConditions
}
func (o *BlobStartCopyOptions) format() (blobStartCopyFromUrlOptions *blobClientStartCopyFromURLOptions,
sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) {
if o == nil {
return nil, nil, nil, nil
}
basics := blobClientStartCopyFromURLOptions{
BlobTagsString: serializeBlobTagsToStrPtr(o.TagsMap),
Metadata: o.Metadata,
RehydratePriority: o.RehydratePriority,
SealBlob: o.SealBlob,
Tier: o.Tier,
ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry,
ImmutabilityPolicyMode: o.ImmutabilityPolicyMode,
LegalHold: o.LegalHold,
}
return &basics, o.SourceModifiedAccessConditions, o.ModifiedAccessConditions, o.LeaseAccessConditions
}
// BlobStartCopyFromURLResponse contains the response from method BlobClient.StartCopyFromURL.
type BlobStartCopyFromURLResponse struct {
blobClientStartCopyFromURLResponse
}
func toBlobStartCopyFromURLResponse(resp blobClientStartCopyFromURLResponse) BlobStartCopyFromURLResponse {
return BlobStartCopyFromURLResponse{resp}
}
// ---------------------------------------------------------------------------------------------------------------------
// BlobAbortCopyOptions provides a set of configurations for the AbortCopyFromURL operation
type BlobAbortCopyOptions struct {
LeaseAccessConditions *LeaseAccessConditions
}
func (o *BlobAbortCopyOptions) format() (blobAbortCopyFromUrlOptions *blobClientAbortCopyFromURLOptions,
leaseAccessConditions *LeaseAccessConditions) {
if o == nil {
return nil, nil
}
return nil, o.LeaseAccessConditions
}
// BlobAbortCopyFromURLResponse contains the response from method BlobClient.AbortCopyFromURL
type BlobAbortCopyFromURLResponse struct {
blobClientAbortCopyFromURLResponse
}
func toBlobAbortCopyFromURLResponse(resp blobClientAbortCopyFromURLResponse) BlobAbortCopyFromURLResponse {
return BlobAbortCopyFromURLResponse{resp}
}
// ---------------------------------------------------------------------------------------------------------------------
// BlobSetTagsOptions provides a set of configurations for the SetTags operation
type BlobSetTagsOptions struct {
// The version id parameter is an opaque DateTime value that, when present,
// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
VersionID *string
// Optional header. Specifies the transactional crc64 for the body, to be validated by the service.
TransactionalContentCRC64 []byte
// Optional header. Specifies the transactional md5 for the body, to be validated by the service.
TransactionalContentMD5 []byte
TagsMap map[string]string
ModifiedAccessConditions *ModifiedAccessConditions
LeaseAccessConditions *LeaseAccessConditions
}
func (o *BlobSetTagsOptions) format() (*blobClientSetTagsOptions, *ModifiedAccessConditions, *LeaseAccessConditions) {
if o == nil {
return nil, nil, nil
}
options := &blobClientSetTagsOptions{
Tags: serializeBlobTags(o.TagsMap),
TransactionalContentMD5: o.TransactionalContentMD5,
TransactionalContentCRC64: o.TransactionalContentCRC64,
VersionID: o.VersionID,
}
return options, o.ModifiedAccessConditions, o.LeaseAccessConditions
}
// BlobSetTagsResponse contains the response from method BlobClient.SetTags
type BlobSetTagsResponse struct {
blobClientSetTagsResponse
}
func toBlobSetTagsResponse(resp blobClientSetTagsResponse) BlobSetTagsResponse {
return BlobSetTagsResponse{resp}
}
// ---------------------------------------------------------------------------------------------------------------------
// BlobGetTagsOptions provides a set of configurations for the GetTags operation
type BlobGetTagsOptions struct {
// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve.
Snapshot *string
// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
// It's for service version 2019-10-10 and newer.
VersionID *string
BlobAccessConditions *BlobAccessConditions
}
func (o *BlobGetTagsOptions) format() (*blobClientGetTagsOptions, *ModifiedAccessConditions, *LeaseAccessConditions) {
if o == nil {
return nil, nil, nil
}
options := &blobClientGetTagsOptions{
Snapshot: o.Snapshot,
VersionID: o.VersionID,
}
leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
return options, modifiedAccessConditions, leaseAccessConditions
}
// BlobGetTagsResponse contains the response from method BlobClient.GetTags
type BlobGetTagsResponse struct {
blobClientGetTagsResponse
}
func toBlobGetTagsResponse(resp blobClientGetTagsResponse) BlobGetTagsResponse {
return BlobGetTagsResponse{resp}
}

View file

@ -0,0 +1,160 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
)
// ---------------------------------------------------------------------------------------------------------------------
// BlobAcquireLeaseOptions provides a set of configurations for the AcquireLeaseBlob operation
type BlobAcquireLeaseOptions struct {
// Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease
// can be between 15 and 60 seconds. A lease duration cannot be changed using renew or change.
Duration *int32
ModifiedAccessConditions *ModifiedAccessConditions
}
func (o *BlobAcquireLeaseOptions) format() (blobClientAcquireLeaseOptions, *ModifiedAccessConditions) {
if o == nil {
return blobClientAcquireLeaseOptions{}, nil
}
return blobClientAcquireLeaseOptions{
Duration: o.Duration,
}, o.ModifiedAccessConditions
}
// BlobAcquireLeaseResponse contains the response from method BlobLeaseClient.AcquireLease.
type BlobAcquireLeaseResponse struct {
blobClientAcquireLeaseResponse
}
func toBlobAcquireLeaseResponse(resp blobClientAcquireLeaseResponse) BlobAcquireLeaseResponse {
return BlobAcquireLeaseResponse{resp}
}
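// Editorial usage sketch, not part of this diff: acquiring a 30-second lease so
// only one writer touches the blob. Assumes a *BlobLeaseClient named leaseClient
// (construction elided) and that AcquireLease accepts (ctx, *BlobAcquireLeaseOptions),
// as the response doc comment above states (context import assumed).
func exampleAcquireLease(ctx context.Context, leaseClient *BlobLeaseClient) error {
	opts := &BlobAcquireLeaseOptions{
		Duration: to.Ptr(int32(30)), // 15-60 seconds, or -1 for an infinite lease
	}
	_, err := leaseClient.AcquireLease(ctx, opts)
	return err
}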
// ---------------------------------------------------------------------------------------------------------------------
// BlobBreakLeaseOptions provides a set of configurations for the BreakLeaseBlob operation
type BlobBreakLeaseOptions struct {
// For a break operation, the proposed duration the lease should continue before it is broken, in seconds, between 0 and 60.
// This break period is only used if it is shorter than the time remaining on the lease. If longer, the time remaining on the
// lease is used. A new lease will not be available before the break period has expired, but the lease may be held for longer
// than the break period. If this header does not appear with a break operation, a fixed-duration lease breaks after the
// remaining lease period elapses, and an infinite lease breaks immediately.
BreakPeriod *int32
ModifiedAccessConditions *ModifiedAccessConditions
}
func (o *BlobBreakLeaseOptions) format() (*blobClientBreakLeaseOptions, *ModifiedAccessConditions) {
if o == nil {
return nil, nil
}
if o.BreakPeriod != nil {
period := leasePeriodPointer(*o.BreakPeriod)
return &blobClientBreakLeaseOptions{
BreakPeriod: period,
}, o.ModifiedAccessConditions
}
return nil, o.ModifiedAccessConditions
}
// BlobBreakLeaseResponse contains the response from method BlobLeaseClient.BreakLease.
type BlobBreakLeaseResponse struct {
blobClientBreakLeaseResponse
}
func toBlobBreakLeaseResponse(resp blobClientBreakLeaseResponse) BlobBreakLeaseResponse {
return BlobBreakLeaseResponse{resp}
}
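// Editorial usage sketch, not part of this diff: breaking a lease while letting
// the current holder finish. Assumes a *BlobLeaseClient named leaseClient and that
// BreakLease accepts (ctx, *BlobBreakLeaseOptions); the 10-second break period is
// an arbitrary illustrative value (context import assumed).
func exampleBreakLease(ctx context.Context, leaseClient *BlobLeaseClient) error {
	opts := &BlobBreakLeaseOptions{
		BreakPeriod: to.Ptr(int32(10)), // used only if shorter than the time remaining
	}
	_, err := leaseClient.BreakLease(ctx, opts)
	return err
}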
// ---------------------------------------------------------------------------------------------------------------------
// BlobChangeLeaseOptions provides a set of configurations for the ChangeLeaseBlob operation
type BlobChangeLeaseOptions struct {
ProposedLeaseID *string
ModifiedAccessConditions *ModifiedAccessConditions
}
func (o *BlobChangeLeaseOptions) format() (*string, *blobClientChangeLeaseOptions, *ModifiedAccessConditions, error) {
generatedUuid, err := uuid.New()
if err != nil {
return nil, nil, nil, err
}
leaseID := to.Ptr(generatedUuid.String())
if o == nil {
return leaseID, nil, nil, nil
}
if o.ProposedLeaseID == nil {
o.ProposedLeaseID = leaseID
}
return o.ProposedLeaseID, nil, o.ModifiedAccessConditions, nil
}
// BlobChangeLeaseResponse contains the response from method BlobLeaseClient.ChangeLease
type BlobChangeLeaseResponse struct {
blobClientChangeLeaseResponse
}
func toBlobChangeLeaseResponse(resp blobClientChangeLeaseResponse) BlobChangeLeaseResponse {
return BlobChangeLeaseResponse{resp}
}
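// Editorial usage sketch, not part of this diff: rotating a lease ID. Assumes a
// *BlobLeaseClient named leaseClient and that ChangeLease accepts
// (ctx, *BlobChangeLeaseOptions); with nil options, format() above generates a
// fresh proposed lease ID via uuid.New() (context import assumed).
func exampleChangeLease(ctx context.Context, leaseClient *BlobLeaseClient) error {
	_, err := leaseClient.ChangeLease(ctx, nil)
	return err
}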
// ---------------------------------------------------------------------------------------------------------------------
// BlobRenewLeaseOptions provides a set of configurations for the RenewLeaseBlob operation
type BlobRenewLeaseOptions struct {
ModifiedAccessConditions *ModifiedAccessConditions
}
func (o *BlobRenewLeaseOptions) format() (*blobClientRenewLeaseOptions, *ModifiedAccessConditions) {
if o == nil {
return nil, nil
}
return nil, o.ModifiedAccessConditions
}
// BlobRenewLeaseResponse contains the response from method BlobLeaseClient.RenewLease.
type BlobRenewLeaseResponse struct {
blobClientRenewLeaseResponse
}
func toBlobRenewLeaseResponse(resp blobClientRenewLeaseResponse) BlobRenewLeaseResponse {
return BlobRenewLeaseResponse{resp}
}
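// Editorial usage sketch, not part of this diff: renewing a fixed-duration lease
// periodically while a long upload runs. Assumes a *BlobLeaseClient named leaseClient
// and that RenewLease accepts (ctx, *BlobRenewLeaseOptions); nil options attach no
// access conditions (context and time imports assumed).
func exampleKeepLeaseAlive(ctx context.Context, leaseClient *BlobLeaseClient, done <-chan struct{}) {
	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-done:
			return // caller finished; the lease can now be released
		case <-ticker.C:
			if _, err := leaseClient.RenewLease(ctx, nil); err != nil {
				return // renewal failed; treat the lease as lost
			}
		}
	}
}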
// ---------------------------------------------------------------------------------------------------------------------
// ReleaseLeaseBlobOptions provides a set of configurations for the ReleaseLeaseBlob operation
type ReleaseLeaseBlobOptions struct {
ModifiedAccessConditions *ModifiedAccessConditions
}
func (o *ReleaseLeaseBlobOptions) format() (*blobClientReleaseLeaseOptions, *ModifiedAccessConditions) {
if o == nil {
return nil, nil
}
return nil, o.ModifiedAccessConditions
}
// BlobReleaseLeaseResponse contains the response from method BlobLeaseClient.ReleaseLease.
type BlobReleaseLeaseResponse struct {
blobClientReleaseLeaseResponse
}
func toBlobReleaseLeaseResponse(resp blobClientReleaseLeaseResponse) BlobReleaseLeaseResponse {
return BlobReleaseLeaseResponse{resp}
}
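// Editorial usage sketch, not part of this diff: releasing a lease once work is
// done so other clients can acquire it immediately. Assumes a *BlobLeaseClient
// named leaseClient and that ReleaseLease accepts (ctx, *ReleaseLeaseBlobOptions);
// nil options attach no access conditions (context import assumed).
func exampleReleaseLease(ctx context.Context, leaseClient *BlobLeaseClient) error {
	_, err := leaseClient.ReleaseLease(ctx, nil)
	return err
}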

Some files were not shown because too many files have changed in this diff.