// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
Package storage provides an easy way to work with Google Cloud Storage.
Google Cloud Storage stores data in named objects, which are grouped into buckets.

More information about Google Cloud Storage is available at
https://cloud.google.com/storage/docs.

See https://pkg.go.dev/cloud.google.com/go for authentication, timeouts,
connection pooling and similar aspects of this package.

# Creating a Client

To start working with this package, create a [Client]:

	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

The client will use your default application credentials. Clients should be
reused instead of created as needed. The methods of [Client] are safe for
concurrent use by multiple goroutines.

You may configure the client by passing in options from the [google.golang.org/api/option]
package. You may also use options defined in this package, such as [WithJSONReads].
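
For example (a minimal sketch; the user agent value is illustrative), you can combine
options from both packages in a single call to [NewClient]:

	client, err := storage.NewClient(
		ctx,
		option.WithUserAgent("my-app/1.0"), // from google.golang.org/api/option; value is illustrative
		storage.WithJSONReads(),            // an option defined in this package
	)
	if err != nil {
		// TODO: Handle error.
	}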

If you only wish to access public data, you can create
an unauthenticated client with

	client, err := storage.NewClient(ctx, option.WithoutAuthentication())

To use an emulator with this library, you can set the STORAGE_EMULATOR_HOST
environment variable to the address at which your emulator is running. This will
send requests to that address instead of to Cloud Storage. You can then create
and use a client as usual:

	// Set STORAGE_EMULATOR_HOST environment variable.
	err := os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9000")
	if err != nil {
		// TODO: Handle error.
	}

	// Create client as usual.
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	// This request is now directed to http://localhost:9000/storage/v1/b
	// instead of https://storage.googleapis.com/storage/v1/b
	if err := client.Bucket("my-bucket").Create(ctx, projectID, nil); err != nil {
		// TODO: Handle error.
	}

Please note that there is no official emulator for Cloud Storage.

# Buckets

A Google Cloud Storage bucket is a collection of objects. To work with a
bucket, make a bucket handle:

	bkt := client.Bucket(bucketName)

A handle is a reference to a bucket. You can have a handle even if the
bucket doesn't exist yet. To create a bucket in Google Cloud Storage,
call [BucketHandle.Create]:

	if err := bkt.Create(ctx, projectID, nil); err != nil {
		// TODO: Handle error.
	}

Note that although buckets are associated with projects, bucket names are
global across all projects.

Each bucket has associated metadata, represented in this package by
[BucketAttrs]. The third argument to [BucketHandle.Create] allows you to set
the initial [BucketAttrs] of a bucket. To retrieve a bucket's attributes, use
[BucketHandle.Attrs]:

	attrs, err := bkt.Attrs(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n",
		attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass)
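
As a minimal sketch (the location and storage class values are illustrative), initial
attributes can be set at creation time by passing a [BucketAttrs] as the third argument
to [BucketHandle.Create]:

	if err := bkt.Create(ctx, projectID, &storage.BucketAttrs{
		Location:     "US",       // illustrative
		StorageClass: "STANDARD", // illustrative
	}); err != nil {
		// TODO: Handle error.
	}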

# Objects

An object holds arbitrary data as a sequence of bytes, like a file. You
refer to objects using a handle, just as with buckets, but unlike buckets
you don't explicitly create an object. Instead, the first time you write
to an object it will be created. You can use the standard Go [io.Reader]
and [io.Writer] interfaces to read and write object data:

	obj := bkt.Object("data")
	// Write something to obj.
	// w implements io.Writer.
	w := obj.NewWriter(ctx)
	// Write some text to obj. This will either create the object or overwrite whatever is there already.
	if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
		// TODO: Handle error.
	}
	// Close, just like writing a file.
	if err := w.Close(); err != nil {
		// TODO: Handle error.
	}

	// Read it back.
	r, err := obj.NewReader(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	defer r.Close()
	if _, err := io.Copy(os.Stdout, r); err != nil {
		// TODO: Handle error.
	}
	// Prints "This object contains text."

Objects also have attributes, which you can fetch with [ObjectHandle.Attrs]:

	objAttrs, err := obj.Attrs(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Printf("object %s has size %d and can be read using %s\n",
		objAttrs.Name, objAttrs.Size, objAttrs.MediaLink)

# Listing objects

Listing objects in a bucket is done with the [BucketHandle.Objects] method:

	query := &storage.Query{Prefix: ""}

	var names []string
	it := bkt.Objects(ctx, query)
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		names = append(names, attrs.Name)
	}

Objects are listed lexicographically by name. To filter objects
lexicographically, [Query.StartOffset] and/or [Query.EndOffset] can be used:

	query := &storage.Query{
		Prefix:      "",
		StartOffset: "bar/", // Only list objects lexicographically >= "bar/"
		EndOffset:   "foo/", // Only list objects lexicographically < "foo/"
	}

	// ... as before

If only a subset of object attributes is needed when listing, specifying this
subset using [Query.SetAttrSelection] may speed up the listing process:

	query := &storage.Query{Prefix: ""}
	query.SetAttrSelection([]string{"Name"})

	// ... as before

# ACLs

Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of
ACLRules, each of which specifies the role of a user, group or project. ACLs
are suitable for fine-grained control, but you may prefer using IAM to control
access at the project level (see [Cloud Storage IAM docs]).

To list the ACLs of a bucket or object, obtain an [ACLHandle] and call [ACLHandle.List]:

	acls, err := obj.ACL().List(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	for _, rule := range acls {
		fmt.Printf("%s has role %s\n", rule.Entity, rule.Role)
	}

You can also set and delete ACLs.
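
For example, here is a minimal sketch (the entity and role are illustrative) that grants
public read access with [ACLHandle.Set] and then revokes it with [ACLHandle.Delete]:

	// Grant read access to all users, then revoke it.
	if err := obj.ACL().Set(ctx, storage.AllUsers, storage.RoleReader); err != nil {
		// TODO: Handle error.
	}
	if err := obj.ACL().Delete(ctx, storage.AllUsers); err != nil {
		// TODO: Handle error.
	}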

# Conditions

Every object has a generation and a metageneration. The generation changes
whenever the content changes, and the metageneration changes whenever the
metadata changes. [Conditions] let you check these values before an operation;
the operation only executes if the conditions match. You can use conditions to
prevent race conditions in read-modify-write operations.

For example, say you've read an object's metadata into objAttrs. Now
you want to write to that object, but only if its contents haven't changed
since you read it. Here is how to express that:

	w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)
	// Proceed with writing as above.
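
Metadata updates can be guarded the same way. As a minimal sketch (the attribute being
updated is illustrative), the following update only proceeds if neither the object's
content nor its metadata has changed since objAttrs was read:

	cond := storage.Conditions{
		GenerationMatch:     objAttrs.Generation,
		MetagenerationMatch: objAttrs.Metageneration,
	}
	// Update the content type only if the preconditions still hold.
	if _, err := obj.If(cond).Update(ctx, storage.ObjectAttrsToUpdate{ContentType: "text/plain"}); err != nil {
		// TODO: Handle error.
	}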

# Signed URLs

You can obtain a URL that lets anyone read or write an object for a limited time.
Signing a URL requires credentials authorized to sign a URL. To use the same
authentication that was used when instantiating the Storage client, use
[BucketHandle.SignedURL].

	url, err := client.Bucket(bucketName).SignedURL(objectName, opts)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(url)
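
The opts value above is a [SignedURLOptions]; as a minimal sketch (the method and expiry
are illustrative), it might be constructed like this:

	opts := &storage.SignedURLOptions{
		Scheme:  storage.SigningSchemeV4,
		Method:  "GET",
		Expires: time.Now().Add(15 * time.Minute), // illustrative expiry
	}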

You can also sign a URL without creating a client. See the documentation of
[SignedURL] for details.

	url, err := storage.SignedURL(bucketName, "shared-object", opts)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(url)

# Post Policy V4 Signed Request

A Post Policy V4 signed request allows uploads through HTML forms directly to Cloud Storage with
temporary permission. Conditions can be applied to restrict how the HTML form is used and exercised
by a user.

For more information, please see the [XML POST Object docs] as well
as the documentation of [BucketHandle.GenerateSignedPostPolicyV4].

	pv4, err := client.Bucket(bucketName).GenerateSignedPostPolicyV4(objectName, opts)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Printf("URL: %s\nFields: %v\n", pv4.URL, pv4.Fields)
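
Here opts is a [PostPolicyV4Options]; as a minimal sketch (the expiry and size limit are
illustrative), it might be constructed like this:

	opts := &storage.PostPolicyV4Options{
		Expires: time.Now().Add(15 * time.Minute), // illustrative expiry
		Conditions: []storage.PostPolicyV4Condition{
			// Restrict form uploads to at most 10 MiB (illustrative limit).
			storage.ConditionContentLengthRange(0, 10*1024*1024),
		},
	}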

# Credential requirements for signing

If the GoogleAccessID and PrivateKey option fields are not provided, they will
be automatically detected by [BucketHandle.SignedURL] and
[BucketHandle.GenerateSignedPostPolicyV4] if any of the following are true:
  - you are authenticated to the Storage Client with a service account's
    downloaded private key, either directly in code or by setting the
    GOOGLE_APPLICATION_CREDENTIALS environment variable (see [Other Environments]),
  - your application is running on Google Compute Engine (GCE), or
  - you are logged into [gcloud using application default credentials]
    with [impersonation enabled].

Detecting GoogleAccessID may not be possible if you are authenticated using a
token source or using [option.WithHTTPClient]. In this case, you can provide a
service account email for GoogleAccessID and the client will attempt to sign
the URL or Post Policy using that service account.
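
As a minimal sketch (the service account email is a placeholder), that looks like this:

	opts := &storage.SignedURLOptions{
		Scheme:         storage.SigningSchemeV4,
		Method:         "GET",
		Expires:        time.Now().Add(15 * time.Minute),
		GoogleAccessID: "signer@my-project.iam.gserviceaccount.com", // placeholder service account email
	}
	url, err := client.Bucket(bucketName).SignedURL(objectName, opts)
	if err != nil {
		// TODO: Handle error.
	}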

To generate the signature, you must have:
  - iam.serviceAccounts.signBlob permissions on the GoogleAccessID service
    account, and
  - the [IAM Service Account Credentials API] enabled (unless authenticating
    with a downloaded private key).

# Errors

Errors returned by this client are often of the type [googleapi.Error].
These errors can be introspected for more information by using [errors.As]
with the richer [googleapi.Error] type. For example:

	var e *googleapi.Error
	if ok := errors.As(err, &e); ok {
		if e.Code == 409 { ... }
	}

# Retrying failed requests

Methods in this package may retry calls that fail with transient errors.
Retrying continues indefinitely unless the controlling context is canceled, the
client is closed, or a non-transient error is received. To stop retries from
continuing, use context timeouts or cancellation.

The retry strategy in this library follows best practices for Cloud Storage. By
default, operations are retried only if they are idempotent, and exponential
backoff with jitter is employed. In addition, errors are only retried if they
are defined as transient by the service. See the [Cloud Storage retry docs]
for more information.

Users can configure non-default retry behavior for a single library call (using
[BucketHandle.Retryer] and [ObjectHandle.Retryer]) or for all calls made by a
client (using [Client.SetRetry]). For example:

	o := client.Bucket(bucket).Object(object).Retryer(
		// Use WithBackoff to change the timing of the exponential backoff.
		storage.WithBackoff(gax.Backoff{
			Initial: 2 * time.Second,
		}),
		// Use WithPolicy to configure the idempotency policy. RetryAlways will
		// retry the operation even if it is non-idempotent.
		storage.WithPolicy(storage.RetryAlways),
	)

	// Use a context timeout to set an overall deadline on the call, including all
	// potential retries.
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	// Delete an object using the specified strategy and timeout.
	if err := o.Delete(ctx); err != nil {
		// Handle err.
	}
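
To apply a retry configuration to every call made by a client, use [Client.SetRetry];
a minimal sketch (the backoff values are illustrative):

	client.SetRetry(
		storage.WithBackoff(gax.Backoff{
			Initial: 2 * time.Second,
			Max:     30 * time.Second,
		}),
		// RetryIdempotent is the default policy; it is spelled out here for clarity.
		storage.WithPolicy(storage.RetryIdempotent),
	)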

# Sending Custom Headers

You can add custom headers to any API call made by this package by using
[callctx.SetHeaders] on the context which is passed to the method. For example,
to add a [custom audit logging] header:

	ctx := context.Background()
	ctx = callctx.SetHeaders(ctx, "x-goog-custom-audit-<key>", "<value>")
	// Use client as usual with the context and the additional headers will be sent.
	client.Bucket("my-bucket").Attrs(ctx)

# Experimental gRPC API

This package includes support for the Cloud Storage gRPC API, which is currently
in preview. This implementation uses gRPC rather than the current JSON & XML
APIs to make requests to Cloud Storage. If you would like to try the API,
please contact your GCP account rep for more information. The gRPC API is not
yet generally available, so it may be subject to breaking changes.

To create a client which will use gRPC, use the alternate constructor:

	ctx := context.Background()
	client, err := storage.NewGRPCClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// Use client as usual.

If the application is running within GCP, users may get better performance by
enabling DirectPath (enabling requests to skip some proxy steps). To enable,
set the environment variable `GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS=true` and add
the following side-effect imports to your application:

	import (
		_ "google.golang.org/grpc/balancer/rls"
		_ "google.golang.org/grpc/xds/googledirectpath"
	)

[Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam
[XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object
[Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy
[Other Environments]: https://cloud.google.com/storage/docs/authentication#libauth
[gcloud using application default credentials]: https://cloud.google.com/sdk/gcloud/reference/auth/application-default/login
[impersonation enabled]: https://cloud.google.com/sdk/gcloud/reference#--impersonate-service-account
[IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview
[custom audit logging]: https://cloud.google.com/storage/docs/audit-logging#add-custom-metadata
*/
package storage // import "cloud.google.com/go/storage"