Add option to provide custom endpoint for S3, add option to specify S3 config profile (#243)

* Add option to provide custom endpoint for S3 for use with s3-compatible storages, add option to specify S3 config profile

* make fmt
This commit is contained in:
glebsam 2019-11-29 18:59:56 +03:00 committed by Aliaksandr Valialkin
parent 4810f1dde6
commit 4a192cb832
4 changed files with 35 additions and 9 deletions

View file

@@ -129,6 +129,10 @@ Run `vmbackup -help` in order to see all the available options:
   -credsFilePath string
	Path to file with GCS or S3 credentials. Credentials are loaded from default locations if not set.
	See https://cloud.google.com/iam/docs/creating-managing-service-account-keys and https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html
+  -configProfile string
+	Profile name for S3 configs. (default "default")
+  -customS3Endpoint string
+	Custom S3 endpoint for use with S3-compatible storages (e.g. MinIO)
   -dst string
	Where to put the backup on the remote storage. Example: gcs://bucket/path/to/backup/dir, s3://bucket/path/to/backup/dir or fs:///path/to/local/backup/dir
	-dst can point to the previous backup. In this case incremental backup is performed, i.e. only changed data is uploaded

View file

@@ -43,6 +43,10 @@ Run `vmrestore -help` in order to see all the available options:
   -credsFilePath string
	Path to file with GCS or S3 credentials. Credentials are loaded from default locations if not set.
	See https://cloud.google.com/iam/docs/creating-managing-service-account-keys and https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html
+  -configProfile string
+	Profile name for S3 configs. (default "default")
+  -customS3Endpoint string
+	Custom S3 endpoint for use with S3-compatible storages (e.g. MinIO)
   -loggerLevel string
	Minimum level of errors to log. Possible values: INFO, ERROR, FATAL, PANIC (default "INFO")
   -maxBytesPerSecond int

View file

@@ -18,6 +18,8 @@ var (
 		"See https://cloud.google.com/iam/docs/creating-managing-service-account-keys and https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html")
 	configFilePath = flag.String("configFilePath", "", "Path to file with S3 configs. Configs are loaded from default location if not set.\n"+
 		"See https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html")
+	configProfile    = flag.String("configProfile", "default", "Profile name for S3 configs. ")
+	customS3Endpoint = flag.String("customS3Endpoint", "", "Custom S3 endpoint for use with S3-compatible storages (e.g. MinIO)")
 )

 func runParallel(concurrency int, parts []common.Part, f func(p common.Part) error, progress func(elapsed time.Duration)) error {
@@ -218,6 +220,8 @@ func NewRemoteFS(path string) (common.RemoteFS, error) {
 		fs := &s3remote.FS{
 			CredsFilePath:  *credsFilePath,
 			ConfigFilePath: *configFilePath,
+			CustomEndpoint: *customS3Endpoint,
+			ProfileName:    *configProfile,
 			Bucket:         bucket,
 			Dir:            dir,
 		}

View file

@@ -30,8 +30,10 @@ type FS struct {
 	// Directory in the bucket to write to.
 	Dir string

 	s3       *s3.S3
 	uploader *s3manager.Uploader
+	CustomEndpoint string
+	ProfileName    string
 }

 // Init initializes fs.
@@ -47,6 +49,7 @@ func (fs *FS) Init() error {
 	}
 	opts := session.Options{
 		SharedConfigState: session.SharedConfigEnable,
+		Profile:           fs.ProfileName,
 	}
 	if len(fs.CredsFilePath) > 0 {
 		opts.SharedConfigFiles = []string{
@@ -59,14 +62,25 @@ func (fs *FS) Init() error {
 		return fmt.Errorf("cannot create S3 session: %s", err)
 	}

-	// Determine bucket region.
-	ctx := context.Background()
-	region, err := s3manager.GetBucketRegion(ctx, sess, fs.Bucket, "")
-	if err != nil {
-		return fmt.Errorf("cannot determine region for bucket %q: %s", fs.Bucket, err)
+	if len(fs.CustomEndpoint) > 0 {
+		// Use provided custom endpoint for S3
+		logger.Infof("Using provided custom S3 endpoint: %q", fs.CustomEndpoint)
+		sess.Config.WithEndpoint(fs.CustomEndpoint)
+		// Disable prefixing endpoint with bucket name
+		sess.Config.WithS3ForcePathStyle(true)
+	} else {
+		// Determine bucket region.
+		ctx := context.Background()
+		region, err := s3manager.GetBucketRegion(ctx, sess, fs.Bucket, "")
+		if err != nil {
+			return fmt.Errorf("cannot determine region for bucket %q: %s", fs.Bucket, err)
+		}
+		sess.Config.WithRegion(region)
+		logger.Infof("bucket %q is stored at region %q; switching to this region", fs.Bucket, region)
 	}
-	sess.Config.WithRegion(region)
-	logger.Infof("bucket %q is stored at region %q; switching to this region", fs.Bucket, region)

 	fs.s3 = s3.New(sess)
 	fs.uploader = s3manager.NewUploader(sess, func(u *s3manager.Uploader) {