mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2024-11-21 14:44:00 +00:00
Add option to provide custom endpoint for S3, add option to specify S3 config profile (#243)
* Add option to provide custom endpoint for S3 for use with S3-compatible storages; add option to specify S3 config profile * make fmt
This commit is contained in:
parent
9010c6a1d6
commit
15b7406f7b
4 changed files with 35 additions and 9 deletions
|
@ -132,6 +132,10 @@ Run `vmbackup -help` in order to see all the available options:
|
|||
-credsFilePath string
|
||||
Path to file with GCS or S3 credentials. Credentials are loaded from default locations if not set.
|
||||
See https://cloud.google.com/iam/docs/creating-managing-service-account-keys and https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html
|
||||
-configProfile string
|
||||
Profile name for S3 configs. (default "default")
|
||||
-customS3Endpoint string
|
||||
Custom S3 endpoint for use with S3-compatible storages (e.g. MinIO)
|
||||
-dst string
|
||||
Where to put the backup on the remote storage. Example: gcs://bucket/path/to/backup/dir, s3://bucket/path/to/backup/dir or fs:///path/to/local/backup/dir
|
||||
-dst can point to the previous backup. In this case incremental backup is performed, i.e. only changed data is uploaded
|
||||
|
|
|
@ -43,6 +43,10 @@ Run `vmrestore -help` in order to see all the available options:
|
|||
-credsFilePath string
|
||||
Path to file with GCS or S3 credentials. Credentials are loaded from default locations if not set.
|
||||
See https://cloud.google.com/iam/docs/creating-managing-service-account-keys and https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html
|
||||
-configProfile string
|
||||
Profile name for S3 configs. (default "default")
|
||||
-customS3Endpoint string
|
||||
Custom S3 endpoint for use with S3-compatible storages (e.g. MinIO)
|
||||
-loggerLevel string
|
||||
Minimum level of errors to log. Possible values: INFO, ERROR, FATAL, PANIC (default "INFO")
|
||||
-maxBytesPerSecond int
|
||||
|
|
|
@ -18,6 +18,8 @@ var (
|
|||
"See https://cloud.google.com/iam/docs/creating-managing-service-account-keys and https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html")
|
||||
configFilePath = flag.String("configFilePath", "", "Path to file with S3 configs. Configs are loaded from default location if not set.\n"+
|
||||
"See https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html")
|
||||
configProfile = flag.String("configProfile", "default", "Profile name for S3 configs. ")
|
||||
customS3Endpoint = flag.String("customS3Endpoint", "", "Custom S3 endpoint for use with S3-compatible storages (e.g. MinIO)")
|
||||
)
|
||||
|
||||
func runParallel(concurrency int, parts []common.Part, f func(p common.Part) error, progress func(elapsed time.Duration)) error {
|
||||
|
@ -218,6 +220,8 @@ func NewRemoteFS(path string) (common.RemoteFS, error) {
|
|||
fs := &s3remote.FS{
|
||||
CredsFilePath: *credsFilePath,
|
||||
ConfigFilePath: *configFilePath,
|
||||
CustomEndpoint: *customS3Endpoint,
|
||||
ProfileName: *configProfile,
|
||||
Bucket: bucket,
|
||||
Dir: dir,
|
||||
}
|
||||
|
|
|
@ -30,8 +30,10 @@ type FS struct {
|
|||
// Directory in the bucket to write to.
|
||||
Dir string
|
||||
|
||||
s3 *s3.S3
|
||||
uploader *s3manager.Uploader
|
||||
s3 *s3.S3
|
||||
uploader *s3manager.Uploader
|
||||
CustomEndpoint string
|
||||
ProfileName string
|
||||
}
|
||||
|
||||
// Init initializes fs.
|
||||
|
@ -47,6 +49,7 @@ func (fs *FS) Init() error {
|
|||
}
|
||||
opts := session.Options{
|
||||
SharedConfigState: session.SharedConfigEnable,
|
||||
Profile: fs.ProfileName,
|
||||
}
|
||||
if len(fs.CredsFilePath) > 0 {
|
||||
opts.SharedConfigFiles = []string{
|
||||
|
@ -59,14 +62,25 @@ func (fs *FS) Init() error {
|
|||
return fmt.Errorf("cannot create S3 session: %s", err)
|
||||
}
|
||||
|
||||
// Determine bucket region.
|
||||
ctx := context.Background()
|
||||
region, err := s3manager.GetBucketRegion(ctx, sess, fs.Bucket, "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot determine region for bucket %q: %s", fs.Bucket, err)
|
||||
if len(fs.CustomEndpoint) > 0 {
|
||||
|
||||
// Use provided custom endpoint for S3
|
||||
logger.Infof("Using provided custom S3 endpoint: %q", fs.CustomEndpoint)
|
||||
sess.Config.WithEndpoint(fs.CustomEndpoint)
|
||||
|
||||
// Disable prefixing endpoint with bucket name
|
||||
sess.Config.WithS3ForcePathStyle(true)
|
||||
} else {
|
||||
|
||||
// Determine bucket region.
|
||||
ctx := context.Background()
|
||||
region, err := s3manager.GetBucketRegion(ctx, sess, fs.Bucket, "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot determine region for bucket %q: %s", fs.Bucket, err)
|
||||
}
|
||||
sess.Config.WithRegion(region)
|
||||
logger.Infof("bucket %q is stored at region %q; switching to this region", fs.Bucket, region)
|
||||
}
|
||||
sess.Config.WithRegion(region)
|
||||
logger.Infof("bucket %q is stored at region %q; switching to this region", fs.Bucket, region)
|
||||
|
||||
fs.s3 = s3.New(sess)
|
||||
fs.uploader = s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
|
||||
|
|
Loading…
Reference in a new issue