Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git, synced 2024-11-21 14:44:00 +00:00
vendor: update github.com/aws/aws-sdk-go from v1.29.10 to v1.29.22
This commit is contained in:
parent fa4d70b428
commit d91790543f
10 changed files with 322 additions and 72 deletions
go.mod (2 changed lines)

@@ -4,7 +4,7 @@ require (
 	cloud.google.com/go/storage v1.6.0
 	github.com/VictoriaMetrics/fastcache v1.5.7
 	github.com/VictoriaMetrics/metrics v1.11.0
-	github.com/aws/aws-sdk-go v1.29.10
+	github.com/aws/aws-sdk-go v1.29.22
 	github.com/cespare/xxhash/v2 v2.1.1
 	github.com/golang/snappy v0.0.1
 	github.com/klauspost/compress v1.10.3
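For reference, a bump like this is normally produced with the standard Go module tooling rather than by hand-editing these files; commands along these lines (illustrative, not recorded in the commit itself) regenerate go.mod, go.sum, and the vendor tree:

go get github.com/aws/aws-sdk-go@v1.29.22
go mod tidy
go mod vendor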
go.sum (4 changed lines)

@@ -41,8 +41,8 @@ github.com/VictoriaMetrics/metrics v1.11.0 h1:sfRmbgk7hGrxNXrziwyTmU8FZFLFrPNC7g
 github.com/VictoriaMetrics/metrics v1.11.0/go.mod h1:LU2j9qq7xqZYXz8tF3/RQnB2z2MbZms5TDiIg9/NHiQ=
 github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
 github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
-github.com/aws/aws-sdk-go v1.29.10 h1:QJOQq1xNmdrY5mXUmC8CHXzZPve8134Bx/Ux0o6s38s=
-github.com/aws/aws-sdk-go v1.29.10/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg=
+github.com/aws/aws-sdk-go v1.29.22 h1:3WmsCj3C30l6/4f50mPkDZoTPWSvaRCjcVJOWdCJoIE=
+github.com/aws/aws-sdk-go v1.29.22/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go (4 changed lines, generated, vendored)

@@ -19,7 +19,9 @@ type StaticProvider struct {
 }
 
 // NewStaticCredentials returns a pointer to a new Credentials object
-// wrapping a static credentials value provider.
+// wrapping a static credentials value provider. Token is only required
+// for temporary security credentials retrieved via STS, otherwise an empty
+// string can be passed for this parameter.
 func NewStaticCredentials(id, secret, token string) *Credentials {
 	return NewCredentials(&StaticProvider{Value: Value{
 		AccessKeyID: id,
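The clarified doc comment is worth spelling out: for long-lived IAM access keys there is no STS session token, so the third argument is simply the empty string. A minimal sketch (the key values are placeholders):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Static long-lived credentials: no STS session token, so pass "".
	creds := credentials.NewStaticCredentials("AKIDEXAMPLE", "secretEXAMPLE", "")

	v, err := creds.Get()
	if err != nil {
		panic(err)
	}
	fmt.Println(v.AccessKeyID)
}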
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go (35 changed lines, generated, vendored)

@@ -534,6 +534,7 @@ var awsPartition = partition{
 		"appmesh": service{
 
 			Endpoints: endpoints{
+				"ap-east-1":      endpoint{},
 				"ap-northeast-1": endpoint{},
 				"ap-northeast-2": endpoint{},
 				"ap-south-1":     endpoint{},
@@ -1052,6 +1053,7 @@ var awsPartition = partition{
 				"ap-southeast-2": endpoint{},
 				"ca-central-1":   endpoint{},
 				"eu-central-1":   endpoint{},
+				"eu-north-1":     endpoint{},
 				"eu-west-1":      endpoint{},
 				"eu-west-2":      endpoint{},
 				"us-east-1":      endpoint{},
@@ -1571,17 +1573,6 @@ var awsPartition = partition{
 				"us-west-2": endpoint{},
 			},
 		},
-		"elastic-inference": service{
-
-			Endpoints: endpoints{
-				"ap-northeast-1": endpoint{},
-				"ap-northeast-2": endpoint{},
-				"eu-west-1":      endpoint{},
-				"us-east-1":      endpoint{},
-				"us-east-2":      endpoint{},
-				"us-west-2":      endpoint{},
-			},
-		},
 		"elasticache": service{
 
 			Endpoints: endpoints{
@@ -3701,6 +3692,7 @@ var awsPartition = partition{
 		"servicecatalog": service{
 
 			Endpoints: endpoints{
+				"ap-east-1":      endpoint{},
 				"ap-northeast-1": endpoint{},
 				"ap-northeast-2": endpoint{},
 				"ap-south-1":     endpoint{},
@@ -3712,6 +3704,7 @@ var awsPartition = partition{
 				"eu-west-1":  endpoint{},
 				"eu-west-2":  endpoint{},
 				"eu-west-3":  endpoint{},
+				"me-south-1": endpoint{},
 				"sa-east-1":  endpoint{},
 				"us-east-1":  endpoint{},
 				"us-east-1-fips": endpoint{
@@ -4378,6 +4371,13 @@ var awscnPartition = partition{
 		},
 	},
 	Services: services{
+		"acm": service{
+
+			Endpoints: endpoints{
+				"cn-north-1":     endpoint{},
+				"cn-northwest-1": endpoint{},
+			},
+		},
 		"api.ecr": service{
 
 			Endpoints: endpoints{
@@ -5829,6 +5829,13 @@ var awsusgovPartition = partition{
 				"us-gov-west-1": endpoint{},
 			},
 		},
+		"xray": service{
+
+			Endpoints: endpoints{
+				"us-gov-east-1": endpoint{},
+				"us-gov-west-1": endpoint{},
+			},
+		},
 	},
 }
 
@@ -6407,6 +6414,12 @@ var awsisobPartition = partition{
 				"us-isob-east-1": endpoint{},
 			},
 		},
+		"ssm": service{
+
+			Endpoints: endpoints{
+				"us-isob-east-1": endpoint{},
+			},
+		},
 		"states": service{
 
 			Endpoints: endpoints{
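These generated tables are what the SDK's public endpoint resolver consults, so the entries added above become resolvable regions. A small illustrative check (the service and region strings are taken from the hunks above; the printed URL is what the resolver would be expected to produce):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// After this update, App Mesh resolves in ap-east-1 (added in the first hunk).
	e, err := endpoints.DefaultResolver().EndpointFor("appmesh", "ap-east-1")
	if err != nil {
		panic(err)
	}
	fmt.Println(e.URL) // e.g. https://appmesh.ap-east-1.amazonaws.com
}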
vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go (10 changed lines, generated, vendored)

@@ -3,6 +3,7 @@ package session
 import (
 	"fmt"
 	"os"
+	"time"
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
@@ -206,7 +207,14 @@ func credsFromAssumeRole(cfg aws.Config,
 		sharedCfg.RoleARN,
 		func(opt *stscreds.AssumeRoleProvider) {
 			opt.RoleSessionName = sharedCfg.RoleSessionName
-			opt.Duration = sessOpts.AssumeRoleDuration
+
+			if sessOpts.AssumeRoleDuration == 0 &&
+				sharedCfg.AssumeRoleDuration != nil &&
+				*sharedCfg.AssumeRoleDuration/time.Minute > 15 {
+				opt.Duration = *sharedCfg.AssumeRoleDuration
+			} else if sessOpts.AssumeRoleDuration != 0 {
+				opt.Duration = sessOpts.AssumeRoleDuration
+			}
 
 			// Assume role with external ID
 			if len(sharedCfg.ExternalID) > 0 {
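In plain terms: an explicit AssumeRoleDuration set in code still wins; a duration_seconds value from the shared config file is honored only when no duration is set in code and it exceeds 15 minutes. A hedged sketch of the code-side option (the profile name is a placeholder):

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// An explicit AssumeRoleDuration takes precedence over duration_seconds
	// in the shared config profile.
	sess, err := session.NewSessionWithOptions(session.Options{
		SharedConfigState:  session.SharedConfigEnable,
		Profile:            "assume-role-example", // hypothetical profile name
		AssumeRoleDuration: 30 * time.Minute,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(aws.StringValue(sess.Config.Region))
}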
vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go (28 changed lines, generated, vendored)

@@ -2,6 +2,7 @@ package session
 
 import (
 	"fmt"
+	"time"
 
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/credentials"
@@ -16,12 +17,13 @@ const (
 	sessionTokenKey = `aws_session_token` // optional
 
 	// Assume Role Credentials group
 	roleArnKey          = `role_arn`          // group required
 	sourceProfileKey    = `source_profile`    // group required (or credential_source)
 	credentialSourceKey = `credential_source` // group required (or source_profile)
 	externalIDKey       = `external_id`       // optional
 	mfaSerialKey        = `mfa_serial`        // optional
 	roleSessionNameKey  = `role_session_name` // optional
+	roleDurationSecondsKey = "duration_seconds" // optional
 
 	// CSM options
 	csmEnabledKey = `csm_enabled`
@@ -73,10 +75,11 @@ type sharedConfig struct {
 	CredentialProcess    string
 	WebIdentityTokenFile string
 
 	RoleARN         string
 	RoleSessionName string
 	ExternalID      string
 	MFASerial       string
+	AssumeRoleDuration *time.Duration
 
 	SourceProfileName string
 	SourceProfile     *sharedConfig
@@ -274,6 +277,11 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
 	updateString(&cfg.CredentialSource, section, credentialSourceKey)
 	updateString(&cfg.Region, section, regionKey)
 
+	if section.Has(roleDurationSecondsKey) {
+		d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second
+		cfg.AssumeRoleDuration = &d
+	}
+
 	if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 {
 		sre, err := endpoints.GetSTSRegionalEndpoint(v)
 		if err != nil {
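The new duration_seconds key is read from an assume-role profile in the shared config file. An illustrative profile (the account ID, role, and names are placeholders):

[profile assume-role-example]
role_arn          = arn:aws:iam::123456789012:role/example-role
source_profile    = default
role_session_name = example-session
duration_seconds  = 3600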
vendor/github.com/aws/aws-sdk-go/aws/version.go (2 changed lines, generated, vendored)

@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.29.10"
+const SDKVersion = "1.29.22"
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/pool.go (244 added lines, generated, vendored, new file)

@@ -0,0 +1,244 @@
+package s3manager
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/aws/aws-sdk-go/aws"
+)
+
+type byteSlicePool interface {
+	Get(aws.Context) (*[]byte, error)
+	Put(*[]byte)
+	ModifyCapacity(int)
+	SliceSize() int64
+	Close()
+}
+
+type maxSlicePool struct {
+	// allocator is defined as a function pointer to allow
+	// for test cases to instrument custom tracers when allocations
+	// occur.
+	allocator sliceAllocator
+
+	slices         chan *[]byte
+	allocations    chan struct{}
+	capacityChange chan struct{}
+
+	max       int
+	sliceSize int64
+
+	mtx sync.RWMutex
+}
+
+func newMaxSlicePool(sliceSize int64) *maxSlicePool {
+	p := &maxSlicePool{sliceSize: sliceSize}
+	p.allocator = p.newSlice
+
+	return p
+}
+
+var errZeroCapacity = fmt.Errorf("get called on zero capacity pool")
+
+func (p *maxSlicePool) Get(ctx aws.Context) (*[]byte, error) {
+	// check if context is canceled before attempting to get a slice
+	// this ensures priority is given to the cancel case first
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	default:
+	}
+
+	p.mtx.RLock()
+
+	for {
+		select {
+		case bs, ok := <-p.slices:
+			p.mtx.RUnlock()
+			if !ok {
+				// attempt to get on a zero capacity pool
+				return nil, errZeroCapacity
+			}
+			return bs, nil
+		case _, ok := <-p.allocations:
+			p.mtx.RUnlock()
+			if !ok {
+				// attempt to get on a zero capacity pool
+				return nil, errZeroCapacity
+			}
+			return p.allocator(), nil
+		case <-ctx.Done():
+			p.mtx.RUnlock()
+			return nil, ctx.Err()
+		default:
+			// In the event that there are no slices or allocations available, release
+			// the read lock and wait for a notification. This prevents some deadlock
+			// situations that can occur around sync.RWMutex:
+			// when a lock request occurs on ModifyCapacity, no new readers are allowed to acquire a read lock.
+			// By releasing the read lock here and waiting for a notification, we prevent a deadlock situation where
+			// Get could hold the read lock indefinitely waiting for capacity, ModifyCapacity is waiting for a write lock,
+			// and a Put is blocked trying to get a read-lock which is blocked by ModifyCapacity.
+
+			// Short-circuit if the pool capacity is zero.
+			if p.max == 0 {
+				p.mtx.RUnlock()
+				return nil, errZeroCapacity
+			}
+
+			// Since we will be releasing the read-lock we need to take the reference to the channel.
+			// Since channels are references we will still get notified if slices are added, or if
+			// the channel is closed due to a capacity modification. This specifically avoids a data race condition
+			// where ModifyCapacity both closes a channel and initializes a new one while we don't have a read-lock.
+			c := p.capacityChange
+
+			p.mtx.RUnlock()
+
+			select {
+			case _ = <-c:
+				p.mtx.RLock()
+			case <-ctx.Done():
+				return nil, ctx.Err()
+			}
+		}
+	}
+}
+
+func (p *maxSlicePool) Put(bs *[]byte) {
+	p.mtx.RLock()
+	defer p.mtx.RUnlock()
+
+	if p.max == 0 {
+		return
+	}
+
+	select {
+	case p.slices <- bs:
+		p.notifyCapacity()
+	default:
+		// If the slice channel is already full when attempting to add the slice then we drop the slice.
+		// The logic here is to prevent a deadlock situation if the channel is already at max capacity.
+		// Allows us to reap allocations that are returned and are no longer needed.
+	}
+}
+
+func (p *maxSlicePool) ModifyCapacity(delta int) {
+	if delta == 0 {
+		return
+	}
+
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	p.max += delta
+
+	if p.max == 0 {
+		p.empty()
+		return
+	}
+
+	if p.capacityChange != nil {
+		close(p.capacityChange)
+	}
+	p.capacityChange = make(chan struct{}, p.max)
+
+	origAllocations := p.allocations
+	p.allocations = make(chan struct{}, p.max)
+
+	newAllocs := len(origAllocations) + delta
+	for i := 0; i < newAllocs; i++ {
+		p.allocations <- struct{}{}
+	}
+
+	if origAllocations != nil {
+		close(origAllocations)
+	}
+
+	origSlices := p.slices
+	p.slices = make(chan *[]byte, p.max)
+	if origSlices == nil {
+		return
+	}
+
+	close(origSlices)
+	for bs := range origSlices {
+		select {
+		case p.slices <- bs:
+		default:
+			// If the new channel blocks while adding slices from the old channel
+			// then we drop the slice. The logic here is to prevent a deadlock situation
+			// if the new channel has a smaller capacity than the old.
+		}
+	}
+}
+
+func (p *maxSlicePool) notifyCapacity() {
+	select {
+	case p.capacityChange <- struct{}{}:
+	default:
+		// This *shouldn't* happen as the channel is both buffered to the max pool capacity size and is resized
+		// on capacity modifications. This is just a safety to ensure that a blocking situation can't occur.
+	}
+}
+
+func (p *maxSlicePool) SliceSize() int64 {
+	return p.sliceSize
+}
+
+func (p *maxSlicePool) Close() {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+	p.empty()
+}
+
+func (p *maxSlicePool) empty() {
+	p.max = 0
+
+	if p.capacityChange != nil {
+		close(p.capacityChange)
+		p.capacityChange = nil
+	}
+
+	if p.allocations != nil {
+		close(p.allocations)
+		for range p.allocations {
+			// drain channel
+		}
+		p.allocations = nil
+	}
+
+	if p.slices != nil {
+		close(p.slices)
+		for range p.slices {
+			// drain channel
+		}
+		p.slices = nil
+	}
+}
+
+func (p *maxSlicePool) newSlice() *[]byte {
+	bs := make([]byte, p.sliceSize)
+	return &bs
+}
+
+type returnCapacityPoolCloser struct {
+	byteSlicePool
+	returnCapacity int
+}
+
+func (n *returnCapacityPoolCloser) ModifyCapacity(delta int) {
+	if delta > 0 {
+		n.returnCapacity = -1 * delta
+	}
+	n.byteSlicePool.ModifyCapacity(delta)
+}
+
+func (n *returnCapacityPoolCloser) Close() {
+	if n.returnCapacity < 0 {
+		n.byteSlicePool.ModifyCapacity(n.returnCapacity)
+	}
+}
+
+type sliceAllocator func() *[]byte
+
+var newByteSlicePool = func(sliceSize int64) byteSlicePool {
+	return newMaxSlicePool(sliceSize)
+}
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go (63 changed lines, generated, vendored)

@@ -366,6 +366,7 @@ func (u *uploader) upload() (*UploadOutput, error) {
 	if err := u.init(); err != nil {
 		return nil, awserr.New("ReadRequestBody", "unable to initialize upload", err)
 	}
+	defer u.cfg.partPool.Close()
 
 	if u.cfg.PartSize < MinUploadPartSize {
 		msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize)
@@ -404,8 +405,13 @@ func (u *uploader) init() error {
 
 	// If PartSize was changed or partPool was never set up then we need to allocate a new pool
 	// so that we return []byte slices of the correct size
-	if u.cfg.partPool == nil || u.cfg.partPool.Size() != u.cfg.PartSize {
+	poolCap := u.cfg.Concurrency + 1
+	if u.cfg.partPool == nil || u.cfg.partPool.SliceSize() != u.cfg.PartSize {
 		u.cfg.partPool = newByteSlicePool(u.cfg.PartSize)
+		u.cfg.partPool.ModifyCapacity(poolCap)
+	} else {
+		u.cfg.partPool = &returnCapacityPoolCloser{byteSlicePool: u.cfg.partPool}
+		u.cfg.partPool.ModifyCapacity(poolCap)
 	}
 
 	return nil
@@ -441,10 +447,6 @@ func (u *uploader) initSize() error {
 // does not need to be wrapped in a mutex because nextReader is only called
 // from the main thread.
 func (u *uploader) nextReader() (io.ReadSeeker, int, func(), error) {
-	type readerAtSeeker interface {
-		io.ReaderAt
-		io.ReadSeeker
-	}
 	switch r := u.in.Body.(type) {
 	case readerAtSeeker:
 		var err error
@@ -476,15 +478,19 @@ func (u *uploader) nextReader() (io.ReadSeeker, int, func(), error) {
 		return reader, int(n), cleanup, err
 
 	default:
-		part := u.cfg.partPool.Get()
-		n, err := readFillBuf(r, part)
+		part, err := u.cfg.partPool.Get(u.ctx)
+		if err != nil {
+			return nil, 0, func() {}, err
+		}
+
+		n, err := readFillBuf(r, *part)
 		u.readerPos += int64(n)
 
 		cleanup := func() {
 			u.cfg.partPool.Put(part)
 		}
 
-		return bytes.NewReader(part[0:n]), n, cleanup, err
+		return bytes.NewReader((*part)[0:n]), n, cleanup, err
 	}
 }
@@ -673,6 +679,8 @@ func (u *multiuploader) readChunk(ch chan chunk) {
 				u.seterr(err)
 			}
 		}
+
+		data.cleanup()
 	}
 }
@@ -690,7 +698,6 @@ func (u *multiuploader) send(c chunk) error {
 	}
 
 	resp, err := u.cfg.S3.UploadPartWithContext(u.ctx, params, u.cfg.RequestOptions...)
-	c.cleanup()
 	if err != nil {
 		return err
 	}
@@ -763,39 +770,7 @@ func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
 	return resp
 }
 
-type byteSlicePool interface {
-	Get() []byte
-	Put([]byte)
-	Size() int64
-}
-
-type partPool struct {
-	partSize int64
-	sync.Pool
-}
-
-func (p *partPool) Get() []byte {
-	return p.Pool.Get().([]byte)
-}
-
-func (p *partPool) Put(b []byte) {
-	p.Pool.Put(b)
-}
-
-func (p *partPool) Size() int64 {
-	return p.partSize
-}
-
-func newPartPool(partSize int64) *partPool {
-	p := &partPool{partSize: partSize}
-
-	p.New = func() interface{} {
-		return make([]byte, p.partSize)
-	}
-
-	return p
-}
-
-var newByteSlicePool = func(partSize int64) byteSlicePool {
-	return newPartPool(partSize)
-}
+type readerAtSeeker interface {
+	io.ReaderAt
+	io.ReadSeeker
+}
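Taken together, the uploader now sizes its buffer pool at Concurrency+1 parts and closes (or, for a shared pool, returns) that capacity when the upload finishes. Nothing changes for callers; a typical upload such as the sketch below (bucket and key are placeholders) should now buffer at most roughly PartSize × (Concurrency + 1) bytes, where the old unbounded sync.Pool could grow well past that:

package main

import (
	"bytes"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	sess := session.Must(session.NewSession())

	uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
		u.PartSize = 5 * 1024 * 1024 // the minimum part size
		u.Concurrency = 5            // pool capacity becomes Concurrency+1 = 6 parts
	})

	// A non-seekable Body exercises the pooled-buffer path in nextReader.
	_, err := uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
		Body:   bytes.NewBuffer(make([]byte, 16*1024*1024)),
	})
	if err != nil {
		panic(err)
	}
}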
vendor/modules.txt (2 changed lines, vendored)

@@ -14,7 +14,7 @@ github.com/BurntSushi/toml
 github.com/VictoriaMetrics/fastcache
 # github.com/VictoriaMetrics/metrics v1.11.0
 github.com/VictoriaMetrics/metrics
-# github.com/aws/aws-sdk-go v1.29.10
+# github.com/aws/aws-sdk-go v1.29.22
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/arn
 github.com/aws/aws-sdk-go/aws/awserr