vendor: make vendor-update

Aliaksandr Valialkin 2022-05-20 14:45:24 +03:00
parent a175a57084
commit d87733fe1c
No known key found for this signature in database
GPG key ID: A72BEC6CD3D0DED1
165 changed files with 10127 additions and 4957 deletions

30
go.mod
View file

@@ -3,7 +3,7 @@ module github.com/VictoriaMetrics/VictoriaMetrics
go 1.17
require (
cloud.google.com/go/storage v1.22.0
cloud.google.com/go/storage v1.22.1
github.com/VictoriaMetrics/fastcache v1.10.0
// Do not use the original github.com/valyala/fasthttp because of issues
@@ -11,7 +11,7 @@ require (
github.com/VictoriaMetrics/fasthttp v1.1.0
github.com/VictoriaMetrics/metrics v1.18.1
github.com/VictoriaMetrics/metricsql v0.43.0
github.com/aws/aws-sdk-go v1.44.9
github.com/aws/aws-sdk-go v1.44.18
github.com/cespare/xxhash/v2 v2.1.2
// TODO: switch back to https://github.com/cheggaaa/pb/v3 when v3-pooling branch
@@ -20,18 +20,18 @@ require (
github.com/dmitryk-dk/pb/v3 v3.0.9
github.com/golang/snappy v0.0.4
github.com/influxdata/influxdb v1.9.7
github.com/klauspost/compress v1.15.3
github.com/klauspost/compress v1.15.4
github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
github.com/urfave/cli/v2 v2.6.0
github.com/urfave/cli/v2 v2.7.1
github.com/valyala/fastjson v1.6.3
github.com/valyala/fastrand v1.1.0
github.com/valyala/fasttemplate v1.2.1
github.com/valyala/gozstd v1.17.0
github.com/valyala/quicktemplate v1.7.0
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4
golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6
google.golang.org/api v0.78.0
golang.org/x/sys v0.0.0-20220519141025-dcacdad47464
google.golang.org/api v0.80.0
gopkg.in/yaml.v2 v2.4.0
)
@@ -40,16 +40,18 @@ require (
cloud.google.com/go/compute v1.6.1 // indirect
cloud.google.com/go/iam v0.3.0 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/antzucaro/matchr v0.0.0-20210222213004-b04723ef80f0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/go-kit/kit v0.12.0 // indirect
github.com/go-kit/log v0.2.0 // indirect
github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.8 // indirect
github.com/googleapis/gax-go/v2 v2.3.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/googleapis/gax-go/v2 v2.4.0 // indirect
github.com/googleapis/go-type-adapters v1.0.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect
@@ -58,7 +60,7 @@ require (
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.12.1 // indirect
github.com/prometheus/client_golang v1.12.2 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.34.0 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
@@ -69,12 +71,12 @@ require (
go.opencensus.io v0.23.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3 // indirect
google.golang.org/grpc v1.46.0 // indirect
google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect
google.golang.org/grpc v1.46.2 // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
)

438
go.sum

File diff suppressed because it is too large

View file

@@ -1,3 +1,3 @@
{
"storage": "1.22.0"
"storage": "1.22.1"
}

View file

@@ -1,6 +1,14 @@
# Changes
### [1.22.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.22.0...storage/v1.22.1) (2022-05-19)
### Bug Fixes
* **storage:** bump genproto, remove deadcode ([#6059](https://github.com/googleapis/google-cloud-go/issues/6059)) ([bb10f9f](https://github.com/googleapis/google-cloud-go/commit/bb10f9faca57dc3b987e0fb601090887b3507f07))
* **storage:** remove field that no longer exists ([#6061](https://github.com/googleapis/google-cloud-go/issues/6061)) ([ee150cf](https://github.com/googleapis/google-cloud-go/commit/ee150cfd194463ddfcb59898cfb0237e47777973))
## [1.22.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.21.0...storage/v1.22.0) (2022-03-31)

View file

@@ -121,12 +121,12 @@ func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) {
func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
var acls *raw.ObjectAccessControls
var err error
req := a.c.raw.DefaultObjectAccessControls.List(a.bucket)
a.configureCall(ctx, req)
err = run(ctx, func() error {
req := a.c.raw.DefaultObjectAccessControls.List(a.bucket)
a.configureCall(ctx, req)
acls, err = req.Do()
return err
}, a.retry, true)
}, a.retry, true, setRetryHeaderHTTP(req))
if err != nil {
return nil, err
}
@@ -139,18 +139,18 @@ func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) e
return run(ctx, func() error {
return req.Do()
}, a.retry, false)
}, a.retry, false, setRetryHeaderHTTP(req))
}
func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
var acls *raw.BucketAccessControls
var err error
req := a.c.raw.BucketAccessControls.List(a.bucket)
a.configureCall(ctx, req)
err = run(ctx, func() error {
req := a.c.raw.BucketAccessControls.List(a.bucket)
a.configureCall(ctx, req)
acls, err = req.Do()
return err
}, a.retry, true)
}, a.retry, true, setRetryHeaderHTTP(req))
if err != nil {
return nil, err
}
@@ -168,7 +168,7 @@ func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRol
return run(ctx, func() error {
_, err := req.Do()
return err
}, a.retry, false)
}, a.retry, false, setRetryHeaderHTTP(req))
}
func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
@@ -176,18 +176,18 @@ func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
a.configureCall(ctx, req)
return run(ctx, func() error {
return req.Do()
}, a.retry, false)
}, a.retry, false, setRetryHeaderHTTP(req))
}
func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
var acls *raw.ObjectAccessControls
var err error
req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object)
a.configureCall(ctx, req)
err = run(ctx, func() error {
req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object)
a.configureCall(ctx, req)
acls, err = req.Do()
return err
}, a.retry, true)
}, a.retry, true, setRetryHeaderHTTP(req))
if err != nil {
return nil, err
}
@@ -215,7 +215,7 @@ func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRol
return run(ctx, func() error {
_, err := req.Do()
return err
}, a.retry, false)
}, a.retry, false, setRetryHeaderHTTP(req))
}
func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
@@ -223,7 +223,7 @@ func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
a.configureCall(ctx, req)
return run(ctx, func() error {
return req.Do()
}, a.retry, false)
}, a.retry, false, setRetryHeaderHTTP(req))
}
func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) {

View file

@@ -28,7 +28,6 @@ import (
"cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/trace"
"github.com/googleapis/go-type-adapters/adapters"
"golang.org/x/xerrors"
"google.golang.org/api/googleapi"
"google.golang.org/api/iamcredentials/v1"
"google.golang.org/api/iterator"
@@ -104,7 +103,7 @@ func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *Buck
if attrs != nil && attrs.PredefinedDefaultObjectACL != "" {
req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL)
}
return run(ctx, func() error { _, err := req.Context(ctx).Do(); return err }, b.retry, true)
return run(ctx, func() error { _, err := req.Context(ctx).Do(); return err }, b.retry, true, setRetryHeaderHTTP(req))
}
// Delete deletes the Bucket.
@@ -117,7 +116,7 @@ func (b *BucketHandle) Delete(ctx context.Context) (err error) {
return err
}
return run(ctx, func() error { return req.Context(ctx).Do() }, b.retry, true)
return run(ctx, func() error { return req.Context(ctx).Do() }, b.retry, true, setRetryHeaderHTTP(req))
}
func (b *BucketHandle) newDeleteCall() (*raw.BucketsDeleteCall, error) {
@@ -185,9 +184,9 @@ func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error
err = run(ctx, func() error {
resp, err = req.Context(ctx).Do()
return err
}, b.retry, true)
}, b.retry, true, setRetryHeaderHTTP(req))
var e *googleapi.Error
if ok := xerrors.As(err, &e); ok && e.Code == http.StatusNotFound {
if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
return nil, ErrBucketNotExist
}
if err != nil {
@@ -233,7 +232,7 @@ func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (
return err
}
if err := run(ctx, call, b.retry, isIdempotent); err != nil {
if err := run(ctx, call, b.retry, isIdempotent, setRetryHeaderHTTP(req)); err != nil {
return nil, err
}
return newBucket(rawBucket)
@@ -921,11 +920,9 @@ func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket {
Enabled: true,
}
}
// TODO(noahdietz): This will be switched to a string.
//
// if b.PublicAccessPrevention != PublicAccessPreventionUnknown {
// bktIAM.PublicAccessPrevention = b.PublicAccessPrevention.String()
// }
if b.PublicAccessPrevention != PublicAccessPreventionUnknown {
bktIAM.PublicAccessPrevention = b.PublicAccessPrevention.String()
}
}
return &storagepb.Bucket{
@@ -948,6 +945,81 @@ func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket {
}
}
func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket {
if ua == nil {
return &storagepb.Bucket{}
}
// TODO(cathyo): Handle labels. Pending b/230510191.
var v *storagepb.Bucket_Versioning
if ua.VersioningEnabled != nil {
v = &storagepb.Bucket_Versioning{Enabled: optional.ToBool(ua.VersioningEnabled)}
}
var bb *storagepb.Bucket_Billing
if ua.RequesterPays != nil {
bb = &storagepb.Bucket_Billing{RequesterPays: optional.ToBool(ua.RequesterPays)}
}
var bktIAM *storagepb.Bucket_IamConfig
var ublaEnabled bool
var bktPolicyOnlyEnabled bool
if ua.UniformBucketLevelAccess != nil {
ublaEnabled = optional.ToBool(ua.UniformBucketLevelAccess.Enabled)
}
if ua.BucketPolicyOnly != nil {
bktPolicyOnlyEnabled = optional.ToBool(ua.BucketPolicyOnly.Enabled)
}
if ublaEnabled || bktPolicyOnlyEnabled {
bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{
Enabled: true,
}
}
if ua.PublicAccessPrevention != PublicAccessPreventionUnknown {
bktIAM.PublicAccessPrevention = ua.PublicAccessPrevention.String()
}
var defaultHold bool
if ua.DefaultEventBasedHold != nil {
defaultHold = optional.ToBool(ua.DefaultEventBasedHold)
}
var lifecycle Lifecycle
if ua.Lifecycle != nil {
lifecycle = *ua.Lifecycle
}
var bktACL []*storagepb.BucketAccessControl
if ua.acl != nil {
bktACL = toProtoBucketACL(ua.acl)
}
if ua.PredefinedACL != "" {
// Clear ACL or the call will fail.
bktACL = nil
}
var bktDefaultObjectACL []*storagepb.ObjectAccessControl
if ua.defaultObjectACL != nil {
bktDefaultObjectACL = toProtoObjectACL(ua.defaultObjectACL)
}
if ua.PredefinedDefaultObjectACL != "" {
// Clear ACLs or the call will fail.
bktDefaultObjectACL = nil
}
return &storagepb.Bucket{
StorageClass: ua.StorageClass,
Acl: bktACL,
DefaultObjectAcl: bktDefaultObjectACL,
DefaultEventBasedHold: defaultHold,
Versioning: v,
Billing: bb,
Lifecycle: toProtoLifecycle(lifecycle),
RetentionPolicy: ua.RetentionPolicy.toProtoRetentionPolicy(),
Cors: toProtoCORS(ua.CORS),
Encryption: ua.Encryption.toProtoBucketEncryption(),
Logging: ua.Logging.toProtoBucketLogging(),
Website: ua.Website.toProtoBucketWebsite(),
IamConfig: bktIAM,
Rpo: ua.RPO.String(),
}
}
// CORS is the bucket's Cross-Origin Resource Sharing (CORS) configuration.
type CORS struct {
// MaxAge is the value to return in the Access-Control-Max-Age
@@ -1060,6 +1132,17 @@ type BucketAttrsToUpdate struct {
// more information.
RPO RPO
// acl is the list of access control rules on the bucket.
// It is unexported and only used internally by the gRPC client.
// Library users should use ACLHandle methods directly.
acl []ACLRule
// defaultObjectACL is the list of access controls to
// apply to new objects when no object ACL is provided.
// It is unexported and only used internally by the gRPC client.
// Library users should use ACLHandle methods directly.
defaultObjectACL []ACLRule
setLabels map[string]string
deleteLabels map[string]bool
}
@@ -1258,7 +1341,7 @@ func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error {
return run(ctx, func() error {
_, err := req.Context(ctx).Do()
return err
}, b.retry, true)
}, b.retry, true, setRetryHeaderHTTP(req))
}
// applyBucketConds modifies the provided call using the conditions in conds.
@@ -1329,7 +1412,7 @@ func (rp *RetentionPolicy) toProtoRetentionPolicy() *storagepb.Bucket_RetentionP
}
func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error) {
if rp == nil {
if rp == nil || rp.EffectiveTime == "" {
return nil, nil
}
t, err := time.Parse(time.RFC3339, rp.EffectiveTime)
@@ -1916,10 +1999,10 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error)
err = run(it.ctx, func() error {
resp, err = req.Context(it.ctx).Do()
return err
}, it.bucket.retry, true)
}, it.bucket.retry, true, setRetryHeaderHTTP(req))
if err != nil {
var e *googleapi.Error
if ok := xerrors.As(err, &e); ok && e.Code == http.StatusNotFound {
if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
err = ErrBucketNotExist
}
return "", err
@@ -1987,6 +2070,9 @@ func (it *BucketIterator) Next() (*BucketAttrs, error) {
// Note: This method is not safe for concurrent operations without explicit synchronization.
func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
// TODO: When the transport-agnostic client interface is integrated into the Veneer,
// this method should be removed, and the iterator should be initialized by the
// transport-specific client implementations.
func (it *BucketIterator) fetch(pageSize int, pageToken string) (token string, err error) {
req := it.client.raw.Buckets.List(it.projectID)
setClientHeader(req.Header())
@@ -2000,7 +2086,7 @@ func (it *BucketIterator) fetch(pageSize int, pageToken string) (token string, e
err = run(it.ctx, func() error {
resp, err = req.Context(it.ctx).Do()
return err
}, it.client.retry, true)
}, it.client.retry, true, setRetryHeaderHTTP(req))
if err != nil {
return "", err
}

View file

@@ -43,16 +43,16 @@ type storageClient interface {
GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error)
CreateBucket(ctx context.Context, project string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error)
ListBuckets(ctx context.Context, project string, opts ...storageOption) (*BucketIterator, error)
ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator
Close() error
// Bucket methods.
DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error
GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error)
UpdateBucket(ctx context.Context, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error)
UpdateBucket(ctx context.Context, bucket string, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error)
LockBucketRetentionPolicy(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error
ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) (*ObjectIterator, error)
ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator
// Object metadata methods.

View file

@@ -142,7 +142,7 @@ func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.Rewr
retryCall := func() error { res, err = call.Do(); return err }
isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist)
if err := run(ctx, retryCall, c.dst.retry, isIdempotent); err != nil {
if err := run(ctx, retryCall, c.dst.retry, isIdempotent, setRetryHeaderHTTP(call)); err != nil {
return nil, err
}
c.RewriteToken = res.RewriteToken
@@ -237,7 +237,7 @@ func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
retryCall := func() error { obj, err = call.Do(); return err }
isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist)
if err := run(ctx, retryCall, c.dst.retry, isIdempotent); err != nil {
if err := run(ctx, retryCall, c.dst.retry, isIdempotent, setRetryHeaderHTTP(call)); err != nil {
return nil, err
}
return newObject(obj), nil
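Note: in both call sites above, the operation is treated as idempotent only when the destination carries a GenerationMatch or DoesNotExist precondition. A minimal sketch of opting in to retries that way; the bucket and object names are placeholders, not part of this diff:

package example

import (
	"context"

	"cloud.google.com/go/storage"
)

// copyIfAbsent copies src to dst only if dst does not exist. The DoesNotExist
// precondition is what makes isIdempotent true above, so the rewrite call may
// be retried safely on transient errors.
func copyIfAbsent(ctx context.Context, client *storage.Client) error {
	src := client.Bucket("my-bucket").Object("src")
	dst := client.Bucket("my-bucket").Object("dst").If(storage.Conditions{DoesNotExist: true})
	_, err := dst.CopierFrom(src).Run(ctx)
	return err
}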

View file

@@ -19,12 +19,15 @@ import (
"os"
gapic "cloud.google.com/go/storage/internal/apiv2"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
iampb "google.golang.org/genproto/googleapis/iam/v1"
storagepb "google.golang.org/genproto/googleapis/storage/v2"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
)
const (
@@ -110,8 +113,22 @@ func (c *grpcStorageClient) Close() error {
// Top-level methods.
func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) {
return "", errMethodNotSupported
s := callSettings(c.settings, opts...)
req := &storagepb.GetServiceAccountRequest{
Project: toProjectResource(project),
}
var resp *storagepb.ServiceAccount
err := run(ctx, func() error {
var err error
resp, err = c.raw.GetServiceAccount(ctx, req, s.gax...)
return err
}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx))
if err != nil {
return "", err
}
return resp.EmailAddress, err
}
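Note: this replaces the previous errMethodNotSupported stub. Callers reach it through the veneer's Client.ServiceAccount (whose HTTP counterpart appears later in this diff); a minimal usage sketch with a placeholder project ID:

package example

import (
	"context"
	"fmt"

	"cloud.google.com/go/storage"
)

// printServiceAccount prints the GCS service account email for a project.
// "my-project" is a placeholder project ID.
func printServiceAccount(ctx context.Context, client *storage.Client) error {
	email, err := client.ServiceAccount(ctx, "my-project")
	if err != nil {
		return err
	}
	fmt.Println("GCS service account:", email)
	return nil
}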
func (c *grpcStorageClient) CreateBucket(ctx context.Context, project string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) {
s := callSettings(c.settings, opts...)
b := attrs.toProtoBucket()
@@ -123,13 +140,11 @@ func (c *grpcStorageClient) CreateBucket(ctx context.Context, project string, at
}
req := &storagepb.CreateBucketRequest{
Parent: toProjectResource(project),
Bucket: b,
BucketId: b.GetName(),
// TODO(noahdietz): This will be switched to a string.
//
// PredefinedAcl: attrs.PredefinedACL,
// PredefinedDefaultObjectAcl: attrs.PredefinedDefaultObjectACL,
Parent: toProjectResource(project),
Bucket: b,
BucketId: b.GetName(),
PredefinedAcl: attrs.PredefinedACL,
PredefinedDefaultObjectAcl: attrs.PredefinedDefaultObjectACL,
}
var battrs *BucketAttrs
@@ -139,13 +154,57 @@ func (c *grpcStorageClient) CreateBucket(ctx context.Context, project string, at
battrs = newBucketFromProto(res)
return err
}, s.retry, s.idempotent)
}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx))
return battrs, err
}
func (c *grpcStorageClient) ListBuckets(ctx context.Context, project string, opts ...storageOption) (*BucketIterator, error) {
return nil, errMethodNotSupported
func (c *grpcStorageClient) ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator {
s := callSettings(c.settings, opts...)
it := &BucketIterator{
ctx: ctx,
projectID: project,
}
var gitr *gapic.BucketIterator
fetch := func(pageSize int, pageToken string) (token string, err error) {
// Initialize GAPIC-based iterator when pageToken is empty, which
// indicates that this fetch call is attempting to get the first page.
//
// Note: Initializing the GAPIC-based iterator lazily is necessary to
// capture the BucketIterator.Prefix set by the user *after* the
// BucketIterator is returned to them from the veneer.
if pageToken == "" {
req := &storagepb.ListBucketsRequest{
Parent: toProjectResource(it.projectID),
Prefix: it.Prefix,
}
gitr = c.raw.ListBuckets(it.ctx, req, s.gax...)
}
var buckets []*storagepb.Bucket
var next string
err = run(it.ctx, func() error {
buckets, next, err = gitr.InternalFetch(pageSize, pageToken)
return err
}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx))
if err != nil {
return "", err
}
for _, bkt := range buckets {
b := newBucketFromProto(bkt)
it.buckets = append(it.buckets, b)
}
return next, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
fetch,
func() int { return len(it.buckets) },
func() interface{} { b := it.buckets; it.buckets = nil; return b })
return it
}
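Note: the lazy initialization called out in the comment exists because the veneer hands the BucketIterator back to the caller before any RPC is made, so a Prefix assigned afterwards must still take effect on the first fetch. A sketch of that usage pattern; the project ID and prefix are placeholders:

package example

import (
	"context"
	"fmt"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

// listLogBuckets relies on the lazy fetch above: Prefix is set after the
// iterator is created and is only read when the first page is requested.
func listLogBuckets(ctx context.Context, client *storage.Client) error {
	it := client.Buckets(ctx, "my-project")
	it.Prefix = "logs-"
	for {
		battrs, err := it.Next()
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println(battrs.Name)
	}
}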
// Bucket methods.
@@ -159,14 +218,12 @@ func (c *grpcStorageClient) DeleteBucket(ctx context.Context, bucket string, con
return err
}
if s.userProject != "" {
req.CommonRequestParams = &storagepb.CommonRequestParams{
UserProject: toProjectResource(s.userProject),
}
ctx = setUserProjectMetadata(ctx, s.userProject)
}
return run(ctx, func() error {
return c.raw.DeleteBucket(ctx, req, s.gax...)
}, s.retry, s.idempotent)
}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx))
}
func (c *grpcStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) {
@@ -178,9 +235,7 @@ func (c *grpcStorageClient) GetBucket(ctx context.Context, bucket string, conds
return nil, err
}
if s.userProject != "" {
req.CommonRequestParams = &storagepb.CommonRequestParams{
UserProject: toProjectResource(s.userProject),
}
ctx = setUserProjectMetadata(ctx, s.userProject)
}
var battrs *BucketAttrs
@@ -190,7 +245,7 @@ func (c *grpcStorageClient) GetBucket(ctx context.Context, bucket string, conds
battrs = newBucketFromProto(res)
return err
}, s.retry, s.idempotent)
}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx))
if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
return nil, ErrBucketNotExist
@@ -198,14 +253,153 @@
return battrs, err
}
func (c *grpcStorageClient) UpdateBucket(ctx context.Context, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) {
return nil, errMethodNotSupported
func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) {
s := callSettings(c.settings, opts...)
b := uattrs.toProtoBucket()
b.Name = bucketResourceName(globalProjectAlias, bucket)
req := &storagepb.UpdateBucketRequest{
Bucket: b,
PredefinedAcl: uattrs.PredefinedACL,
PredefinedDefaultObjectAcl: uattrs.PredefinedDefaultObjectACL,
}
if err := applyBucketCondsProto("grpcStorageClient.UpdateBucket", conds, req); err != nil {
return nil, err
}
if s.userProject != "" {
ctx = setUserProjectMetadata(ctx, s.userProject)
}
var paths []string
fieldMask := &fieldmaskpb.FieldMask{
Paths: paths,
}
if uattrs.CORS != nil {
fieldMask.Paths = append(fieldMask.Paths, "cors")
}
if uattrs.DefaultEventBasedHold != nil {
fieldMask.Paths = append(fieldMask.Paths, "default_event_based_hold")
}
if uattrs.RetentionPolicy != nil {
fieldMask.Paths = append(fieldMask.Paths, "retention_policy")
}
if uattrs.VersioningEnabled != nil {
fieldMask.Paths = append(fieldMask.Paths, "versioning")
}
if uattrs.RequesterPays != nil {
fieldMask.Paths = append(fieldMask.Paths, "billing")
}
if uattrs.BucketPolicyOnly != nil || uattrs.UniformBucketLevelAccess != nil || uattrs.PublicAccessPrevention != PublicAccessPreventionUnknown {
fieldMask.Paths = append(fieldMask.Paths, "iam_config")
}
if uattrs.Encryption != nil {
fieldMask.Paths = append(fieldMask.Paths, "encryption")
}
if uattrs.Lifecycle != nil {
fieldMask.Paths = append(fieldMask.Paths, "lifecycle")
}
if uattrs.Logging != nil {
fieldMask.Paths = append(fieldMask.Paths, "logging")
}
if uattrs.Website != nil {
fieldMask.Paths = append(fieldMask.Paths, "website")
}
if uattrs.PredefinedACL != "" {
// In cases where PredefinedACL is set, Acl is cleared.
fieldMask.Paths = append(fieldMask.Paths, "acl")
}
if uattrs.PredefinedDefaultObjectACL != "" {
// In cases where PredefinedDefaultObjectACL is set, DefaultObjectAcl is cleared.
fieldMask.Paths = append(fieldMask.Paths, "default_object_acl")
}
if uattrs.acl != nil {
// In cases where acl is set by UpdateBucketACL method.
fieldMask.Paths = append(fieldMask.Paths, "acl")
}
if uattrs.defaultObjectACL != nil {
// In cases where defaultObjectACL is set by UpdateBucketACL method.
fieldMask.Paths = append(fieldMask.Paths, "default_object_acl")
}
if uattrs.StorageClass != "" {
fieldMask.Paths = append(fieldMask.Paths, "storage_class")
}
if uattrs.RPO != RPOUnknown {
fieldMask.Paths = append(fieldMask.Paths, "rpo")
}
// TODO(cathyo): Handle labels. Pending b/230510191.
req.UpdateMask = fieldMask
var battrs *BucketAttrs
err := run(ctx, func() error {
res, err := c.raw.UpdateBucket(ctx, req, s.gax...)
battrs = newBucketFromProto(res)
return err
}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx))
return battrs, err
}
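Note: the UpdateMask is what gives this call its PATCH semantics: only the listed paths are applied on the server. A hedged illustration of how the checks above translate a caller's update; the variable is a placeholder for documentation only:

package example

import "cloud.google.com/go/storage"

// Updating only versioning and billing. Per the checks above, the resulting
// req.UpdateMask.Paths would be []string{"versioning", "billing"}, leaving
// every other bucket field untouched on the server.
var uattrs = &storage.BucketAttrsToUpdate{
	VersioningEnabled: true,
	RequesterPays:     false,
}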
func (c *grpcStorageClient) LockBucketRetentionPolicy(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error {
return errMethodNotSupported
s := callSettings(c.settings, opts...)
req := &storagepb.LockBucketRetentionPolicyRequest{
Bucket: bucketResourceName(globalProjectAlias, bucket),
}
if err := applyBucketCondsProto("grpcStorageClient.LockBucketRetentionPolicy", conds, req); err != nil {
return err
}
return run(ctx, func() error {
_, err := c.raw.LockBucketRetentionPolicy(ctx, req, s.gax...)
return err
}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx))
}
func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) (*ObjectIterator, error) {
return nil, errMethodNotSupported
func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator {
s := callSettings(c.settings, opts...)
it := &ObjectIterator{
ctx: ctx,
}
if q != nil {
it.query = *q
}
req := &storagepb.ListObjectsRequest{
Parent: bucketResourceName(globalProjectAlias, bucket),
Prefix: it.query.Prefix,
Delimiter: it.query.Delimiter,
Versions: it.query.Versions,
LexicographicStart: it.query.StartOffset,
LexicographicEnd: it.query.EndOffset,
// TODO(noahdietz): Convert a projection to a FieldMask.
// ReadMask: q.Projection,
}
if s.userProject != "" {
ctx = setUserProjectMetadata(ctx, s.userProject)
}
gitr := c.raw.ListObjects(it.ctx, req, s.gax...)
fetch := func(pageSize int, pageToken string) (token string, err error) {
var objects []*storagepb.Object
err = run(it.ctx, func() error {
objects, token, err = gitr.InternalFetch(pageSize, pageToken)
return err
}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx))
if err != nil {
if st, ok := status.FromError(err); ok && st.Code() == codes.NotFound {
err = ErrBucketNotExist
}
return "", err
}
for _, obj := range objects {
b := newObjectFromProto(obj)
it.items = append(it.items, b)
}
return token, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
fetch,
func() int { return len(it.items) },
func() interface{} { b := it.items; it.items = nil; return b })
return it
}
// Object metadata methods.
@@ -223,10 +417,32 @@ func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object str
// Default Object ACL methods.
func (c *grpcStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error {
return errMethodNotSupported
// There is no separate API for PATCH in gRPC.
// Make a GET call first to retrieve BucketAttrs.
attrs, err := c.GetBucket(ctx, bucket, nil, opts...)
if err != nil {
return err
}
// Delete the entity and copy other remaining ACL entities.
var acl []ACLRule
for _, a := range attrs.DefaultObjectACL {
if a.Entity != entity {
acl = append(acl, a)
}
}
uattrs := &BucketAttrsToUpdate{defaultObjectACL: acl}
// Call UpdateBucket with a MetagenerationMatch precondition set.
if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil {
return err
}
return nil
}
func (c *grpcStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) {
return nil, errMethodNotSupported
attrs, err := c.GetBucket(ctx, bucket, nil, opts...)
if err != nil {
return nil, err
}
return attrs.DefaultObjectACL, nil
}
func (c *grpcStorageClient) UpdateDefaultObjectACL(ctx context.Context, opts ...storageOption) (*ACLRule, error) {
return nil, errMethodNotSupported
@@ -235,13 +451,51 @@ func (c *grpcStorageClient) UpdateDefaultObjectACL(ctx context.Context, opts ...
// Bucket ACL methods.
func (c *grpcStorageClient) DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error {
return errMethodNotSupported
// There is no separate API for PATCH in gRPC.
// Make a GET call first to retrieve BucketAttrs.
attrs, err := c.GetBucket(ctx, bucket, nil, opts...)
if err != nil {
return err
}
// Delete the entity and copy other remaining ACL entities.
var acl []ACLRule
for _, a := range attrs.ACL {
if a.Entity != entity {
acl = append(acl, a)
}
}
uattrs := &BucketAttrsToUpdate{acl: acl}
// Call UpdateBucket with a MetagenerationMatch precondition set.
if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil {
return err
}
return nil
}
func (c *grpcStorageClient) ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) {
return nil, errMethodNotSupported
attrs, err := c.GetBucket(ctx, bucket, nil, opts...)
if err != nil {
return nil, err
}
return attrs.ACL, nil
}
func (c *grpcStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error) {
return nil, errMethodNotSupported
// There is no separate API for PATCH in gRPC.
// Make a GET call first to retrieve BucketAttrs.
attrs, err := c.GetBucket(ctx, bucket, nil, opts...)
if err != nil {
return nil, err
}
var acl []ACLRule
aclRule := ACLRule{Entity: entity, Role: role}
acl = append(attrs.ACL, aclRule)
uattrs := &BucketAttrsToUpdate{acl: acl}
// Call UpdateBucket with a MetagenerationMatch precondition set.
_, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...)
if err != nil {
return nil, err
}
return &aclRule, err
}
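Note: gRPC has no standalone ACL PATCH, so these methods emulate it: read the bucket, edit the ACL list in memory, and write it back through UpdateBucket with a MetagenerationMatch precondition so a concurrent modification fails the write instead of being silently overwritten. Through the veneer this path is reached via ACLHandle; a sketch with placeholder names:

package example

import (
	"context"

	"cloud.google.com/go/storage"
)

// grantReader adds a reader ACL rule. Under the gRPC client this Set call is
// served by UpdateBucketACL above: GetBucket, append the rule, UpdateBucket
// guarded by MetagenerationMatch.
func grantReader(ctx context.Context, client *storage.Client) error {
	return client.Bucket("my-bucket").ACL().Set(ctx, storage.AllAuthenticatedUsers, storage.RoleReader)
}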
// Object ACL methods.
@@ -288,7 +542,7 @@ func (c *grpcStorageClient) GetIamPolicy(ctx context.Context, resource string, v
var err error
rp, err = c.raw.GetIamPolicy(ctx, req, s.gax...)
return err
}, s.retry, s.idempotent)
}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx))
return rp, err
}
@@ -305,7 +559,7 @@ func (c *grpcStorageClient) SetIamPolicy(ctx context.Context, resource string, p
return run(ctx, func() error {
_, err := c.raw.SetIamPolicy(ctx, req, s.gax...)
return err
}, s.retry, s.idempotent)
}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx))
}
func (c *grpcStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) {
@@ -320,7 +574,7 @@ func (c *grpcStorageClient) TestIamPermissions(ctx context.Context, resource str
var err error
res, err = c.raw.TestIamPermissions(ctx, req, s.gax...)
return err
}, s.retry, s.idempotent)
}, s.retry, s.idempotent, setRetryHeaderGRPC(ctx))
if err != nil {
return nil, err
}
@@ -344,3 +598,12 @@ func (c *grpcStorageClient) CreateHMACKey(ctx context.Context, desc *hmacKeyDesc
func (c *grpcStorageClient) DeleteHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) error {
return errMethodNotSupported
}
// setUserProjectMetadata appends a project ID to the outgoing Context metadata
// via the x-goog-user-project system parameter defined at
// https://cloud.google.com/apis/docs/system-parameters. This is only for
// billing purposes, and is generally optional, except for requester-pays
// buckets.
func setUserProjectMetadata(ctx context.Context, project string) context.Context {
return metadata.AppendToOutgoingContext(ctx, "x-goog-user-project", project)
}
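Note: in the veneer this metadata path is driven by BucketHandle.UserProject; a sketch of reading from a requester-pays bucket, with placeholder names:

package example

import (
	"context"
	"io"

	"cloud.google.com/go/storage"
)

// readRequesterPays reads an object from a requester-pays bucket, billing
// "my-billing-project". With the gRPC client, the project ends up in the
// x-goog-user-project metadata appended above.
func readRequesterPays(ctx context.Context, client *storage.Client) ([]byte, error) {
	r, err := client.Bucket("my-bucket").UserProject("my-billing-project").Object("obj").NewReader(ctx)
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return io.ReadAll(r)
}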

View file

@@ -130,7 +130,7 @@ func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMAC
err = run(ctx, func() error {
metadata, err = call.Context(ctx).Do()
return err
}, hkh.retry, true)
}, hkh.retry, true, setRetryHeaderHTTP(call))
if err != nil {
return nil, err
}
@@ -159,7 +159,7 @@ func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) err
return run(ctx, func() error {
return delCall.Context(ctx).Do()
}, hkh.retry, true)
}, hkh.retry, true, setRetryHeaderHTTP(delCall))
}
func pbHmacKeyToHMACKey(pb *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) {
@@ -221,7 +221,7 @@ func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEma
h, err := call.Context(ctx).Do()
hkPb = h
return err
}, c.retry, false); err != nil {
}, c.retry, false, setRetryHeaderHTTP(call)); err != nil {
return nil, err
}
@@ -267,7 +267,7 @@ func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opt
err = run(ctx, func() error {
metadata, err = call.Context(ctx).Do()
return err
}, h.retry, isIdempotent)
}, h.retry, isIdempotent, setRetryHeaderHTTP(call))
if err != nil {
return nil, err
@@ -373,7 +373,7 @@ func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string,
err = run(it.ctx, func() error {
resp, err = call.Context(ctx).Do()
return err
}, it.retry, true)
}, it.retry, true, setRetryHeaderHTTP(call))
if err != nil {
return "", err
}

View file

@@ -21,10 +21,12 @@ import (
"net/http"
"net/url"
"os"
"reflect"
"strings"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/option/internaloption"
raw "google.golang.org/api/storage/v1"
@@ -136,7 +138,18 @@ func (c *httpStorageClient) Close() error {
// Top-level methods.
func (c *httpStorageClient) GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) {
return "", errMethodNotSupported
s := callSettings(c.settings, opts...)
call := c.raw.Projects.ServiceAccount.Get(project)
var res *raw.ServiceAccount
err := run(ctx, func() error {
var err error
res, err = call.Context(ctx).Do()
return err
}, s.retry, s.idempotent, setRetryHeaderHTTP(call))
if err != nil {
return "", err
}
return res.EmailAddress, nil
}
func (c *httpStorageClient) CreateBucket(ctx context.Context, project string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) {
@@ -169,12 +182,50 @@ func (c *httpStorageClient) CreateBucket(ctx context.Context, project string, at
}
battrs, err = newBucket(b)
return err
}, s.retry, s.idempotent)
}, s.retry, s.idempotent, setRetryHeaderHTTP(req))
return battrs, err
}
func (c *httpStorageClient) ListBuckets(ctx context.Context, project string, opts ...storageOption) (*BucketIterator, error) {
return nil, errMethodNotSupported
func (c *httpStorageClient) ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator {
s := callSettings(c.settings, opts...)
it := &BucketIterator{
ctx: ctx,
projectID: project,
}
fetch := func(pageSize int, pageToken string) (token string, err error) {
req := c.raw.Buckets.List(it.projectID)
setClientHeader(req.Header())
req.Projection("full")
req.Prefix(it.Prefix)
req.PageToken(pageToken)
if pageSize > 0 {
req.MaxResults(int64(pageSize))
}
var resp *raw.Buckets
err = run(it.ctx, func() error {
resp, err = req.Context(it.ctx).Do()
return err
}, s.retry, s.idempotent, setRetryHeaderHTTP(req))
if err != nil {
return "", err
}
for _, item := range resp.Items {
b, err := newBucket(item)
if err != nil {
return "", err
}
it.buckets = append(it.buckets, b)
}
return resp.NextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
fetch,
func() int { return len(it.buckets) },
func() interface{} { b := it.buckets; it.buckets = nil; return b })
return it
}
// Bucket methods.
@@ -190,7 +241,7 @@ func (c *httpStorageClient) DeleteBucket(ctx context.Context, bucket string, con
req.UserProject(s.userProject)
}
return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent)
return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req))
}
func (c *httpStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) {
@@ -209,7 +260,7 @@ func (c *httpStorageClient) GetBucket(ctx context.Context, bucket string, conds
err = run(ctx, func() error {
resp, err = req.Context(ctx).Do()
return err
}, s.retry, s.idempotent)
}, s.retry, s.idempotent, setRetryHeaderHTTP(req))
var e *googleapi.Error
if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
@@ -220,14 +271,109 @@ func (c *httpStorageClient) GetBucket(ctx context.Context, bucket string, conds
}
return newBucket(resp)
}
func (c *httpStorageClient) UpdateBucket(ctx context.Context, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) {
return nil, errMethodNotSupported
func (c *httpStorageClient) UpdateBucket(ctx context.Context, bucket string, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) {
s := callSettings(c.settings, opts...)
rb := uattrs.toRawBucket()
req := c.raw.Buckets.Patch(bucket, rb).Projection("full")
setClientHeader(req.Header())
err := applyBucketConds("httpStorageClient.UpdateBucket", conds, req)
if err != nil {
return nil, err
}
if s.userProject != "" {
req.UserProject(s.userProject)
}
if uattrs != nil && uattrs.PredefinedACL != "" {
req.PredefinedAcl(uattrs.PredefinedACL)
}
if uattrs != nil && uattrs.PredefinedDefaultObjectACL != "" {
req.PredefinedDefaultObjectAcl(uattrs.PredefinedDefaultObjectACL)
}
var rawBucket *raw.Bucket
err = run(ctx, func() error {
rawBucket, err = req.Context(ctx).Do()
return err
}, s.retry, s.idempotent, setRetryHeaderHTTP(req))
if err != nil {
return nil, err
}
return newBucket(rawBucket)
}
func (c *httpStorageClient) LockBucketRetentionPolicy(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error {
return errMethodNotSupported
s := callSettings(c.settings, opts...)
var metageneration int64
if conds != nil {
metageneration = conds.MetagenerationMatch
}
req := c.raw.Buckets.LockRetentionPolicy(bucket, metageneration)
return run(ctx, func() error {
_, err := req.Context(ctx).Do()
return err
}, s.retry, s.idempotent, setRetryHeaderHTTP(req))
}
func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) (*ObjectIterator, error) {
return nil, errMethodNotSupported
func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator {
s := callSettings(c.settings, opts...)
it := &ObjectIterator{
ctx: ctx,
}
if q != nil {
it.query = *q
}
fetch := func(pageSize int, pageToken string) (string, error) {
req := c.raw.Objects.List(bucket)
setClientHeader(req.Header())
projection := it.query.Projection
if projection == ProjectionDefault {
projection = ProjectionFull
}
req.Projection(projection.String())
req.Delimiter(it.query.Delimiter)
req.Prefix(it.query.Prefix)
req.StartOffset(it.query.StartOffset)
req.EndOffset(it.query.EndOffset)
req.Versions(it.query.Versions)
req.IncludeTrailingDelimiter(it.query.IncludeTrailingDelimiter)
if len(it.query.fieldSelection) > 0 {
req.Fields("nextPageToken", googleapi.Field(it.query.fieldSelection))
}
req.PageToken(pageToken)
if s.userProject != "" {
req.UserProject(s.userProject)
}
if pageSize > 0 {
req.MaxResults(int64(pageSize))
}
var resp *raw.Objects
var err error
err = run(it.ctx, func() error {
resp, err = req.Context(it.ctx).Do()
return err
}, s.retry, s.idempotent, setRetryHeaderHTTP(req))
if err != nil {
var e *googleapi.Error
if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
err = ErrBucketNotExist
}
return "", err
}
for _, item := range resp.Items {
it.items = append(it.items, newObject(item))
}
for _, prefix := range resp.Prefixes {
it.items = append(it.items, &ObjectAttrs{Prefix: prefix})
}
return resp.NextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
fetch,
func() int { return len(it.items) },
func() interface{} { b := it.items; it.items = nil; return b })
return it
}
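Note: each Query field maps directly onto a parameter of the raw Objects.List call above, and entries from resp.Prefixes surface as ObjectAttrs with only Prefix set. A sketch of driving this through the veneer; the bucket name and prefix are placeholders:

package example

import (
	"context"
	"fmt"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

// listLogs lists objects under logs/ one "directory" level deep. Rows with a
// non-empty Prefix are the synthetic entries built from resp.Prefixes above.
func listLogs(ctx context.Context, client *storage.Client) error {
	it := client.Bucket("my-bucket").Objects(ctx, &storage.Query{Prefix: "logs/", Delimiter: "/"})
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println(attrs.Name, attrs.Prefix)
	}
}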
// Object metadata methods.
@@ -245,10 +391,26 @@ func (c *httpStorageClient) UpdateObject(ctx context.Context, bucket, object str
// Default Object ACL methods.
func (c *httpStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error {
return errMethodNotSupported
s := callSettings(c.settings, opts...)
req := c.raw.DefaultObjectAccessControls.Delete(bucket, string(entity))
configureACLCall(ctx, s.userProject, req)
return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req))
}
func (c *httpStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) {
return nil, errMethodNotSupported
s := callSettings(c.settings, opts...)
var acls *raw.ObjectAccessControls
var err error
req := c.raw.DefaultObjectAccessControls.List(bucket)
configureACLCall(ctx, s.userProject, req)
err = run(ctx, func() error {
acls, err = req.Do()
return err
}, s.retry, true, setRetryHeaderHTTP(req))
if err != nil {
return nil, err
}
return toObjectACLRules(acls.Items), nil
}
func (c *httpStorageClient) UpdateDefaultObjectACL(ctx context.Context, opts ...storageOption) (*ACLRule, error) {
return nil, errMethodNotSupported
@@ -257,13 +419,59 @@ func (c *httpStorageClient) UpdateDefaultObjectACL(ctx context.Context, opts ...
// Bucket ACL methods.
func (c *httpStorageClient) DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error {
return errMethodNotSupported
s := callSettings(c.settings, opts...)
req := c.raw.BucketAccessControls.Delete(bucket, string(entity))
configureACLCall(ctx, s.userProject, req)
return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req))
}
func (c *httpStorageClient) ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) {
return nil, errMethodNotSupported
s := callSettings(c.settings, opts...)
var acls *raw.BucketAccessControls
var err error
req := c.raw.BucketAccessControls.List(bucket)
configureACLCall(ctx, s.userProject, req)
err = run(ctx, func() error {
acls, err = req.Do()
return err
}, s.retry, true, setRetryHeaderHTTP(req))
if err != nil {
return nil, err
}
return toBucketACLRules(acls.Items), nil
}
func (c *httpStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error) {
return nil, errMethodNotSupported
s := callSettings(c.settings, opts...)
acl := &raw.BucketAccessControl{
Bucket: bucket,
Entity: string(entity),
Role: string(role),
}
req := c.raw.BucketAccessControls.Update(bucket, string(entity), acl)
configureACLCall(ctx, s.userProject, req)
var aclRule ACLRule
var err error
err = run(ctx, func() error {
acl, err = req.Do()
aclRule = toBucketACLRule(acl)
return err
}, s.retry, s.idempotent, setRetryHeaderHTTP(req))
if err != nil {
return nil, err
}
return &aclRule, nil
}
// configureACLCall sets the context, user project and headers on the apiary library call.
// This will panic if the call does not have the correct methods.
func configureACLCall(ctx context.Context, userProject string, call interface{ Header() http.Header }) {
vc := reflect.ValueOf(call)
vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
if userProject != "" {
vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(userProject)})
}
setClientHeader(call.Header())
}
// Object ACL methods.
@@ -308,7 +516,7 @@ func (c *httpStorageClient) GetIamPolicy(ctx context.Context, resource string, v
var err error
rp, err = call.Context(ctx).Do()
return err
}, s.retry, s.idempotent)
}, s.retry, s.idempotent, setRetryHeaderHTTP(call))
if err != nil {
return nil, err
}
@@ -328,7 +536,7 @@ func (c *httpStorageClient) SetIamPolicy(ctx context.Context, resource string, p
return run(ctx, func() error {
_, err := call.Context(ctx).Do()
return err
}, s.retry, s.idempotent)
}, s.retry, s.idempotent, setRetryHeaderHTTP(call))
}
func (c *httpStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) {
@@ -343,7 +551,7 @@ func (c *httpStorageClient) TestIamPermissions(ctx context.Context, resource str
var err error
res, err = call.Context(ctx).Do()
return err
}, s.retry, s.idempotent)
}, s.retry, s.idempotent, setRetryHeaderHTTP(call))
if err != nil {
return nil, err
}

View file

@@ -57,7 +57,7 @@ func (c *iamClient) GetWithVersion(ctx context.Context, resource string, request
err = run(ctx, func() error {
rp, err = call.Context(ctx).Do()
return err
}, c.retry, true)
}, c.retry, true, setRetryHeaderHTTP(call))
if err != nil {
return nil, err
}
@@ -78,7 +78,7 @@ func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (
return run(ctx, func() error {
_, err := call.Context(ctx).Do()
return err
}, c.retry, isIdempotent)
}, c.retry, isIdempotent, setRetryHeaderHTTP(call))
}
func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) {
@@ -94,7 +94,7 @@ func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (
err = run(ctx, func() error {
res, err = call.Context(ctx).Do()
return err
}, c.retry, true)
}, c.retry, true, setRetryHeaderHTTP(call))
if err != nil {
return nil, err
}

View file

@@ -15,4 +15,4 @@
package internal
// Version is the current tagged release of the library.
const Version = "1.22.0"
const Version = "1.22.1"

View file

@@ -16,29 +16,39 @@ package storage
import (
"context"
"errors"
"fmt"
"io"
"net"
"net/http"
"net/url"
"strings"
"cloud.google.com/go/internal"
"cloud.google.com/go/internal/version"
sinternal "cloud.google.com/go/storage/internal"
"github.com/google/uuid"
gax "github.com/googleapis/gax-go/v2"
"golang.org/x/xerrors"
"google.golang.org/api/googleapi"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
var defaultRetry *retryConfig = &retryConfig{}
var xGoogDefaultHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), sinternal.Version)
// run determines whether a retry is necessary based on the config and
// idempotency information. It then calls the function with or without retries
// as appropriate, using the configured settings.
func run(ctx context.Context, call func() error, retry *retryConfig, isIdempotent bool) error {
func run(ctx context.Context, call func() error, retry *retryConfig, isIdempotent bool, setHeader func(string, int)) error {
attempts := 1
invocationID := uuid.New().String()
if retry == nil {
retry = defaultRetry
}
if (retry.policy == RetryIdempotent && !isIdempotent) || retry.policy == RetryNever {
setHeader(invocationID, attempts)
return call()
}
bo := gax.Backoff{}
@@ -51,17 +61,39 @@ func run(ctx context.Context, call func() error, retry *retryConfig, isIdempoten
if retry.shouldRetry != nil {
errorFunc = retry.shouldRetry
}
return internal.Retry(ctx, bo, func() (stop bool, err error) {
setHeader(invocationID, attempts)
err = call()
attempts++
return !errorFunc(err), err
})
}
func setRetryHeaderHTTP(req interface{ Header() http.Header }) func(string, int) {
return func(invocationID string, attempts int) {
if req == nil {
return
}
header := req.Header()
invocationHeader := fmt.Sprintf("gccl-invocation-id/%v gccl-attempt-count/%v", invocationID, attempts)
xGoogHeader := strings.Join([]string{invocationHeader, xGoogDefaultHeader}, " ")
header.Set("x-goog-api-client", xGoogHeader)
}
}
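Note: on the second attempt of one logical call, the header set above would carry a value roughly like the following (the UUID and versions are illustrative, not from this diff): x-goog-api-client: gccl-invocation-id/7f9a1c2e-... gccl-attempt-count/2 gl-go/1.17.9 gccl/1.22.1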
// TODO: Implement method setting header via context for gRPC
func setRetryHeaderGRPC(_ context.Context) func(string, int) {
return func(_ string, _ int) {
return
}
}
func shouldRetry(err error) bool {
if err == nil {
return false
}
if xerrors.Is(err, io.ErrUnexpectedEOF) {
if errors.Is(err, io.ErrUnexpectedEOF) {
return true
}

View file

@@ -142,7 +142,7 @@ func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (re
err = run(ctx, func() error {
rn, err = call.Context(ctx).Do()
return err
}, b.retry, false)
}, b.retry, false, setRetryHeaderHTTP(call))
if err != nil {
return nil, err
}
@@ -164,7 +164,7 @@ func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notific
err = run(ctx, func() error {
res, err = call.Context(ctx).Do()
return err
}, b.retry, true)
}, b.retry, true, setRetryHeaderHTTP(call))
if err != nil {
return nil, err
}
@@ -191,5 +191,5 @@ func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err e
}
return run(ctx, func() error {
return call.Context(ctx).Do()
}, b.retry, true)
}, b.retry, true, setRetryHeaderHTTP(call))
}

View file

@@ -210,7 +210,7 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
gen = gen64
}
return nil
}, o.retry, true)
}, o.retry, true, setRetryHeaderHTTP(&readerRequestWrapper{req}))
if err != nil {
return nil, err
}
@@ -356,6 +356,16 @@ func setConditionsHeaders(headers http.Header, conds *Conditions) error {
return nil
}
// Wrap a request to look similar to an apiary library request, in order to
// be used by run().
type readerRequestWrapper struct {
req *http.Request
}
func (w *readerRequestWrapper) Header() http.Header {
return w.req.Header
}
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// Reader reads a Cloud Storage object.
@@ -492,7 +502,7 @@ func (o *ObjectHandle) newRangeReaderWithGRPC(ctx context.Context, offset, lengt
msg, err = stream.Recv()
return err
}, o.retry, true)
}, o.retry, true, setRetryHeaderGRPC(ctx))
if err != nil {
// Close the stream context we just created to ensure we don't leak
// resources.

View file

@@ -39,12 +39,10 @@ import (
"cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/trace"
"cloud.google.com/go/internal/version"
"cloud.google.com/go/storage/internal"
gapic "cloud.google.com/go/storage/internal/apiv2"
"github.com/googleapis/gax-go/v2"
"golang.org/x/oauth2/google"
"golang.org/x/xerrors"
"google.golang.org/api/googleapi"
"google.golang.org/api/option"
"google.golang.org/api/option/internaloption"
@@ -88,10 +86,9 @@ const (
ScopeReadWrite = raw.DevstorageReadWriteScope
)
var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), internal.Version)
// TODO: remove this once header with invocation ID is applied to all methods.
func setClientHeader(headers http.Header) {
headers.Set("x-goog-api-client", xGoogHeader)
headers.Set("x-goog-api-client", xGoogDefaultHeader)
}
// Client is a client for interacting with Google Cloud Storage.
@@ -122,6 +119,13 @@ type Client struct {
// Clients should be reused instead of created as needed. The methods of Client
// are safe for concurrent use by multiple goroutines.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
// Use the experimental gRPC client if the env var is set.
// This is an experimental API and not intended for public use.
if withGRPC := os.Getenv("STORAGE_USE_GRPC"); withGRPC != "" {
return newGRPCClient(ctx, opts...)
}
var creds *google.Credentials
// In general, it is recommended to use raw.NewService instead of htransport.NewClient
@@ -195,34 +199,18 @@
}, nil
}
// hybridClientOptions carries the set of client options for HTTP and gRPC clients.
type hybridClientOptions struct {
HTTPOpts []option.ClientOption
GRPCOpts []option.ClientOption
}
// newHybridClient creates a new Storage client that initializes a gRPC-based client
// for media upload and download operations.
// newGRPCClient creates a new Storage client that initializes a gRPC-based
// client. Calls that have not been implemented in gRPC will panic.
//
// This is an experimental API and not intended for public use.
func newHybridClient(ctx context.Context, opts *hybridClientOptions) (*Client, error) {
if opts == nil {
opts = &hybridClientOptions{}
}
opts.GRPCOpts = append(defaultGRPCOptions(), opts.GRPCOpts...)
c, err := NewClient(ctx, opts.HTTPOpts...)
func newGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
opts = append(defaultGRPCOptions(), opts...)
g, err := gapic.NewClient(ctx, opts...)
if err != nil {
return nil, err
}
g, err := gapic.NewClient(ctx, opts.GRPCOpts...)
if err != nil {
return nil, err
}
c.gc = g
return c, nil
return &Client{gc: g}, nil
}
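Note: a sketch of opting in to this experimental path; it mirrors the STORAGE_USE_GRPC check in NewClient above and is not meant for production use:

package example

import (
	"context"
	"os"

	"cloud.google.com/go/storage"
)

// newExperimentalGRPCClient routes client construction through newGRPCClient
// by setting STORAGE_USE_GRPC before NewClient. Calls that are not yet
// implemented over gRPC will panic or fail, per the doc comment above.
func newExperimentalGRPCClient(ctx context.Context) (*storage.Client, error) {
	os.Setenv("STORAGE_USE_GRPC", "true")
	return storage.NewClient(ctx)
}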
// Close closes the Client.
@@ -926,9 +914,9 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error
}
var obj *raw.Object
setClientHeader(call.Header())
err = run(ctx, func() error { obj, err = call.Do(); return err }, o.retry, true)
err = run(ctx, func() error { obj, err = call.Do(); return err }, o.retry, true, setRetryHeaderHTTP(call))
var e *googleapi.Error
if ok := xerrors.As(err, &e); ok && e.Code == http.StatusNotFound {
if errors.As(err, &e) && e.Code == http.StatusNotFound {
return nil, ErrObjectNotExist
}
if err != nil {
@@ -1031,9 +1019,9 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
if o.conds != nil && o.conds.MetagenerationMatch != 0 {
isIdempotent = true
}
err = run(ctx, func() error { obj, err = call.Do(); return err }, o.retry, isIdempotent)
err = run(ctx, func() error { obj, err = call.Do(); return err }, o.retry, isIdempotent, setRetryHeaderHTTP(call))
var e *googleapi.Error
if ok := xerrors.As(err, &e); ok && e.Code == http.StatusNotFound {
if errors.As(err, &e) && e.Code == http.StatusNotFound {
return nil, ErrObjectNotExist
}
if err != nil {
@@ -1101,9 +1089,9 @@ func (o *ObjectHandle) Delete(ctx context.Context) error {
if (o.conds != nil && o.conds.GenerationMatch != 0) || o.gen >= 0 {
isIdempotent = true
}
err := run(ctx, func() error { return call.Do() }, o.retry, isIdempotent)
err := run(ctx, func() error { return call.Do() }, o.retry, isIdempotent, setRetryHeaderHTTP(call))
var e *googleapi.Error
if ok := xerrors.As(err, &e); ok && e.Code == http.StatusNotFound {
if errors.As(err, &e) && e.Code == http.StatusNotFound {
return ErrObjectNotExist
}
return err
@@ -1129,6 +1117,9 @@ func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle {
// attribute is specified, the content type will be automatically sniffed
// using net/http.DetectContentType.
//
// Note that each Writer allocates an internal buffer of size Writer.ChunkSize.
// See the ChunkSize docs for more information.
//
// It is the caller's responsibility to call Close when writing is done. To
// stop writing without saving the data, cancel the context.
func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer {
@@ -2016,7 +2007,7 @@ func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string,
err = run(ctx, func() error {
res, err = r.Context(ctx).Do()
return err
}, c.retry, true)
}, c.retry, true, setRetryHeaderHTTP(r))
if err != nil {
return "", err
}

View file

@@ -25,7 +25,6 @@ import (
"unicode/utf8"
"github.com/golang/protobuf/proto"
"golang.org/x/xerrors"
"google.golang.org/api/googleapi"
raw "google.golang.org/api/storage/v1"
storagepb "google.golang.org/genproto/googleapis/storage/v2"
@@ -62,21 +61,26 @@ type Writer struct {
// ChunkSize controls the maximum number of bytes of the object that the
// Writer will attempt to send to the server in a single request. Objects
// smaller than the size will be sent in a single request, while larger
// objects will be split over multiple requests. The size will be rounded up
// to the nearest multiple of 256K.
// objects will be split over multiple requests. The value will be rounded up
// to the nearest multiple of 256K. The default ChunkSize is 16MiB.
//
// ChunkSize will default to a reasonable value. If you perform many
// concurrent writes of small objects (under ~8MB), you may wish to set ChunkSize
// to a value that matches your objects' sizes to avoid consuming large
// amounts of memory. See
// Each Writer will internally allocate a buffer of size ChunkSize. This is
// used to buffer input data and allow for the input to be sent again if a
// request must be retried.
//
// If you upload small objects (< 16MiB), you should set ChunkSize
// to a value slightly larger than the objects' sizes to avoid memory bloat.
// This is especially important if you are uploading many small objects
// concurrently. See
// https://cloud.google.com/storage/docs/json_api/v1/how-tos/upload#size
// for more information about performance trade-offs related to ChunkSize.
//
// If ChunkSize is set to zero, chunking will be disabled and the object will
// be uploaded in a single request without the use of a buffer. This will
// further reduce memory used during uploads, but will also prevent the writer
// from retrying in case of a transient error from the server, since a buffer
// is required in order to retry the failed request.
// from retrying in case of a transient error from the server or resuming an
// upload that fails midway through, since the buffer is required in order to
// retry the failed request.
//
// ChunkSize must be set before the first Write call.
ChunkSize int
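A hedged sketch of the tuning advice above: for small objects, size the buffer just above the payload so the Writer does not allocate the 16MiB default. The bucket name, object name, and 1MiB figure are illustrative only:

package upload

import (
    "context"

    "cloud.google.com/go/storage"
)

func uploadSmall(ctx context.Context, client *storage.Client, payload []byte) error {
    w := client.Bucket("my-bucket").Object("small.json").NewWriter(ctx)
    // Buffer ~1MiB instead of the 16MiB default; must be set before the
    // first Write. Setting ChunkSize to 0 would disable buffering, and
    // with it retries and resumption, as described above.
    w.ChunkSize = 1 << 20
    if _, err := w.Write(payload); err != nil {
        w.Close() // best-effort cleanup; the Write error is the root cause
        return err
    }
    // Close flushes the remaining buffered bytes and finalizes the object.
    return w.Close()
}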
@ -261,7 +265,7 @@ func (w *Writer) Write(p []byte) (n int, err error) {
// Preserve existing functionality that when context is canceled, Write will return
// context.Canceled instead of "io: read/write on closed pipe". This hides the
// pipe implementation detail from users and makes Write seem as though it's an RPC.
if xerrors.Is(werr, context.Canceled) || xerrors.Is(werr, context.DeadlineExceeded) {
if errors.Is(werr, context.Canceled) || errors.Is(werr, context.DeadlineExceeded) {
return n, werr
}
}
@ -452,17 +456,12 @@ func (w *Writer) openGRPC() error {
//
// This is an experimental API and not intended for public use.
func (w *Writer) startResumableUpload() error {
var common *storagepb.CommonRequestParams
if w.o.userProject != "" {
common = &storagepb.CommonRequestParams{UserProject: w.o.userProject}
}
spec, err := w.writeObjectSpec()
if err != nil {
return err
}
upres, err := w.o.c.gc.StartResumableWrite(w.ctx, &storagepb.StartResumableWriteRequest{
WriteObjectSpec: spec,
CommonRequestParams: common,
WriteObjectSpec: spec,
})
w.upid = upres.GetUploadId()

19
vendor/github.com/antzucaro/matchr/COPYING.txt generated vendored Normal file
View file

@ -0,0 +1,19 @@
Matchr: an approximate string matching library for the Go programming language
Copyright (C) 2013-2014 Ant Zucaro
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
You can contact Ant Zucaro at azucaro at gmail dot com.

12
vendor/github.com/antzucaro/matchr/README.md generated vendored Normal file
View file

@ -0,0 +1,12 @@
# matchr
An approximate string matching library for the [Go programming language](http://www.golang.org).
## Rationale
Data used in record linkage can often be of dubious quality. Typographical
errors or changing data elements (to name a few things) make establishing similarity between two sets of data
difficult. Rather than use exact string comparison in such situations, it is
vital to have a means to identify how similar two strings are. Similarity functions can cater
to certain data sets in order to make better matching decisions. The matchr library provides
several of these similarity functions.

View file

@ -0,0 +1,112 @@
package matchr
// DamerauLevenshtein computes the Damerau-Levenshtein distance between two
// strings. The returned value - distance - is the number of insertions,
// deletions, substitutions, and transpositions it takes to transform one
// string (s1) into another (s2). Each step in the transformation "costs"
// one distance point. It is similar to the Optimal String Alignment
// algorithm, but is more complex because it allows multiple edits on
// substrings.
//
// This implementation is based on the one found on Wikipedia at
// http://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance#Distance_with_adjacent_transpositions
// as well as KevinStern's Java implementation found at
// https://github.com/KevinStern/software-and-algorithms.
func DamerauLevenshtein(s1 string, s2 string) (distance int) {
// index by code point, not byte
r1 := []rune(s1)
r2 := []rune(s2)
// the maximum possible distance
inf := len(r1) + len(r2)
// if one string is blank, we need insertions
// for all characters in the other one
if len(r1) == 0 {
return len(r2)
}
if len(r2) == 0 {
return len(r1)
}
// construct the edit-tracking matrix
matrix := make([][]int, len(r1))
for i := range matrix {
matrix[i] = make([]int, len(r2))
}
// seen characters
seenRunes := make(map[rune]int)
if r1[0] != r2[0] {
matrix[0][0] = 1
}
seenRunes[r1[0]] = 0
for i := 1; i < len(r1); i++ {
deleteDist := matrix[i-1][0] + 1
insertDist := (i+1)*1 + 1
var matchDist int
if r1[i] == r2[0] {
matchDist = i
} else {
matchDist = i + 1
}
matrix[i][0] = min(min(deleteDist, insertDist), matchDist)
}
for j := 1; j < len(r2); j++ {
deleteDist := (j + 1) * 2
insertDist := matrix[0][j-1] + 1
var matchDist int
if r1[0] == r2[j] {
matchDist = j
} else {
matchDist = j + 1
}
matrix[0][j] = min(min(deleteDist, insertDist), matchDist)
}
for i := 1; i < len(r1); i++ {
var maxSrcMatchIndex int
if r1[i] == r2[0] {
maxSrcMatchIndex = 0
} else {
maxSrcMatchIndex = -1
}
for j := 1; j < len(r2); j++ {
swapIndex, ok := seenRunes[r2[j]]
jSwap := maxSrcMatchIndex
deleteDist := matrix[i-1][j] + 1
insertDist := matrix[i][j-1] + 1
matchDist := matrix[i-1][j-1]
if r1[i] != r2[j] {
matchDist += 1
} else {
maxSrcMatchIndex = j
}
// for transpositions
var swapDist int
if ok && jSwap != -1 {
iSwap := swapIndex
var preSwapCost int
if iSwap == 0 && jSwap == 0 {
preSwapCost = 0
} else {
preSwapCost = matrix[maxI(0, iSwap-1)][maxI(0, jSwap-1)]
}
swapDist = i + j + preSwapCost - iSwap - jSwap - 1
} else {
swapDist = inf
}
matrix[i][j] = min(min(min(deleteDist, insertDist), matchDist), swapDist)
}
seenRunes[r1[i]] = i
}
return matrix[len(r1)-1][len(r2)-1]
}
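A usage sketch, assuming the vendored import path; "CA" vs "ABC" is the textbook pair where allowing further edits after a transposition matters:

package main

import (
    "fmt"

    "github.com/antzucaro/matchr"
)

func main() {
    // "CA" -> "AC" (transposition) -> "ABC" (insertion): two edits.
    fmt.Println(matchr.DamerauLevenshtein("CA", "ABC")) // 2
}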

Binary file not shown.

25
vendor/github.com/antzucaro/matchr/hamming.go generated vendored Normal file
View file

@ -0,0 +1,25 @@
package matchr
import "errors"
// Hamming computes the Hamming distance between two equal-length strings.
// This is the number of times the two strings differ between characters at
// the same index. This implementation is based on the algorithm
// description found at http://en.wikipedia.org/wiki/Hamming_distance.
func Hamming(s1 string, s2 string) (distance int, err error) {
// index by code point, not byte
r1 := []rune(s1)
r2 := []rune(s2)
if len(r1) != len(r2) {
err = errors.New("hamming distance is undefined for strings of unequal length")
return
}
for i, v := range r1 {
if r2[i] != v {
distance += 1
}
}
return
}
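Usage sketch: the error path fires on unequal lengths; otherwise mismatched positions are counted, three for the classic pair below:

package main

import (
    "fmt"
    "log"

    "github.com/antzucaro/matchr"
)

func main() {
    d, err := matchr.Hamming("karolin", "kathrin")
    if err != nil {
        log.Fatal(err) // returned only when the strings differ in length
    }
    fmt.Println(d) // 3: positions r/t, o/h, and l/r differ
}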

135
vendor/github.com/antzucaro/matchr/jarowinkler.go generated vendored Normal file
View file

@ -0,0 +1,135 @@
package matchr
func jaroWinklerBase(s1 string, s2 string,
longTolerance bool, winklerize bool) (distance float64) {
// index by code point, not byte
r1 := []rune(s1)
r2 := []rune(s2)
r1Length := len(r1)
r2Length := len(r2)
if r1Length == 0 || r2Length == 0 {
return
}
minLength := 0
if r1Length > r2Length {
minLength = r1Length
} else {
minLength = r2Length
}
searchRange := minLength
searchRange = (searchRange / 2) - 1
if searchRange < 0 {
searchRange = 0
}
var lowLim, hiLim, transCount, commonChars int
var i, j, k int
r1Flag := make([]bool, r1Length+1)
r2Flag := make([]bool, r2Length+1)
// find the common chars within the acceptable range
commonChars = 0
for i = range r1 {
if i >= searchRange {
lowLim = i - searchRange
} else {
lowLim = 0
}
if (i + searchRange) <= (r2Length - 1) {
hiLim = i + searchRange
} else {
hiLim = r2Length - 1
}
for j := lowLim; j <= hiLim; j++ {
if !r2Flag[j] && r2[j] == r1[i] {
r2Flag[j] = true
r1Flag[i] = true
commonChars++
break
}
}
}
// if we have nothing in common at this point, nothing else can be done
if commonChars == 0 {
return
}
// otherwise we count the transpositions
k = 0
transCount = 0
for i = range r1 {
if r1Flag[i] {
for j = k; j < r2Length; j++ {
if r2Flag[j] {
k = j + 1
break
}
}
if r1[i] != r2[j] {
transCount++
}
}
}
transCount /= 2
// adjust for similarities in nonmatched characters
distance = float64(commonChars)/float64(r1Length) +
float64(commonChars)/float64(r2Length) +
(float64(commonChars-transCount))/float64(commonChars)
distance /= 3.0
// give more weight to already-similar strings
if winklerize && distance > 0.7 {
// the first 4 characters in common
if minLength >= 4 {
j = 4
} else {
j = minLength
}
for i = 0; i < j && len(r1) > i && len(r2) > i && r1[i] == r2[i] && nan(r1[i]); i++ {
}
if i > 0 {
distance += float64(i) * 0.1 * (1.0 - distance)
}
if longTolerance && (minLength > 4) && (commonChars > i+1) &&
(2*commonChars >= minLength+i) {
if nan(r1[0]) {
distance += (1.0 - distance) * (float64(commonChars-i-1) /
(float64(r1Length) + float64(r2Length) - float64(i*2) + 2))
}
}
}
return
}
// Jaro computes the Jaro edit distance between two strings. It represents
// this with a float64 between 0 and 1 inclusive, with 0 indicating the two
// strings are not at all similar and 1 indicating the two strings are exact
// matches.
//
// See http://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance for a
// full description.
func Jaro(r1 string, r2 string) (distance float64) {
return jaroWinklerBase(r1, r2, false, false)
}
// JaroWinkler computes the Jaro-Winkler edit distance between two strings.
// This is a modification of the Jaro algorithm that gives additional weight
// to prefix matches.
func JaroWinkler(r1 string, r2 string, longTolerance bool) (distance float64) {
return jaroWinklerBase(r1, r2, longTolerance, true)
}
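A sketch with the commonly cited MARTHA/MARHTA pair; the decimals in the comments are the standard reference values for this pair, shown rounded:

package main

import (
    "fmt"

    "github.com/antzucaro/matchr"
)

func main() {
    fmt.Printf("%.3f\n", matchr.Jaro("MARTHA", "MARHTA"))               // ~0.944
    // The shared "MAR" prefix lifts the Winkler variant above plain Jaro.
    fmt.Printf("%.3f\n", matchr.JaroWinkler("MARTHA", "MARHTA", false)) // ~0.961
}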

48
vendor/github.com/antzucaro/matchr/levenshtein.go generated vendored Normal file
View file

@ -0,0 +1,48 @@
package matchr
// Levenshtein computes the Levenshtein distance between two
// strings. The returned value - distance - is the number of insertions,
// deletions, and substitutions it takes to transform one
// string (s1) into another (s2). Each step in the transformation "costs"
// one distance point.
func Levenshtein(s1 string, s2 string) (distance int) {
// index by code point, not byte
r1 := []rune(s1)
r2 := []rune(s2)
rows := len(r1) + 1
cols := len(r2) + 1
var d1 int
var d2 int
var d3 int
var i int
var j int
dist := make([]int, rows*cols)
for i = 0; i < rows; i++ {
dist[i*cols] = i
}
for j = 0; j < cols; j++ {
dist[j] = j
}
for j = 1; j < cols; j++ {
for i = 1; i < rows; i++ {
if r1[i-1] == r2[j-1] {
dist[(i*cols)+j] = dist[((i-1)*cols)+(j-1)]
} else {
d1 = dist[((i-1)*cols)+j] + 1
d2 = dist[(i*cols)+(j-1)] + 1
d3 = dist[((i-1)*cols)+(j-1)] + 1
dist[(i*cols)+j] = min(d1, min(d2, d3))
}
}
}
distance = dist[(cols*rows)-1]
return
}
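A usage sketch with the classic example:

package main

import (
    "fmt"

    "github.com/antzucaro/matchr"
)

func main() {
    // substitute k->s, substitute e->i, insert g: three edits.
    fmt.Println(matchr.Levenshtein("kitten", "sitting")) // 3
}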

View file

@ -0,0 +1,30 @@
package matchr
// LongestCommonSubsequence computes the length of the longest common
// subsequence between two strings: the longest run of characters, not
// necessarily contiguous, that appears in both strings in the same
// relative order.
func LongestCommonSubsequence(s1, s2 string) int {
r1 := []rune(s1)
r2 := []rune(s2)
table := make([][]int, len(s1)+1)
// Construct 2D table
for i := range table {
table[i] = make([]int, len(s2)+1)
}
var i int
var j int
for i = len(r1) - 1; i >= 0; i-- {
for j = len(r2) - 1; j >= 0; j-- {
if r1[i] == r2[j] {
table[i][j] = 1 + table[i+1][j+1]
} else {
table[i][j] = maxI(table[i+1][j], table[i][j+1])
}
}
}
return table[0][0]
}
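A usage sketch with the standard textbook pair:

package main

import (
    "fmt"

    "github.com/antzucaro/matchr"
)

func main() {
    // "BCBA" (among others) is a longest common subsequence.
    fmt.Println(matchr.LongestCommonSubsequence("ABCBDAB", "BDCABA")) // 4
}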

721
vendor/github.com/antzucaro/matchr/metaphone.go generated vendored Normal file
View file

@ -0,0 +1,721 @@
package matchr
import (
"bytes"
"strings"
)
type metaphoneresult struct {
// the maximum number of code values to calculate
maxLength int
// whether to calculate an alternate
calcAlternate bool
// no direct modifications - only through add()
primary bytes.Buffer
alternate bytes.Buffer
// length of the private buffers
PrimaryLength int
AlternateLength int
}
func newMetaphoneresult(maxLength int, calcAlternate bool) (r *metaphoneresult) {
r = &metaphoneresult{maxLength: maxLength, calcAlternate: calcAlternate}
return
}
func (r *metaphoneresult) add(c1 string, c2 string) {
if c1 != "" {
r.primary.WriteString(c1)
r.PrimaryLength += len(c1)
}
if c2 != "" && r.calcAlternate {
r.alternate.WriteString(c2)
r.AlternateLength += len(c2)
}
}
func (r *metaphoneresult) isComplete() bool {
return r.PrimaryLength >= r.maxLength && r.AlternateLength >= r.maxLength
}
func (r *metaphoneresult) result() (primary string, alternate string) {
primary = r.primary.String()
if len(primary) > r.maxLength {
primary = primary[0:r.maxLength]
}
alternate = r.alternate.String()
if len(alternate) > r.maxLength {
alternate = alternate[0:r.maxLength]
}
return
}
// utility functions for checking things within a string
func isSlavoGermanic(value string) bool {
return strings.Contains(value, "W") || strings.Contains(value, "K") ||
strings.Contains(value, "CZ") || strings.Contains(value, "WITZ")
}
func isSilentStart(input runestring) bool {
SILENT_START := [...]string{"GN", "KN", "PN", "WR", "PS"}
prefix := input.SafeSubstr(0, 2)
for _, criteria := range SILENT_START {
if prefix == criteria {
return true
}
}
return false
}
func handleVowel(result *metaphoneresult, index int) int {
if index == 0 {
result.add("A", "A")
}
return index + 1
}
/******************************************************************************
* Entry handlers for letters.
*****************************************************************************/
func handleC(input runestring, result *metaphoneresult, index int) int {
if conditionC0(input, index) {
result.add("K", "K")
index += 2
} else if index == 0 && input.Contains(index, 6, "CAESAR") {
result.add("S", "S")
index += 2
} else if input.Contains(index, 2, "CH") {
index = handleCH(input, result, index)
} else if input.Contains(index, 2, "CZ") &&
!input.Contains(index-2, 4, "WICZ") {
result.add("S", "X")
index += 2
} else if input.Contains(index+1, 3, "CIA") {
result.add("X", "X")
index += 3
} else if input.Contains(index, 2, "CC") &&
!(index == 1 && input.SafeAt(0) == 'M') {
return handleCC(input, result, index)
} else if input.Contains(index, 2, "CK") ||
input.Contains(index, 2, "CG") ||
input.Contains(index, 2, "CQ") {
result.add("K", "K")
index += 2
} else if input.Contains(index, 2, "CI") ||
input.Contains(index, 2, "CE") ||
input.Contains(index, 2, "CY") {
if input.Contains(index, 3, "CIO") ||
input.Contains(index, 3, "CIE") ||
input.Contains(index, 3, "CIA") {
result.add("S", "X")
} else {
result.add("S", "S")
}
index += 2
} else {
result.add("K", "K")
if input.Contains(index+1, 2, " C") ||
input.Contains(index+1, 2, " Q") ||
input.Contains(index+1, 2, " G") {
index += 3
} else if (input.Contains(index+1, 1, "C") ||
input.Contains(index+1, 1, "K") ||
input.Contains(index+1, 1, "Q")) &&
!(input.Contains(index+1, 2, "CE") ||
input.Contains(index+1, 2, "CI")) {
index += 2
} else {
index++
}
}
return index
}
func handleCC(input runestring, result *metaphoneresult, index int) int {
if input.Contains(index+2, 1, "I", "E", "H") &&
!input.Contains(index+2, 2, "HU") {
if (index == 1 && input.SafeAt(index-1) == 'A') ||
(input.Contains(index-1, 5, "UCCEE", "UCCES")) {
result.add("KS", "KS")
} else {
result.add("X", "X")
}
index += 3
} else {
result.add("K", "K")
index += 2
}
return index
}
func handleCH(input runestring, result *metaphoneresult, index int) int {
if index > 0 && input.Contains(index, 4, "CHAE") {
result.add("K", "X")
return index + 2
} else if conditionCH0(input, index) {
result.add("K", "K")
return index + 2
// TODO: combine this condition with the one above?
} else if conditionCH1(input, index) {
result.add("K", "K")
return index + 2
} else {
if index > 0 {
if input.Contains(0, 2, "MC") {
result.add("K", "K")
} else {
result.add("X", "K")
}
} else {
result.add("X", "X")
}
return index + 2
}
}
func handleD(input runestring, result *metaphoneresult, index int) int {
if input.Contains(index, 2, "DG") {
if input.Contains(index+2, 1, "I", "E", "Y") {
result.add("J", "J")
index += 3
} else {
result.add("TK", "TK")
index += 2
}
} else if input.Contains(index, 2, "DT", "DD") {
result.add("T", "T")
index += 2
} else {
result.add("T", "T")
index++
}
return index
}
func handleG(input runestring, result *metaphoneresult, index int, slavoGermanic bool) int {
if input.SafeAt(index+1) == 'H' {
index = handleGH(input, result, index)
} else if input.SafeAt(index+1) == 'N' {
if index == 1 && isVowel(input.SafeAt(0)) && !slavoGermanic {
result.add("KN", "N")
} else if !input.Contains(index+2, 2, "EY") && input.SafeAt(index+1) != 'Y' && !slavoGermanic {
result.add("N", "KN")
} else {
result.add("KN", "KN")
}
index += 2
} else if input.Contains(index+1, 2, "LI") && !slavoGermanic {
result.add("KL", "L")
index += 2
} else if index == 0 && (input.SafeAt(index+1) == 'Y' ||
input.Contains(index+1, 2, "ES", "EP", "EB", "EL", "EY", "IB", "IL", "IN", "IE", "EI", "ER")) {
result.add("K", "J")
index += 2
} else if (input.Contains(index+1, 2, "ER") ||
input.SafeAt(index+1) == 'Y') &&
!input.Contains(0, 6, "DANGER", "RANGER", "MANGER") &&
!input.Contains(index-1, 1, "E", "I") &&
!input.Contains(index-1, 3, "RGY", "OGY") {
result.add("K", "J")
index += 2
} else if input.Contains(index+1, 1, "E", "I", "Y") ||
input.Contains(index-1, 4, "AGGI", "OGGI") {
if input.Contains(0, 4, "VAN ", "VON ") ||
input.Contains(0, 3, "SCH") ||
input.Contains(index+1, 2, "ET") {
result.add("K", "K")
} else if input.Contains(index+1, 3, "IER") {
result.add("J", "J")
} else {
result.add("J", "K")
}
index += 2
} else if input.SafeAt(index+1) == 'G' {
result.add("K", "K")
index += 2
} else {
result.add("K", "K")
index++
}
return index
}
func handleGH(input runestring, result *metaphoneresult, index int) int {
if index > 0 && !isVowel(input.SafeAt(index-1)) {
result.add("K", "K")
index += 2
} else if index == 0 {
if input.SafeAt(index+2) == 'I' {
result.add("J", "J")
} else {
result.add("K", "K")
}
index += 2
} else if (index > 1 && input.Contains(index-2, 1, "B", "H", "D")) ||
(index > 2 && input.Contains(index-3, 1, "B", "H", "D")) ||
(index > 3 && input.Contains(index-4, 1, "B", "H")) {
index += 2
} else {
if index > 2 && input.SafeAt(index-1) == 'U' &&
input.Contains(index-3, 1, "C", "G", "L", "R", "T") {
result.add("F", "F")
} else if index > 0 && input.SafeAt(index-1) != 'I' {
result.add("K", "K")
}
index += 2
}
return index
}
func handleH(input runestring, result *metaphoneresult, index int) int {
if (index == 0 || isVowel(input.SafeAt(index-1))) &&
isVowel(input.SafeAt(index+1)) {
result.add("H", "H")
index += 2
} else {
index++
}
return index
}
func handleJ(input runestring, result *metaphoneresult, index int, slavoGermanic bool) int {
if input.Contains(index, 4, "JOSE") || input.Contains(0, 4, "SAN ") {
if (index == 0 && (input.SafeAt(index+4) == ' ') ||
len(input) == 4) || input.Contains(0, 4, "SAN ") {
result.add("H", "H")
} else {
result.add("J", "H")
}
index++
} else {
if index == 0 && !input.Contains(index, 4, "JOSE") {
result.add("J", "A")
} else if isVowel(input.SafeAt(index-1)) && !slavoGermanic &&
(input.SafeAt(index+1) == 'A' || input.SafeAt(index+1) == 'O') {
result.add("J", "H")
} else if index == (len(input) - 1) {
result.add("J", " ")
} else if !input.Contains(index+1, 1,
"L", "T", "K", "S", "N", "M", "B", "Z") &&
!input.Contains(index-1, 1, "S", "K", "L") {
result.add("J", "J")
}
if input.SafeAt(index+1) == 'J' {
index += 2
} else {
index++
}
}
return index
}
func handleL(input runestring, result *metaphoneresult, index int) int {
if input.SafeAt(index+1) == 'L' {
if conditionL0(input, index) {
result.add("L", "")
} else {
result.add("L", "L")
}
index += 2
} else {
result.add("L", "L")
index++
}
return index
}
func handleP(input runestring, result *metaphoneresult, index int) int {
if input.SafeAt(index+1) == 'H' {
result.add("F", "F")
index += 2
} else {
result.add("P", "P")
if input.Contains(index+1, 1, "P", "B") {
index += 2
} else {
index++
}
}
return index
}
func handleR(input runestring, result *metaphoneresult, index int, slavoGermanic bool) int {
if index == (len(input)-1) && !slavoGermanic &&
input.Contains(index-2, 2, "IE") &&
!input.Contains(index-4, 2, "ME", "MA") {
result.add("", "R")
} else {
result.add("R", "R")
}
if input.SafeAt(index+1) == 'R' {
index += 2
} else {
index++
}
return index
}
func handleS(input runestring, result *metaphoneresult, index int, slavoGermanic bool) int {
if input.Contains(index-1, 3, "ISL", "YSL") {
index++
} else if index == 0 && input.Contains(index, 5, "SUGAR") {
result.add("X", "S")
index++
} else if input.Contains(index, 2, "SH") {
if input.Contains(index+1, 4, "HEIM", "HOEK", "HOLM", "HOLZ") {
result.add("S", "S")
} else {
result.add("X", "X")
}
index += 2
} else if input.Contains(index, 3, "SIO", "SIA") ||
input.Contains(index, 4, "SIAN") {
if slavoGermanic {
result.add("S", "S")
} else {
result.add("S", "X")
}
index += 3
} else if (index == 0 && input.Contains(index+1, 1, "M", "N", "L", "W")) ||
input.Contains(index+1, 1, "Z") {
result.add("S", "X")
if input.Contains(index+1, 1, "Z") {
index += 2
} else {
index++
}
} else if input.Contains(index, 2, "SC") {
index = handleSC(input, result, index)
} else {
if index == len(input)-1 &&
input.Contains(index-2, 2, "AI", "OI") {
result.add("", "S")
} else {
result.add("S", "S")
}
if input.Contains(index+1, 1, "S", "Z") {
index += 2
} else {
index++
}
}
return index
}
func handleSC(input runestring, result *metaphoneresult, index int) int {
if input.SafeAt(index+2) == 'H' {
if input.Contains(index+3, 2, "OO", "ER", "EN", "UY", "ED", "EM") {
if input.Contains(index+3, 2, "ER", "EN") {
result.add("X", "SK")
} else {
result.add("SK", "SK")
}
} else {
if index == 0 && !isVowel(input.SafeAt(3)) && input.SafeAt(3) != 'W' {
result.add("X", "S")
} else {
result.add("X", "X")
}
}
} else if input.Contains(index+2, 1, "I", "E", "Y") {
result.add("S", "S")
} else {
result.add("SK", "SK")
}
index += 3
return index
}
func handleT(input runestring, result *metaphoneresult, index int) int {
if input.Contains(index, 4, "TION") {
result.add("X", "X")
index += 3
} else if input.Contains(index, 3, "TIA", "TCH") {
result.add("X", "X")
index += 3
} else if input.Contains(index, 2, "TH") || input.Contains(index, 3, "TTH") {
if input.Contains(index+2, 2, "OM", "AM") ||
input.Contains(0, 4, "VAN ", "VON ") ||
input.Contains(0, 3, "SCH") {
result.add("T", "T")
} else {
result.add("0", "T")
}
index += 2
} else {
result.add("T", "T")
if input.Contains(index+1, 1, "T", "D") {
index += 2
} else {
index++
}
}
return index
}
func handleW(input runestring, result *metaphoneresult, index int) int {
if input.Contains(index, 2, "WR") {
result.add("R", "R")
index += 2
} else {
if index == 0 && (isVowel(input.SafeAt(index+1)) ||
input.Contains(index, 2, "WH")) {
if isVowel(input.SafeAt(index + 1)) {
result.add("A", "F")
} else {
result.add("A", "A")
}
index++
} else if (index == len(input)-1 && isVowel(input.SafeAt(index-1))) ||
input.Contains(index-1, 5, "EWSKI", "EWSKY", "OWSKI", "OWSKY") ||
input.Contains(0, 3, "SCH") {
result.add("", "F")
index++
} else if input.Contains(index, 4, "WICZ", "WITZ") {
result.add("TS", "FX")
index += 4
} else {
index++
}
}
return index
}
func handleX(input runestring, result *metaphoneresult, index int) int {
if index == 0 {
result.add("S", "S")
index++
} else {
if !((index == len(input)-1) &&
(input.Contains(index-3, 3, "IAU", "EAU") ||
input.Contains(index-2, 2, "AU", "OU"))) {
result.add("KS", "KS")
}
if input.Contains(index+1, 1, "C", "X") {
index += 2
} else {
index++
}
}
return index
}
func handleZ(input runestring, result *metaphoneresult, index int, slavoGermanic bool) int {
if input.SafeAt(index+1) == 'H' {
result.add("J", "J")
} else {
if input.Contains(index+1, 2, "ZO", "ZI", "ZA") ||
(slavoGermanic && (index > 0 && input.SafeAt(index-1) != 'T')) {
result.add("S", "TS")
} else {
result.add("S", "S")
}
}
if input.SafeAt(index+1) == 'Z' {
index += 2
} else {
index++
}
return index
}
/******************************************************************************
* Complex conditional handlers for letters
*****************************************************************************/
func conditionC0(input runestring, index int) bool {
if input.Contains(index, 4, "CHIA") {
return true
} else if index <= 1 {
return false
} else if isVowel(input.SafeAt(index - 2)) {
return false
} else if !input.Contains(index-1, 3, "ACH") {
return false
} else {
c := input.SafeAt(index + 2)
return (c != 'I' && c != 'E') ||
(input.Contains(index-2, 6, "BACHER") ||
input.Contains(index-2, 6, "MACHER"))
}
}
func conditionCH0(input runestring, index int) bool {
if index != 0 {
return false
} else if !input.Contains(index+1, 5, "HARAC", "HARIS") &&
!input.Contains(index+1, 3, "HOR", "HYM", "HIA", "HEM") {
return false
} else if input.Contains(0, 5, "CHORE") {
return false
} else {
return true
}
}
func conditionCH1(input runestring, index int) bool {
// good god this is ugly
return (input.Contains(0, 4, "VAN ", "VON ") || input.Contains(0, 3, "SCH")) ||
input.Contains(index-2, 6, "ORCHES", "ARCHIT", "ORCHID") ||
input.Contains(index+2, 1, "T", "S") ||
((input.Contains(index-1, 1, "A", "O", "U", "E") || index == 0) &&
(input.Contains(index+2, 1, "L", "R", "N", "M", "B", "H", "F", "V", "W", " ") ||
index+1 == len(input)-1))
}
func conditionL0(input runestring, index int) bool {
if index == (len(input)-3) &&
input.Contains(index-1, 4, "ILLO", "ILLA", "ALLE") {
return true
} else if (input.Contains(len(input)-2, 2, "AS", "OS") ||
input.Contains(len(input)-1, 1, "A", "O")) &&
(input.Contains(index-1, 4, "ALLE")) {
return true
} else {
return false
}
}
func conditionM0(input runestring, index int) bool {
if input.SafeAt(index+1) == 'M' {
return true
}
return input.Contains(index-1, 3, "UMB") &&
((index+1) == (len(input)-1) ||
input.Contains(index+2, 2, "ER"))
}
// DoubleMetaphone computes the Double-Metaphone value of the input string.
// This value is a phonetic representation of how the string sounds, with
// affordances for many different language dialects. It was originally
// developed by Lawrence Phillips in the 1990s.
//
// More information about this algorithm can be found on Wikipedia at
// http://en.wikipedia.org/wiki/Metaphone.
func DoubleMetaphone(s1 string) (string, string) {
// trim, upper space
s1 = cleanInput(s1)
// structure to traverse the string by code point, not byte
input := runestring(s1)
slavoGermanic := isSlavoGermanic(s1)
// where we are in the string
index := 0
if isSilentStart(input) {
index += 1
}
result := newMetaphoneresult(4, true)
for !result.isComplete() && index <= len(input)-1 {
c := rune(input.SafeAt(index))
switch c {
case 'A', 'E', 'I', 'O', 'U', 'Y':
index = handleVowel(result, index)
case 'B':
result.add("P", "P")
if input.SafeAt(index+1) == 'B' {
index += 2
} else {
index++
}
case 'Ç':
result.add("S", "S")
index++
case 'C':
index = handleC(input, result, index)
case 'D':
index = handleD(input, result, index)
case 'F':
result.add("F", "F")
if input.SafeAt(index+1) == 'F' {
index += 2
} else {
index++
}
case 'G':
index = handleG(input, result, index, slavoGermanic)
case 'H':
index = handleH(input, result, index)
case 'J':
index = handleJ(input, result, index, slavoGermanic)
case 'K':
result.add("K", "K")
if input.SafeAt(index+1) == 'K' {
index += 2
} else {
index++
}
case 'L':
index = handleL(input, result, index)
case 'M':
result.add("M", "M")
if conditionM0(input, index) {
index += 2
} else {
index++
}
case 'N':
result.add("N", "N")
if input.SafeAt(index+1) == 'N' {
index += 2
} else {
index++
}
case 'Ñ':
result.add("N", "N")
index++
case 'P':
index = handleP(input, result, index)
case 'Q':
result.add("K", "K")
if input.SafeAt(index+1) == 'Q' {
index += 2
} else {
index++
}
case 'R':
index = handleR(input, result, index, slavoGermanic)
case 'S':
index = handleS(input, result, index, slavoGermanic)
case 'T':
index = handleT(input, result, index)
case 'V':
result.add("F", "F")
if input.SafeAt(index+1) == 'V' {
index += 2
} else {
index++
}
case 'W':
index = handleW(input, result, index)
case 'X':
index = handleX(input, result, index)
case 'Z':
index = handleZ(input, result, index, slavoGermanic)
default:
index++
}
}
return result.result()
}
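A usage sketch; the outputs in the comment are hand-traced from the handlers above ("0" encodes the "th" sound), not published test vectors:

package main

import (
    "fmt"

    "github.com/antzucaro/matchr"
)

func main() {
    primary, alternate := matchr.DoubleMetaphone("Smith")
    fmt.Println(primary, alternate) // SM0 XMT
}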

156
vendor/github.com/antzucaro/matchr/nysiis.go generated vendored Normal file
View file

@ -0,0 +1,156 @@
package matchr
// NYSIIS computes the NYSIIS phonetic encoding of the input string. It is a
// modification of the traditional Soundex algorithm.
func NYSIIS(s1 string) string {
cleans1 := runestring(cleanInput(s1))
input := runestring(make([]rune, 0, len(s1)))
// The output can't be larger than the string itself
output := runestring(make([]rune, 0, len(s1)))
// 0. Remove all non-ASCII characters
for _, v := range cleans1 {
if v >= 65 && v <= 90 {
input = append(input, v)
}
}
if len(input) == 0 {
return ""
}
// 1. Transcoding first characters
switch input[0] {
case 'M':
if input.SafeSubstr(0, 3) == "MAC" {
// MAC -> MCC
input[1] = 'C'
}
case 'K':
if input.SafeSubstr(0, 2) == "KN" {
// KN -> NN
input[0] = 'N'
} else {
// K -> C
input[0] = 'C'
}
case 'P':
next := input.SafeAt(1)
if next == 'H' {
// PH -> FF
input[0] = 'F'
input[1] = 'F'
} else if next == 'F' {
// PF -> FF
input[0] = 'F'
}
case 'S':
if input.SafeSubstr(0, 3) == "SCH" {
input[1] = 'S'
input[2] = 'S'
}
}
// 2. Transcoding last characters
switch input.SafeSubstr(len(input)-2, 2) {
case "EE", "IE":
// EE, IE -> Y
input.Del(len(input) - 2)
input[len(input)-1] = 'Y'
case "DT", "RT", "RD", "NT", "ND":
// DT, RT, RD, NT, ND -> D
input.Del(len(input) - 2)
input[len(input)-1] = 'D'
}
// 3. First character of key = first character of name
output = append(output, input[0])
last := input[0]
for i := 1; i < len(input); i++ {
c := input[i]
switch c {
case 'A', 'I', 'O', 'U':
// A, E, I, O, U -> A (E is separate)
input[i] = 'A'
case 'E':
// EV -> AF, else A
if input.SafeAt(i+1) == 'V' {
input[i+1] = 'F'
}
input[i] = 'A'
case 'Q':
// Q -> G
input[i] = 'G'
case 'Z':
// Z -> S
input[i] = 'S'
case 'M':
// M -> N
input[i] = 'N'
case 'K':
// KN -> N, else K -> C
if input.SafeAt(i+1) == 'N' {
input.Del(i)
} else {
input[i] = 'C'
}
case 'S':
// SCH -> SSS
if input.SafeSubstr(i, 3) == "SCH" {
input[i+1] = 'S'
input[i+2] = 'S'
}
case 'P':
// PH -> FF
if input.SafeAt(i+1) == 'H' {
input[i] = 'F'
input[i+1] = 'F'
}
case 'H':
// H -> $(previous character) if previous character or
// next character is a non-vowel
prev := input.SafeAt(i - 1)
next := input.SafeAt(i + 1)
if !isVowelNoY(prev) || !isVowelNoY(next) {
input[i] = prev
}
case 'W':
prev := input.SafeAt(i - 1)
if isVowelNoY(prev) {
input[i] = prev
}
}
if input[i] != last && input[i] != 0 {
output = append(output, input[i])
}
last = input[i]
}
// have to be careful here because we've already added the first
// key value
if len(output) > 1 {
// remove trailing s
if output.SafeAt(len(output)-1) == 'S' {
output.Del(len(output) - 1)
}
// trailing AY -> Y
if len(output) > 2 && output.SafeSubstr(len(output)-2, 2) == "AY" {
output.Del(len(output) - 2)
}
// trailing A -> remove it
if output.SafeAt(len(output)-1) == 'A' {
output.Del(len(output) - 1)
}
}
if len(output) > 6 {
return string(output[0:6])
} else {
return string(output)
}
}
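A usage sketch; the expected value is a commonly cited NYSIIS vector that also follows from tracing the rules above:

package main

import (
    "fmt"

    "github.com/antzucaro/matchr"
)

func main() {
    // MAC -> MCC, vowels collapse to A, trailing S and A are stripped.
    fmt.Println(matchr.NYSIIS("MACINTOSH")) // MCANT
}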

56
vendor/github.com/antzucaro/matchr/osa.go generated vendored Normal file
View file

@ -0,0 +1,56 @@
package matchr
// OSA computes the Optimal String Alignment distance between two
// strings. The returned value - distance - is the number of insertions,
// deletions, substitutions, and transpositions it takes to transform one
// string (s1) into another (s2). Each step in the transformation "costs"
// one distance point. It is similar to Damerau-Levenshtein, but is simpler
// because it does not allow multiple edits on any substring.
func OSA(s1 string, s2 string) (distance int) {
// index by code point, not byte
r1 := []rune(s1)
r2 := []rune(s2)
rows := len(r1) + 1
cols := len(r2) + 1
var i, j, d1, d2, d3, d_now, cost int
dist := make([]int, rows*cols)
for i = 0; i < rows; i++ {
dist[i*cols] = i
}
for j = 0; j < cols; j++ {
dist[j] = j
}
for i = 1; i < rows; i++ {
for j = 1; j < cols; j++ {
if r1[i-1] == r2[j-1] {
cost = 0
} else {
cost = 1
}
d1 = dist[((i-1)*cols)+j] + 1
d2 = dist[(i*cols)+(j-1)] + 1
d3 = dist[((i-1)*cols)+(j-1)] + cost
d_now = min(d1, min(d2, d3))
if i > 1 && j > 1 && r1[i-1] == r2[j-2] &&
r1[i-2] == r2[j-1] {
d1 = dist[((i-2)*cols)+(j-2)] + cost
d_now = min(d_now, d1)
}
dist[(i*cols)+j] = d_now
}
}
distance = dist[(cols*rows)-1]
return
}
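Set against DamerauLevenshtein, the same pair shows the restriction: OSA may transpose a pair or edit it further, but not both. A sketch:

package main

import (
    "fmt"

    "github.com/antzucaro/matchr"
)

func main() {
    fmt.Println(matchr.OSA("CA", "ABC"))                // 3
    fmt.Println(matchr.DamerauLevenshtein("CA", "ABC")) // 2
}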

128
vendor/github.com/antzucaro/matchr/phonex.go generated vendored Normal file
View file

@ -0,0 +1,128 @@
package matchr
func preProcess(input []rune) []rune {
output := runestring(make([]rune, 0, len(input)))
// 0. Remove all non-ASCII characters
for _, v := range input {
if v >= 65 && v <= 90 {
output = append(output, v)
}
}
// 1. Remove all trailing 'S' characters at the end of the name
for i := len(output) - 1; i >= 0 && output[i] == 'S'; i-- {
output.Del(i)
}
// 2. Convert leading letter pairs as follows
// KN -> N, PH -> F, WR -> R
switch output.SafeSubstr(0, 2) {
case "KN":
output = output[1:]
case "PH":
output[0] = 'F' // H will be ignored anyway
case "WR":
output = output[1:]
}
// 3a. Convert leading single letters as follows:
// H -> Remove
if output.SafeAt(0) == 'H' {
output = output[1:]
}
// 3b. Convert leading single letters as follows:
// E,I,O,U,Y -> A
// P -> B
// V -> F
// K,Q -> C
// J -> G
// Z -> S
switch output.SafeAt(0) {
case 'E', 'I', 'O', 'U', 'Y':
output[0] = 'A'
case 'P':
output[0] = 'B'
case 'V':
output[0] = 'F'
case 'K', 'Q':
output[0] = 'C'
case 'J':
output[0] = 'G'
case 'Z':
output[0] = 'S'
}
return output
}
// Phonex computes the Phonex phonetic encoding of the input string. Phonex is
// a modification of the venerable Soundex algorithm. It accounts for a few
// more letter combinations to improve accuracy on some data sets.
//
// This implementation is based on the original C implementation by the
// creator - A. J. Lait - as found in his research paper entitled "An
// Assessment of Name Matching Algorithms."
func Phonex(s1 string) string {
// preprocess
s1 = cleanInput(s1)
input := runestring(preProcess([]rune(s1)))
result := make([]rune, 0, len(input))
last := rune(0)
code := rune(0)
for i := 0; i < len(input) &&
input[i] != ' ' &&
input[i] != ',' &&
len(result) < 4; i++ {
switch input[i] {
case 'B', 'P', 'F', 'V':
code = '1'
case 'C', 'S', 'K', 'G', 'J', 'Q', 'X', 'Z':
code = '2'
case 'D', 'T':
if input.SafeAt(i+1) != 'C' {
code = '3'
}
case 'L':
if isVowel(input.SafeAt(i+1)) || i == len(input)-1 {
code = '4'
}
case 'M', 'N':
nextChar := input.SafeAt(i + 1)
if nextChar == 'D' || nextChar == 'G' {
// ignore next character
i++
}
code = '5'
case 'R':
if isVowel(input.SafeAt(i+1)) || i == len(input)-1 {
code = '6'
}
default:
code = 0
}
if last != code && code != 0 && i != 0 {
result = append(result, code)
}
// special case for 1st character: we use the actual character
if i == 0 {
result = append(result, input[i])
last = code
} else {
last = result[len(result)-1]
}
}
for len(result) < 4 {
result = append(result, '0')
}
return string(result)
}
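A usage sketch; the value in the comment is hand-traced from the rules above (KN collapses to N, T encodes as 3, zero padding) rather than a published test vector:

package main

import (
    "fmt"

    "github.com/antzucaro/matchr"
)

func main() {
    fmt.Println(matchr.Phonex("KNUTH")) // N300
}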

44
vendor/github.com/antzucaro/matchr/runestring.go generated vendored Normal file
View file

@ -0,0 +1,44 @@
package matchr
type runestring []rune
// A safe way to index a runestring. It will return a null rune if you try
// to index outside of the bounds of the runestring.
func (r *runestring) SafeAt(pos int) rune {
if pos < 0 || pos >= len(*r) {
return 0
} else {
return (*r)[pos]
}
}
// A safe way to obtain a substring of a runestring. It will return a null
// string ("") if you index somewhere outside its bounds.
func (r *runestring) SafeSubstr(pos int, length int) string {
if pos < 0 || pos > len(*r) || (pos+length) > len(*r) {
return ""
} else {
return string((*r)[pos : pos+length])
}
}
// Delete characters at positions pos. It will do nothing if you provide
// an index outside the bounds of the runestring.
func (r *runestring) Del(pos ...int) {
for _, i := range pos {
if i >= 0 && i < len(*r) {
*r = append((*r)[:i], (*r)[i+1:]...)
}
}
}
// A helper to determine if any substrings exist within the given runestring.
func (r *runestring) Contains(start int, length int, criteria ...string) bool {
substring := r.SafeSubstr(start, length)
for _, c := range criteria {
if substring == c {
return true
}
}
return false
}
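Because runestring is unexported, a sketch of its safety guarantees has to live inside the package; something like this test would exercise them:

package matchr

import "testing"

func TestRunestringSafety(t *testing.T) {
    r := runestring("CHAT")
    if r.SafeAt(10) != 0 {
        t.Error("out-of-bounds SafeAt should yield the null rune, not panic")
    }
    if r.SafeSubstr(2, 5) != "" {
        t.Error("a range past the end should yield the empty string")
    }
    if !r.Contains(0, 2, "CH", "SH") {
        t.Error(`"CH" should match at position 0`)
    }
}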

87
vendor/github.com/antzucaro/matchr/smithwaterman.go generated vendored Normal file
View file

@ -0,0 +1,87 @@
package matchr
const GAP_COST = float64(0.5)
func getCost(r1 []rune, r1Index int, r2 []rune, r2Index int) float64 {
if r1[r1Index] == r2[r2Index] {
return 1.0
} else {
return -2.0
}
}
// SmithWaterman computes the Smith-Waterman local sequence alignment for the
// two input strings. This was originally designed to find similar regions in
// strings representing DNA or protein sequences.
func SmithWaterman(s1 string, s2 string) float64 {
var cost float64
// index by code point, not byte
r1 := []rune(s1)
r2 := []rune(s2)
r1Len := len(r1)
r2Len := len(r2)
if r1Len == 0 {
return float64(r2Len)
}
if r2Len == 0 {
return float64(r1Len)
}
d := make([][]float64, r1Len)
for i := range d {
d[i] = make([]float64, r2Len)
}
var maxSoFar float64
for i := 0; i < r1Len; i++ {
// substitution cost
cost = getCost(r1, i, r2, 0)
if i == 0 {
d[0][0] = max(0.0, max(-GAP_COST, cost))
} else {
d[i][0] = max(0.0, max(d[i-1][0]-GAP_COST, cost))
}
// save if it is the biggest thus far
if d[i][0] > maxSoFar {
maxSoFar = d[i][0]
}
}
for j := 0; j < r2Len; j++ {
// substitution cost
cost = getCost(r1, 0, r2, j)
if j == 0 {
d[0][0] = max(0, max(-GAP_COST, cost))
} else {
d[0][j] = max(0, max(d[0][j-1]-GAP_COST, cost))
}
// save if it is the biggest thus far
if d[0][j] > maxSoFar {
maxSoFar = d[0][j]
}
}
for i := 1; i < r1Len; i++ {
for j := 1; j < r2Len; j++ {
cost = getCost(r1, i, r2, j)
// find the lowest cost
d[i][j] = max(
max(0, d[i-1][j]-GAP_COST),
max(d[i][j-1]-GAP_COST, d[i-1][j-1]+cost))
// save if it is the biggest thus far
if d[i][j] > maxSoFar {
maxSoFar = d[i][j]
}
}
}
return maxSoFar
}
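Usage sketch: with the costs above (+1 match, -2 mismatch, 0.5 gap), identical strings score their length, and a clean local match scores its own length regardless of the surrounding text; values here are hand-traced through the DP table:

package main

import (
    "fmt"

    "github.com/antzucaro/matchr"
)

func main() {
    fmt.Println(matchr.SmithWaterman("ACGT", "ACGT")) // 4
    fmt.Println(matchr.SmithWaterman("GGTAC", "TAC")) // 3: aligns on "TAC"
}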

72
vendor/github.com/antzucaro/matchr/soundex.go generated vendored Normal file
View file

@ -0,0 +1,72 @@
package matchr
import "strings"
// Soundex computes the Soundex phonetic representation of the input string. It
// attempts to encode homophones with the same characters. More information can
// be found at http://en.wikipedia.org/wiki/Soundex.
func Soundex(s1 string) string {
if len(s1) == 0 {
return ""
}
// we should work with all uppercase
s1 = strings.ToUpper(s1)
input := NewString(s1)
// the encoded value
enc := input.Slice(0, 1)
c := ""
prev := ""
hw := false
for i := 0; i < input.RuneCount(); i++ {
switch rune(input.At(i)) {
case 'B', 'F', 'P', 'V':
c = "1"
case 'C', 'G', 'J', 'K', 'Q', 'S', 'X', 'Z':
c = "2"
case 'D', 'T':
c = "3"
case 'L':
c = "4"
case 'M', 'N':
c = "5"
case 'R':
c = "6"
case 'H', 'W':
hw = true
default:
c = ""
}
// don't encode the first position, but we need its code value
// to prevent repeats
if c != "" && c != prev && i > 0 {
// if the next encoded digit is different, we can add it right away
// if it is the same, though, it must not have been preceded
// by an 'H' or a 'W'
if enc[len(enc)-1:len(enc)] != c || !hw {
enc = enc + c
}
// we're done when we reach four encoded characters
if len(enc) == 4 {
break
}
}
prev = c
hw = false
}
// if we've fallen short of 4 "real" encoded characters,
// it gets padded with zeros
for len(enc) < 4 {
enc = enc + "0"
}
return enc
}
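A usage sketch with the classic homophone pair:

package main

import (
    "fmt"

    "github.com/antzucaro/matchr"
)

func main() {
    // Homophones encode identically: R + 1 (b/p) + 6 (r) + 3 (t).
    fmt.Println(matchr.Soundex("Robert")) // R163
    fmt.Println(matchr.Soundex("Rupert")) // R163
}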

215
vendor/github.com/antzucaro/matchr/utf8.go generated vendored Normal file
View file

@ -0,0 +1,215 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package matchr
import (
"errors"
"unicode/utf8"
)
// String wraps a regular string with a small structure that provides more
// efficient indexing by code point index, as opposed to byte index.
// Scanning incrementally forwards or backwards is O(1) per index operation
// (although not as fast as a range clause going forwards). Random access is
// O(N) in the length of the string, but the overhead is less than always
// scanning from the beginning.
// If the string is ASCII, random access is O(1).
// Unlike the built-in string type, String has internal mutable state and
// is not thread-safe.
type String struct {
str string
numRunes int
// If width > 0, the rune at runePos starts at bytePos and has the specified width.
width int
bytePos int
runePos int
nonASCII int // byte index of the first non-ASCII rune.
}
// NewString returns a new UTF-8 string with the provided contents.
func NewString(contents string) *String {
return new(String).Init(contents)
}
// Init initializes an existing String to hold the provided contents.
// It returns a pointer to the initialized String.
func (s *String) Init(contents string) *String {
s.str = contents
s.bytePos = 0
s.runePos = 0
for i := 0; i < len(contents); i++ {
if contents[i] >= utf8.RuneSelf {
// Not ASCII.
s.numRunes = utf8.RuneCountInString(contents)
_, s.width = utf8.DecodeRuneInString(contents)
s.nonASCII = i
return s
}
}
// ASCII is simple. Also, the empty string is ASCII.
s.numRunes = len(contents)
s.width = 0
s.nonASCII = len(contents)
return s
}
// String returns the contents of the String. This method also means the
// String is directly printable by fmt.Print.
func (s *String) String() string {
return s.str
}
// RuneCount returns the number of runes (Unicode code points) in the String.
func (s *String) RuneCount() int {
return s.numRunes
}
// IsASCII returns a boolean indicating whether the String contains only ASCII bytes.
func (s *String) IsASCII() bool {
return s.width == 0
}
// Slice returns the string sliced at rune positions [i:j].
func (s *String) Slice(i, j int) string {
// ASCII is easy. Let the compiler catch the indexing error if there is one.
if j < s.nonASCII {
return s.str[i:j]
}
if i < 0 || j > s.numRunes || i > j {
panic(errors.New("utf8.String: slice index out of range"))
}
if i == j {
return ""
}
// For non-ASCII, after At(i), bytePos is always the position of the indexed character.
var low, high int
switch {
case i < s.nonASCII:
low = i
case i == s.numRunes:
low = len(s.str)
default:
s.At(i)
low = s.bytePos
}
switch {
case j == s.numRunes:
high = len(s.str)
default:
s.At(j)
high = s.bytePos
}
return s.str[low:high]
}
// At returns the rune with index i in the String. The sequence of runes is the same
// as iterating over the contents with a "for range" clause.
func (s *String) At(i int) int {
// ASCII is easy. Let the compiler catch the indexing error if there is one.
if i < s.nonASCII {
return int(s.str[i])
}
// Now we do need to know the index is valid.
if i < 0 || i >= s.numRunes {
panic(errors.New("utf8.String: index out of range"))
}
var r rune
// Five easy common cases: within 1 spot of bytePos/runePos, or the beginning, or the end.
// With these cases, all scans from beginning or end work in O(1) time per rune.
switch {
case i == s.runePos-1: // backing up one rune
r, s.width = utf8.DecodeLastRuneInString(s.str[0:s.bytePos])
s.runePos = i
s.bytePos -= s.width
return int(r)
case i == s.runePos+1: // moving ahead one rune
s.runePos = i
s.bytePos += s.width
fallthrough
case i == s.runePos:
r, s.width = utf8.DecodeRuneInString(s.str[s.bytePos:])
return int(r)
case i == 0: // start of string
r, s.width = utf8.DecodeRuneInString(s.str)
s.runePos = 0
s.bytePos = 0
return int(r)
case i == s.numRunes-1: // last rune in string
r, s.width = utf8.DecodeLastRuneInString(s.str)
s.runePos = i
s.bytePos = len(s.str) - s.width
return int(r)
}
// We need to do a linear scan. There are three places to start from:
// 1) The beginning
// 2) bytePos/runePos.
// 3) The end
// Choose the closest in rune count, scanning backwards if necessary.
forward := true
if i < s.runePos {
// Between beginning and pos. Which is closer?
// Since both i and runePos are guaranteed >= nonASCII, that's the
// lowest location we need to start from.
if i < (s.runePos-s.nonASCII)/2 {
// Scan forward from beginning
s.bytePos, s.runePos = s.nonASCII, s.nonASCII
} else {
// Scan backwards from where we are
forward = false
}
} else {
// Between pos and end. Which is closer?
if i-s.runePos < (s.numRunes-s.runePos)/2 {
// Scan forward from pos
} else {
// Scan backwards from end
s.bytePos, s.runePos = len(s.str), s.numRunes
forward = false
}
}
if forward {
// TODO: Is it much faster to use a range loop for this scan?
for {
r, s.width = utf8.DecodeRuneInString(s.str[s.bytePos:])
if s.runePos == i {
break
}
s.runePos++
s.bytePos += s.width
}
} else {
for {
r, s.width = utf8.DecodeLastRuneInString(s.str[0:s.bytePos])
s.runePos--
s.bytePos -= s.width
if s.runePos == i {
break
}
}
}
return int(r)
}
// We want the panic in At(i) to satisfy os.Error, because that's what
// runtime panics satisfy, but we can't import os. This is our solution.
// error is the type of the error returned if a user calls String.At(i) with i out of range.
// It satisfies os.Error and runtime.Error.
// type error string
/*
func (err error) String() string {
return string(err)
}
func (err error) RunTimeError() {
}
*/
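NewString is exported, so the incremental indexing can be sketched directly; note that At returns an int code point and all indexing is by rune:

package main

import (
    "fmt"

    "github.com/antzucaro/matchr"
)

func main() {
    s := matchr.NewString("naïve")
    fmt.Println(s.RuneCount())  // 5, even though the string is 6 bytes
    fmt.Printf("%c\n", s.At(2)) // ï: indexing is by rune, not byte
    fmt.Println(s.Slice(2, 4))  // "ïv"
}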

119
vendor/github.com/antzucaro/matchr/util.go generated vendored Normal file
View file

@ -0,0 +1,119 @@
package matchr
import (
"math"
"strings"
)
// min of two integers
func min(a int, b int) (res int) {
if a < b {
res = a
} else {
res = b
}
return
}
// max of two integers
func maxI(a int, b int) (res int) {
if a < b {
res = b
} else {
res = a
}
return
}
// max of two float64s
func max(a float64, b float64) (res float64) {
if a < b {
res = b
} else {
res = a
}
return
}
// is this rune outside of the ASCII numeric code points (i.e., not a digit)?
func nan(c rune) bool {
return ((c > 57) || (c < 48))
}
// Round a float64 to the given precision
//
// http://play.golang.org/p/S654PxAe_N
//
// (via Rory McGuire at
// https://groups.google.com/forum/#!topic/golang-nuts/ITZV08gAugI)
func round(x float64, prec int) float64 {
if math.IsNaN(x) || math.IsInf(x, 0) {
return x
}
sign := 1.0
if x < 0 {
sign = -1
x *= -1
}
var rounder float64
pow := math.Pow(10, float64(prec))
intermed := x * pow
_, frac := math.Modf(intermed)
if frac >= 0.5 {
rounder = math.Ceil(intermed)
} else {
rounder = math.Floor(intermed)
}
return rounder / pow * sign
}
// A helper to determine if any substrings exist within the given string
func contains(value *String, start int, length int, criteria ...string) bool {
substring := substring(value, start, length)
for _, c := range criteria {
if substring == c {
return true
}
}
return false
}
// A fault-tolerant version of Slice. It will return nothing ("") if the index
// is out of bounds. This allows substring-ing without having to bound check
// every time.
func substring(value *String, start int, length int) string {
if start >= 0 && start+length <= value.RuneCount() {
return value.Slice(start, start+length)
} else {
return ""
}
}
func isVowel(c rune) bool {
switch c {
case 'A', 'E', 'I', 'O', 'U', 'Y':
return true
default:
return false
}
}
func isVowelNoY(c rune) bool {
switch c {
case 'A', 'E', 'I', 'O', 'U':
return true
default:
return false
}
}
func cleanInput(input string) string {
return strings.ToUpper(strings.TrimSpace(input))
}
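These helpers are unexported, so a sketch of their behavior is framed as in-package test code; the rounding expectations are traced from the implementation above (halves round away from zero):

package matchr

import "testing"

func TestHelpers(t *testing.T) {
    if nan('7') {
        t.Error("'7' is an ASCII digit")
    }
    if round(2.5, 0) != 3 || round(-2.5, 0) != -3 {
        t.Error("halves should round away from zero")
    }
    if min(2, 3) != 2 || maxI(2, 3) != 3 {
        t.Error("integer min/max")
    }
}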

View file

@ -8857,6 +8857,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@ -10472,6 +10475,14 @@ var awsPartition = partition{
},
},
"health": service{
PartitionEndpoint: "aws-global",
IsRegionalized: boxedFalse,
Defaults: endpointDefaults{
defaultKey{}: endpoint{
SSLCommonName: "health.us-east-1.amazonaws.com",
Protocols: []string{"https"},
},
},
Endpoints: serviceEndpoints{
endpointKey{
Region: "fips-us-east-2",
@ -16611,6 +16622,70 @@ var awsPartition = partition{
},
},
},
"resiliencehub": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
}: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-2",
}: endpoint{},
endpointKey{
Region: "us-west-1",
}: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
},
},
"resource-groups": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -29236,6 +29311,13 @@ var awsisoPartition = partition{
}: endpoint{},
},
},
"appconfigdata": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-iso-west-1",
}: endpoint{},
},
},
"application-autoscaling": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{

View file

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.44.9"
const SDKVersion = "1.44.18"

View file

@ -1279,6 +1279,12 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
// in the IAM User Guide.
//
// No permissions are required for users to perform this operation. The purpose
// of the sts:GetSessionToken operation is to authenticate the user using MFA.
// You cannot use policies to control authentication operations. For more information,
// see Permissions for GetSessionToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html)
// in the IAM User Guide.
//
// Session Duration
//
// The GetSessionToken operation must be called by using the long-term Amazon

View file

@ -1,5 +1,10 @@
# package log
[![Go Reference](https://pkg.go.dev/badge/github.com/go-kit/log.svg)](https://pkg.go.dev/github.com/go-kit/log)
[![Go Report Card](https://goreportcard.com/badge/go-kit/log)](https://goreportcard.com/report/go-kit/log)
[![GitHub Actions](https://github.com/go-kit/log/actions/workflows/test.yml/badge.svg)](https://github.com/go-kit/log/actions/workflows/test.yml)
[![Coverage Status](https://coveralls.io/repos/github/go-kit/log/badge.svg?branch=main)](https://coveralls.io/github/go-kit/log?branch=main)
`package log` provides a minimal interface for structured logging in services.
It may be wrapped to encode conventions, enforce type-safety, provide leveled
logging, and so on. It can be used for both typical application log events,

View file

@ -68,7 +68,7 @@ func safeString(str fmt.Stringer) (s string) {
if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() {
s = "NULL"
} else {
panic(panicVal)
s = fmt.Sprintf("PANIC in String method: %v", panicVal)
}
}
}()
@ -82,7 +82,7 @@ func safeError(err error) (s interface{}) {
if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() {
s = nil
} else {
panic(panicVal)
s = fmt.Sprintf("PANIC in Error method: %v", panicVal)
}
}
}()

View file

@ -7,6 +7,17 @@
// logger = level.NewFilter(logger, level.AllowInfo()) // <--
// logger = log.With(logger, "ts", log.DefaultTimestampUTC)
//
// It's also possible to configure log level from a string. For instance from
// a flag, environment variable or configuration file.
//
// fs := flag.NewFlagSet("myprogram")
// lvl := fs.String("log", "info", "debug, info, warn, error")
//
// var logger log.Logger
// logger = log.NewLogfmtLogger(os.Stderr)
// logger = level.NewFilter(logger, level.Allow(level.ParseDefault(*lvl, level.InfoValue()))) // <--
// logger = log.With(logger, "ts", log.DefaultTimestampUTC)
//
// Then, at the callsites, use one of the level.Debug, Info, Warn, or Error
// helper methods to emit leveled log events.
//

View file

@ -1,6 +1,14 @@
package level
import "github.com/go-kit/log"
import (
"errors"
"strings"
"github.com/go-kit/log"
)
// ErrInvalidLevelString is returned whenever an invalid string is passed to Parse.
var ErrInvalidLevelString = errors.New("invalid level string")
// Error returns a logger that includes a Key/ErrorValue pair.
func Error(logger log.Logger) log.Logger {
@ -66,6 +74,22 @@ func (l *logger) Log(keyvals ...interface{}) error {
// Option sets a parameter for the leveled logger.
type Option func(*logger)
// Allow the provided log level to pass.
func Allow(v Value) Option {
switch v {
case debugValue:
return AllowDebug()
case infoValue:
return AllowInfo()
case warnValue:
return AllowWarn()
case errorValue:
return AllowError()
default:
return AllowNone()
}
}
// AllowAll is an alias for AllowDebug.
func AllowAll() Option {
return AllowDebug()
@ -100,6 +124,33 @@ func allowed(allowed level) Option {
return func(l *logger) { l.allowed = allowed }
}
// Parse a string to its corresponding level value. Valid strings are "debug",
// "info", "warn", and "error". Strings are normalized via strings.TrimSpace and
// strings.ToLower.
func Parse(level string) (Value, error) {
switch strings.TrimSpace(strings.ToLower(level)) {
case debugValue.name:
return debugValue, nil
case infoValue.name:
return infoValue, nil
case warnValue.name:
return warnValue, nil
case errorValue.name:
return errorValue, nil
default:
return nil, ErrInvalidLevelString
}
}
// ParseDefault calls Parse and returns the default Value on error.
func ParseDefault(level string, def Value) Value {
v, err := Parse(level)
if err != nil {
return def
}
return v
}
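Putting the new helpers together, mirroring the doc.go example above; a sketch where the level string might come from a flag or an environment variable:

package main

import (
    "os"

    "github.com/go-kit/log"
    "github.com/go-kit/log/level"
)

func main() {
    var logger log.Logger
    logger = log.NewLogfmtLogger(os.Stderr)
    // ParseDefault swallows ErrInvalidLevelString and falls back to info.
    lvl := level.ParseDefault(os.Getenv("LOG_LEVEL"), level.InfoValue())
    logger = level.NewFilter(logger, level.Allow(lvl))

    level.Warn(logger).Log("msg", "emitted")
    level.Debug(logger).Log("msg", "squelched unless LOG_LEVEL=debug")
}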
// ErrNotAllowed sets the error to return from Log when it squelches a log
// event disallowed by the configured Allow[Level] option. By default,
// ErrNotAllowed is nil; in this case the log event is squelched with no

1
vendor/github.com/go-kit/log/staticcheck.conf generated vendored Normal file
View file

@ -0,0 +1 @@
checks = ["all"]

9
vendor/github.com/google/uuid/.travis.yml generated vendored Normal file
View file

@ -0,0 +1,9 @@
language: go
go:
- 1.4.3
- 1.5.3
- tip
script:
- go test -v ./...

10
vendor/github.com/google/uuid/CONTRIBUTING.md generated vendored Normal file
View file

@ -0,0 +1,10 @@
# How to contribute
We definitely welcome patches and contribution to this project!
### Legal requirements
In order to protect both you and ourselves, you will need to sign the
[Contributor License Agreement](https://cla.developers.google.com/clas).
You may have already signed it for other Google projects.

9
vendor/github.com/google/uuid/CONTRIBUTORS generated vendored Normal file
View file

@ -0,0 +1,9 @@
Paul Borman <borman@google.com>
bmatsuo
shawnps
theory
jboverfelt
dsymonds
cd1
wallclockbuilder
dansouza

27
vendor/github.com/google/uuid/LICENSE generated vendored Normal file
View file

@ -0,0 +1,27 @@
Copyright (c) 2009,2014 Google Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

19
vendor/github.com/google/uuid/README.md generated vendored Normal file
View file

@ -0,0 +1,19 @@
# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master)
The uuid package generates and inspects UUIDs based on
[RFC 4122](http://tools.ietf.org/html/rfc4122)
and DCE 1.1: Authentication and Security Services.
This package is based on the github.com/pborman/uuid package (previously named
code.google.com/p/go-uuid). It differs from these earlier packages in that
a UUID is a 16 byte array rather than a byte slice. One loss due to this
change is the ability to represent an invalid UUID (vs a NIL UUID).
###### Install
`go get github.com/google/uuid`
###### Documentation
[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid)
Full `go doc` style documentation for the package can be viewed online without
installing this package by using the GoDoc site here:
http://pkg.go.dev/github.com/google/uuid

80
vendor/github.com/google/uuid/dce.go generated vendored Normal file
View file

@ -0,0 +1,80 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
"fmt"
"os"
)
// A Domain represents a Version 2 domain
type Domain byte
// Domain constants for DCE Security (Version 2) UUIDs.
const (
Person = Domain(0)
Group = Domain(1)
Org = Domain(2)
)
// NewDCESecurity returns a DCE Security (Version 2) UUID.
//
// The domain should be one of Person, Group or Org.
// On a POSIX system the id should be the user's UID for the Person
// domain and the user's GID for the Group. The meaning of id for
// the domain Org or on non-POSIX systems is site defined.
//
// For a given domain/id pair the same token may be returned for up to
// 7 minutes and 10 seconds.
func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
uuid, err := NewUUID()
if err == nil {
uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
uuid[9] = byte(domain)
binary.BigEndian.PutUint32(uuid[0:], id)
}
return uuid, err
}
// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
// domain with the id returned by os.Getuid.
//
// NewDCESecurity(Person, uint32(os.Getuid()))
func NewDCEPerson() (UUID, error) {
return NewDCESecurity(Person, uint32(os.Getuid()))
}
// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
// domain with the id returned by os.Getgid.
//
// NewDCESecurity(Group, uint32(os.Getgid()))
func NewDCEGroup() (UUID, error) {
return NewDCESecurity(Group, uint32(os.Getgid()))
}
// Domain returns the domain for a Version 2 UUID. Domains are only defined
// for Version 2 UUIDs.
func (uuid UUID) Domain() Domain {
return Domain(uuid[9])
}
// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
// UUIDs.
func (uuid UUID) ID() uint32 {
return binary.BigEndian.Uint32(uuid[0:4])
}
func (d Domain) String() string {
switch d {
case Person:
return "Person"
case Group:
return "Group"
case Org:
return "Org"
}
return fmt.Sprintf("Domain%d", int(d))
}
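
As a quick illustration (mine, not from the vendored file): NewDCEPerson embeds the caller's UID, so both the domain and the id round-trip back out of the UUID.

package main

import (
	"fmt"
	"os"

	"github.com/google/uuid"
)

func main() {
	id, err := uuid.NewDCEPerson() // NewDCESecurity(Person, uint32(os.Getuid()))
	if err != nil {
		panic(err)
	}
	fmt.Println(id.Domain() == uuid.Person)     // true
	fmt.Println(id.ID() == uint32(os.Getuid())) // true
}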

12
vendor/github.com/google/uuid/doc.go generated vendored Normal file
View file

@ -0,0 +1,12 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package uuid generates and inspects UUIDs.
//
// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
// Services.
//
// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
// maps or compared directly.
package uuid

53
vendor/github.com/google/uuid/hash.go generated vendored Normal file
View file

@ -0,0 +1,53 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"crypto/md5"
"crypto/sha1"
"hash"
)
// Well known namespace IDs and UUIDs
var (
NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
Nil UUID // empty UUID, all zeros
)
// NewHash returns a new UUID derived from the hash of space concatenated with
// data generated by h. The hash should be at least 16 bytes in length. The
// first 16 bytes of the hash are used to form the UUID. The version of the
// UUID will be the lower 4 bits of version. NewHash is used to implement
// NewMD5 and NewSHA1.
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
h.Reset()
h.Write(space[:]) //nolint:errcheck
h.Write(data) //nolint:errcheck
s := h.Sum(nil)
var uuid UUID
copy(uuid[:], s)
uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
return uuid
}
// NewMD5 returns a new MD5 (Version 3) UUID based on the
// supplied name space and data. It is the same as calling:
//
// NewHash(md5.New(), space, data, 3)
func NewMD5(space UUID, data []byte) UUID {
return NewHash(md5.New(), space, data, 3)
}
// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
// supplied name space and data. It is the same as calling:
//
// NewHash(sha1.New(), space, data, 5)
func NewSHA1(space UUID, data []byte) UUID {
return NewHash(sha1.New(), space, data, 5)
}
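
A short sketch of the determinism this buys (example inputs are mine): hashing the same namespace and name always yields the same name-based UUID, and the results compare equal as arrays.

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	a := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))
	b := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))
	fmt.Println(a == b)      // true: same inputs, same UUID
	fmt.Println(a.Version()) // VERSION_5
}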

38
vendor/github.com/google/uuid/marshal.go generated vendored Normal file
View file

@ -0,0 +1,38 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import "fmt"
// MarshalText implements encoding.TextMarshaler.
func (uuid UUID) MarshalText() ([]byte, error) {
var js [36]byte
encodeHex(js[:], uuid)
return js[:], nil
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (uuid *UUID) UnmarshalText(data []byte) error {
id, err := ParseBytes(data)
if err != nil {
return err
}
*uuid = id
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler.
func (uuid UUID) MarshalBinary() ([]byte, error) {
return uuid[:], nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (uuid *UUID) UnmarshalBinary(data []byte) error {
if len(data) != 16 {
return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
}
copy(uuid[:], data)
return nil
}
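
Because UUID implements encoding.TextMarshaler and TextUnmarshaler, encoding/json picks these methods up automatically. A minimal round trip (the struct and value are illustrative):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
)

type record struct {
	ID uuid.UUID `json:"id"`
}

func main() {
	in := record{ID: uuid.MustParse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")}
	buf, _ := json.Marshal(in)    // MarshalText renders the canonical 36-byte form
	var out record
	_ = json.Unmarshal(buf, &out) // UnmarshalText accepts any format ParseBytes does
	fmt.Println(string(buf), out.ID == in.ID)
}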

90
vendor/github.com/google/uuid/node.go generated vendored Normal file
View file

@ -0,0 +1,90 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"sync"
)
var (
nodeMu sync.Mutex
ifname string // name of interface being used
nodeID [6]byte // hardware for version 1 UUIDs
zeroID [6]byte // nodeID with only 0's
)
// NodeInterface returns the name of the interface from which the NodeID was
// derived. The interface "user" is returned if the NodeID was set by
// SetNodeID.
func NodeInterface() string {
defer nodeMu.Unlock()
nodeMu.Lock()
return ifname
}
// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
// If name is "" then the first usable interface found will be used or a random
// Node ID will be generated. If a named interface cannot be found then false
// is returned.
//
// SetNodeInterface never fails when name is "".
func SetNodeInterface(name string) bool {
defer nodeMu.Unlock()
nodeMu.Lock()
return setNodeInterface(name)
}
func setNodeInterface(name string) bool {
iname, addr := getHardwareInterface(name) // null implementation for js
if iname != "" && addr != nil {
ifname = iname
copy(nodeID[:], addr)
return true
}
// We found no interfaces with a valid hardware address. If name
// does not specify a specific interface generate a random Node ID
// (section 4.1.6)
if name == "" {
ifname = "random"
randomBits(nodeID[:])
return true
}
return false
}
// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
// if not already set.
func NodeID() []byte {
defer nodeMu.Unlock()
nodeMu.Lock()
if nodeID == zeroID {
setNodeInterface("")
}
nid := nodeID
return nid[:]
}
// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
// of id are used. If id is less than 6 bytes then false is returned and the
// Node ID is not set.
func SetNodeID(id []byte) bool {
if len(id) < 6 {
return false
}
defer nodeMu.Unlock()
nodeMu.Lock()
copy(nodeID[:], id)
ifname = "user"
return true
}
// NodeID returns the 6 byte node id encoded in uuid. The NodeID is only well
// defined for version 1 and 2 UUIDs.
func (uuid UUID) NodeID() []byte {
var node [6]byte
copy(node[:], uuid[10:])
return node[:]
}
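
A sketch of pinning the node ID so Version 1 UUIDs don't expose a real MAC address; the six example bytes are arbitrary, with the locally-administered bit set.

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	uuid.SetNodeID([]byte{0x02, 0x00, 0x5e, 0x10, 0x00, 0x01}) // first 6 bytes are used
	id := uuid.Must(uuid.NewUUID())
	fmt.Printf("%x\n", id.NodeID())   // 02005e100001
	fmt.Println(uuid.NodeInterface()) // "user"
}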

12
vendor/github.com/google/uuid/node_js.go generated vendored Normal file
View file

@ -0,0 +1,12 @@
// Copyright 2017 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build js
package uuid
// getHardwareInterface returns nil values for the JS version of the code.
// This removes the "net" dependency, because it is not used in the browser.
// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
func getHardwareInterface(name string) (string, []byte) { return "", nil }

33
vendor/github.com/google/uuid/node_net.go generated vendored Normal file
View file

@ -0,0 +1,33 @@
// Copyright 2017 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !js
package uuid
import "net"
var interfaces []net.Interface // cached list of interfaces
// getHardwareInterface returns the name and hardware address of interface name.
// If name is "" then the name and hardware address of one of the system's
// interfaces is returned. If no interfaces are found (name does not exist or
// there are no interfaces) then "", nil is returned.
//
// Only addresses of at least 6 bytes are returned.
func getHardwareInterface(name string) (string, []byte) {
if interfaces == nil {
var err error
interfaces, err = net.Interfaces()
if err != nil {
return "", nil
}
}
for _, ifs := range interfaces {
if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
return ifs.Name, ifs.HardwareAddr
}
}
return "", nil
}

118
vendor/github.com/google/uuid/null.go generated vendored Normal file
View file

@ -0,0 +1,118 @@
// Copyright 2021 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"bytes"
"database/sql/driver"
"encoding/json"
"fmt"
)
var jsonNull = []byte("null")
// NullUUID represents a UUID that may be null.
// NullUUID implements the SQL driver.Scanner interface so
// it can be used as a scan destination:
//
// var u uuid.NullUUID
// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
// ...
// if u.Valid {
// // use u.UUID
// } else {
// // NULL value
// }
//
type NullUUID struct {
UUID UUID
Valid bool // Valid is true if UUID is not NULL
}
// Scan implements the SQL driver.Scanner interface.
func (nu *NullUUID) Scan(value interface{}) error {
if value == nil {
nu.UUID, nu.Valid = Nil, false
return nil
}
err := nu.UUID.Scan(value)
if err != nil {
nu.Valid = false
return err
}
nu.Valid = true
return nil
}
// Value implements the driver Valuer interface.
func (nu NullUUID) Value() (driver.Value, error) {
if !nu.Valid {
return nil, nil
}
// Delegate to UUID Value function
return nu.UUID.Value()
}
// MarshalBinary implements encoding.BinaryMarshaler.
func (nu NullUUID) MarshalBinary() ([]byte, error) {
if nu.Valid {
return nu.UUID[:], nil
}
return []byte(nil), nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (nu *NullUUID) UnmarshalBinary(data []byte) error {
if len(data) != 16 {
return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
}
copy(nu.UUID[:], data)
nu.Valid = true
return nil
}
// MarshalText implements encoding.TextMarshaler.
func (nu NullUUID) MarshalText() ([]byte, error) {
if nu.Valid {
return nu.UUID.MarshalText()
}
return jsonNull, nil
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (nu *NullUUID) UnmarshalText(data []byte) error {
id, err := ParseBytes(data)
if err != nil {
nu.Valid = false
return err
}
nu.UUID = id
nu.Valid = true
return nil
}
// MarshalJSON implements json.Marshaler.
func (nu NullUUID) MarshalJSON() ([]byte, error) {
if nu.Valid {
return json.Marshal(nu.UUID)
}
return jsonNull, nil
}
// UnmarshalJSON implements json.Unmarshaler.
func (nu *NullUUID) UnmarshalJSON(data []byte) error {
if bytes.Equal(data, jsonNull) {
*nu = NullUUID{}
return nil // valid null UUID
}
err := json.Unmarshal(data, &nu.UUID)
nu.Valid = err == nil
return err
}
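
Beyond the SQL usage in the doc comment, the JSON hooks let NullUUID distinguish a null field from a present one. A small sketch with made-up payloads:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
)

type row struct {
	ID uuid.NullUUID `json:"id"`
}

func main() {
	var a, b row
	_ = json.Unmarshal([]byte(`{"id":null}`), &a)
	_ = json.Unmarshal([]byte(`{"id":"6ba7b810-9dad-11d1-80b4-00c04fd430c8"}`), &b)
	fmt.Println(a.ID.Valid, b.ID.Valid) // false true
}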

59
vendor/github.com/google/uuid/sql.go generated vendored Normal file
View file

@ -0,0 +1,59 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"database/sql/driver"
"fmt"
)
// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
// Currently, database types that map to string and []byte are supported. Please
// consult database-specific driver documentation for matching types.
func (uuid *UUID) Scan(src interface{}) error {
switch src := src.(type) {
case nil:
return nil
case string:
// if an empty UUID comes from a table, we return a null UUID
if src == "" {
return nil
}
// see Parse for required string format
u, err := Parse(src)
if err != nil {
return fmt.Errorf("Scan: %v", err)
}
*uuid = u
case []byte:
// if an empty UUID comes from a table, we return a null UUID
if len(src) == 0 {
return nil
}
// assumes a simple slice of bytes if 16 bytes
// otherwise attempts to parse
if len(src) != 16 {
return uuid.Scan(string(src))
}
copy((*uuid)[:], src)
default:
return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
}
return nil
}
// Value implements sql.Valuer so that UUIDs can be written to databases
// transparently. Currently, UUIDs map to strings. Please consult
// database-specific driver documentation for matching types.
func (uuid UUID) Value() (driver.Value, error) {
return uuid.String(), nil
}
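
Since UUID satisfies both sql.Scanner and driver.Valuer, values pass through database/sql directly. A sketch assuming a hypothetical foo table and a ?-placeholder driver:

package main

import (
	"database/sql"

	"github.com/google/uuid"
)

func insert(db *sql.DB, name string) error {
	// Value() hands the driver the UUID's string form.
	_, err := db.Exec("INSERT INTO foo (id, name) VALUES (?, ?)", uuid.New(), name)
	return err
}

func lookup(db *sql.DB, name string) (uuid.UUID, error) {
	var id uuid.UUID
	// Scan accepts string or []byte columns, per the comment above.
	err := db.QueryRow("SELECT id FROM foo WHERE name = ?", name).Scan(&id)
	return id, err
}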

123
vendor/github.com/google/uuid/time.go generated vendored Normal file
View file

@ -0,0 +1,123 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
"sync"
"time"
)
// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
// 1582.
type Time int64
const (
lillian = 2299160 // Julian day of 15 Oct 1582
unix = 2440587 // Julian day of 1 Jan 1970
epoch = unix - lillian // Days between epochs
g1582 = epoch * 86400 // seconds between epochs
g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
)
var (
timeMu sync.Mutex
lasttime uint64 // last time we returned
clockSeq uint16 // clock sequence for this run
timeNow = time.Now // for testing
)
// UnixTime converts t to the number of seconds and nanoseconds since the Unix
// epoch of 1 Jan 1970.
func (t Time) UnixTime() (sec, nsec int64) {
sec = int64(t - g1582ns100)
nsec = (sec % 10000000) * 100
sec /= 10000000
return sec, nsec
}
// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
// clock sequence as well as adjusting the clock sequence as needed. An error
// is returned if the current time cannot be determined.
func GetTime() (Time, uint16, error) {
defer timeMu.Unlock()
timeMu.Lock()
return getTime()
}
func getTime() (Time, uint16, error) {
t := timeNow()
// If we don't have a clock sequence already, set one.
if clockSeq == 0 {
setClockSequence(-1)
}
now := uint64(t.UnixNano()/100) + g1582ns100
// If time has gone backwards with this clock sequence then we
// increment the clock sequence
if now <= lasttime {
clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
}
lasttime = now
return Time(now), clockSeq, nil
}
// ClockSequence returns the current clock sequence, generating one if not
// already set. The clock sequence is only used for Version 1 UUIDs.
//
// The uuid package does not use global static storage for the clock sequence or
// the last time a UUID was generated. Unless SetClockSequence is used, a new
// random clock sequence is generated the first time a clock sequence is
// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
func ClockSequence() int {
defer timeMu.Unlock()
timeMu.Lock()
return clockSequence()
}
func clockSequence() int {
if clockSeq == 0 {
setClockSequence(-1)
}
return int(clockSeq & 0x3fff)
}
// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
// -1 causes a new sequence to be generated.
func SetClockSequence(seq int) {
defer timeMu.Unlock()
timeMu.Lock()
setClockSequence(seq)
}
func setClockSequence(seq int) {
if seq == -1 {
var b [2]byte
randomBits(b[:]) // clock sequence
seq = int(b[0])<<8 | int(b[1])
}
oldSeq := clockSeq
clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
if oldSeq != clockSeq {
lasttime = 0
}
}
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
// uuid. The time is only defined for version 1 and 2 UUIDs.
func (uuid UUID) Time() Time {
time := int64(binary.BigEndian.Uint32(uuid[0:4]))
time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
return Time(time)
}
// ClockSequence returns the clock sequence encoded in uuid.
// The clock sequence is only well defined for version 1 and 2 UUIDs.
func (uuid UUID) ClockSequence() int {
return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
}
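
A sketch of recovering the embedded timestamp from a Version 1 UUID via Time and UnixTime; the shift between the 1582 and Unix epochs is handled by the constants above.

package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"
)

func main() {
	id := uuid.Must(uuid.NewUUID())   // Version 1 embeds the creation time
	sec, nsec := id.Time().UnixTime() // 100ns ticks since 1582 -> Unix epoch
	fmt.Println(time.Unix(sec, nsec)) // approximately time.Now(), at 100ns resolution
}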

43
vendor/github.com/google/uuid/util.go generated vendored Normal file
View file

@ -0,0 +1,43 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"io"
)
// randomBits completely fills slice b with random data.
func randomBits(b []byte) {
if _, err := io.ReadFull(rander, b); err != nil {
panic(err.Error()) // rand should never fail
}
}
// xvalues returns the value of a byte as a hexadecimal digit or 255.
var xvalues = [256]byte{
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
}
// xtob converts hex characters x1 and x2 into a byte.
func xtob(x1, x2 byte) (byte, bool) {
b1 := xvalues[x1]
b2 := xvalues[x2]
return (b1 << 4) | b2, b1 != 255 && b2 != 255
}

294
vendor/github.com/google/uuid/uuid.go generated vendored Normal file
View file

@ -0,0 +1,294 @@
// Copyright 2018 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"bytes"
"crypto/rand"
"encoding/hex"
"errors"
"fmt"
"io"
"strings"
"sync"
)
// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
// 4122.
type UUID [16]byte
// A Version represents a UUID's version.
type Version byte
// A Variant represents a UUID's variant.
type Variant byte
// Constants returned by Variant.
const (
Invalid = Variant(iota) // Invalid UUID
RFC4122 // The variant specified in RFC4122
Reserved // Reserved, NCS backward compatibility.
Microsoft // Reserved, Microsoft Corporation backward compatibility.
Future // Reserved for future definition.
)
const randPoolSize = 16 * 16
var (
rander = rand.Reader // random function
poolEnabled = false
poolMu sync.Mutex
poolPos = randPoolSize // protected with poolMu
pool [randPoolSize]byte // protected with poolMu
)
type invalidLengthError struct{ len int }
func (err invalidLengthError) Error() string {
return fmt.Sprintf("invalid UUID length: %d", err.len)
}
// IsInvalidLengthError is a matcher function for the custom error type invalidLengthError.
func IsInvalidLengthError(err error) bool {
_, ok := err.(invalidLengthError)
return ok
}
// Parse decodes s into a UUID or returns an error. Both the standard UUID
// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
func Parse(s string) (UUID, error) {
var uuid UUID
switch len(s) {
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36:
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9:
if strings.ToLower(s[:9]) != "urn:uuid:" {
return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
}
s = s[9:]
// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
case 36 + 2:
s = s[1:]
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
case 32:
var ok bool
for i := range uuid {
uuid[i], ok = xtob(s[i*2], s[i*2+1])
if !ok {
return uuid, errors.New("invalid UUID format")
}
}
return uuid, nil
default:
return uuid, invalidLengthError{len(s)}
}
// s is now at least 36 bytes long
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
return uuid, errors.New("invalid UUID format")
}
for i, x := range [16]int{
0, 2, 4, 6,
9, 11,
14, 16,
19, 21,
24, 26, 28, 30, 32, 34} {
v, ok := xtob(s[x], s[x+1])
if !ok {
return uuid, errors.New("invalid UUID format")
}
uuid[i] = v
}
return uuid, nil
}
// ParseBytes is like Parse, except it parses a byte slice instead of a string.
func ParseBytes(b []byte) (UUID, error) {
var uuid UUID
switch len(b) {
case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
}
b = b[9:]
case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
b = b[1:]
case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
var ok bool
for i := 0; i < 32; i += 2 {
uuid[i/2], ok = xtob(b[i], b[i+1])
if !ok {
return uuid, errors.New("invalid UUID format")
}
}
return uuid, nil
default:
return uuid, invalidLengthError{len(b)}
}
// b is now at least 36 bytes long
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
return uuid, errors.New("invalid UUID format")
}
for i, x := range [16]int{
0, 2, 4, 6,
9, 11,
14, 16,
19, 21,
24, 26, 28, 30, 32, 34} {
v, ok := xtob(b[x], b[x+1])
if !ok {
return uuid, errors.New("invalid UUID format")
}
uuid[i] = v
}
return uuid, nil
}
// MustParse is like Parse but panics if the string cannot be parsed.
// It simplifies safe initialization of global variables holding compiled UUIDs.
func MustParse(s string) UUID {
uuid, err := Parse(s)
if err != nil {
panic(`uuid: Parse(` + s + `): ` + err.Error())
}
return uuid
}
// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
// does not have a length of 16. The bytes are copied from the slice.
func FromBytes(b []byte) (uuid UUID, err error) {
err = uuid.UnmarshalBinary(b)
return uuid, err
}
// Must returns uuid if err is nil and panics otherwise.
func Must(uuid UUID, err error) UUID {
if err != nil {
panic(err)
}
return uuid
}
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx,
// or "" if uuid is invalid.
func (uuid UUID) String() string {
var buf [36]byte
encodeHex(buf[:], uuid)
return string(buf[:])
}
// URN returns the RFC 2141 URN form of uuid,
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
func (uuid UUID) URN() string {
var buf [36 + 9]byte
copy(buf[:], "urn:uuid:")
encodeHex(buf[9:], uuid)
return string(buf[:])
}
func encodeHex(dst []byte, uuid UUID) {
hex.Encode(dst, uuid[:4])
dst[8] = '-'
hex.Encode(dst[9:13], uuid[4:6])
dst[13] = '-'
hex.Encode(dst[14:18], uuid[6:8])
dst[18] = '-'
hex.Encode(dst[19:23], uuid[8:10])
dst[23] = '-'
hex.Encode(dst[24:], uuid[10:])
}
// Variant returns the variant encoded in uuid.
func (uuid UUID) Variant() Variant {
switch {
case (uuid[8] & 0xc0) == 0x80:
return RFC4122
case (uuid[8] & 0xe0) == 0xc0:
return Microsoft
case (uuid[8] & 0xe0) == 0xe0:
return Future
default:
return Reserved
}
}
// Version returns the version of uuid.
func (uuid UUID) Version() Version {
return Version(uuid[6] >> 4)
}
func (v Version) String() string {
if v > 15 {
return fmt.Sprintf("BAD_VERSION_%d", v)
}
return fmt.Sprintf("VERSION_%d", v)
}
func (v Variant) String() string {
switch v {
case RFC4122:
return "RFC4122"
case Reserved:
return "Reserved"
case Microsoft:
return "Microsoft"
case Future:
return "Future"
case Invalid:
return "Invalid"
}
return fmt.Sprintf("BadVariant%d", int(v))
}
// SetRand sets the random number generator to r, which implements io.Reader.
// If r.Read returns an error when the package requests random data then
// a panic will be issued.
//
// Calling SetRand with nil sets the random number generator to the default
// generator.
func SetRand(r io.Reader) {
if r == nil {
rander = rand.Reader
return
}
rander = r
}
// EnableRandPool enables internal randomness pool used for Random
// (Version 4) UUID generation. The pool contains random bytes read from
// the random number generator on demand in batches. Enabling the pool
// may improve the UUID generation throughput significantly.
//
// Since the pool is stored on the Go heap, this feature may be a bad fit
// for security sensitive applications.
//
// Both EnableRandPool and DisableRandPool are not thread-safe and should
// only be called when there is no possibility that New or any other
// UUID Version 4 generation function will be called concurrently.
func EnableRandPool() {
poolEnabled = true
}
// DisableRandPool disables the randomness pool if it was previously
// enabled with EnableRandPool.
//
// Both EnableRandPool and DisableRandPool are not thread-safe and should
// only be called when there is no possibility that New or any other
// UUID Version 4 generation function will be called concurrently.
func DisableRandPool() {
poolEnabled = false
defer poolMu.Unlock()
poolMu.Lock()
poolPos = randPoolSize
}
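
The four accepted encodings from the Parse doc comment, exercised side by side (a sketch; all four strings decode to the same namespace UUID):

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	forms := []string{
		"6ba7b810-9dad-11d1-80b4-00c04fd430c8",          // canonical
		"urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8", // RFC 2141 URN
		"{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",        // Microsoft braces
		"6ba7b8109dad11d180b400c04fd430c8",              // raw hex
	}
	for _, s := range forms {
		id, err := uuid.Parse(s)
		fmt.Println(id, err) // same UUID, nil error, for every form
	}
}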

44
vendor/github.com/google/uuid/version1.go generated vendored Normal file
View file

@ -0,0 +1,44 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
)
// NewUUID returns a Version 1 UUID based on the current NodeID and clock
// sequence, and the current time. If the NodeID has not been set by SetNodeID
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
// be set NewUUID returns Nil. If the clock sequence has not been set by
// SetClockSequence then it will be set automatically. If GetTime fails to
// return the current time, NewUUID returns Nil and an error.
//
// In most cases, New should be used.
func NewUUID() (UUID, error) {
var uuid UUID
now, seq, err := GetTime()
if err != nil {
return uuid, err
}
timeLow := uint32(now & 0xffffffff)
timeMid := uint16((now >> 32) & 0xffff)
timeHi := uint16((now >> 48) & 0x0fff)
timeHi |= 0x1000 // Version 1
binary.BigEndian.PutUint32(uuid[0:], timeLow)
binary.BigEndian.PutUint16(uuid[4:], timeMid)
binary.BigEndian.PutUint16(uuid[6:], timeHi)
binary.BigEndian.PutUint16(uuid[8:], seq)
nodeMu.Lock()
if nodeID == zeroID {
setNodeInterface("")
}
copy(uuid[10:], nodeID[:])
nodeMu.Unlock()
return uuid, nil
}
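
A sketch of what NewUUID stamps into the result; the version and variant land in the bit positions set above.

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	id, err := uuid.NewUUID()
	if err != nil {
		panic(err) // only possible if GetTime fails
	}
	fmt.Println(id.Version(), id.Variant()) // VERSION_1 RFC4122
	fmt.Println(id.ClockSequence())         // the 14-bit sequence from time.go
}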

76
vendor/github.com/google/uuid/version4.go generated vendored Normal file
View file

@ -0,0 +1,76 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import "io"
// New creates a new random UUID or panics. New is equivalent to
// the expression
//
// uuid.Must(uuid.NewRandom())
func New() UUID {
return Must(NewRandom())
}
// NewString creates a new random UUID and returns it as a string or panics.
// NewString is equivalent to the expression
//
// uuid.New().String()
func NewString() string {
return Must(NewRandom()).String()
}
// NewRandom returns a Random (Version 4) UUID.
//
// The strength of the UUIDs is based on the strength of the crypto/rand
// package.
//
// Uses the randomness pool if it was enabled with EnableRandPool.
//
// A note about uniqueness derived from the UUID Wikipedia entry:
//
// Randomly generated UUIDs have 122 random bits. One's annual risk of being
// hit by a meteorite is estimated to be one chance in 17 billion, that
// means the probability is about 0.00000000006 (6 × 10⁻¹¹),
// equivalent to the odds of creating a few tens of trillions of UUIDs in a
// year and having one duplicate.
func NewRandom() (UUID, error) {
if !poolEnabled {
return NewRandomFromReader(rander)
}
return newRandomFromPool()
}
// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
func NewRandomFromReader(r io.Reader) (UUID, error) {
var uuid UUID
_, err := io.ReadFull(r, uuid[:])
if err != nil {
return Nil, err
}
uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
return uuid, nil
}
func newRandomFromPool() (UUID, error) {
var uuid UUID
poolMu.Lock()
if poolPos == randPoolSize {
_, err := io.ReadFull(rander, pool[:])
if err != nil {
poolMu.Unlock()
return Nil, err
}
poolPos = 0
}
copy(uuid[:], pool[poolPos:(poolPos+16)])
poolPos += 16
poolMu.Unlock()
uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
return uuid, nil
}
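
A sketch of opting into the pool for bulk generation; per the doc comment above, toggle it before any concurrent New calls.

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	uuid.EnableRandPool() // one crypto/rand read now covers 16 UUIDs
	ids := make(map[uuid.UUID]bool, 1000)
	for i := 0; i < 1000; i++ {
		ids[uuid.New()] = true
	}
	fmt.Println(len(ids)) // 1000: still unique, just cheaper to mint
}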

View file

@ -0,0 +1,3 @@
{
"v2": "2.4.0"
}

18
vendor/github.com/googleapis/gax-go/v2/CHANGES.md generated vendored Normal file
View file

@ -0,0 +1,18 @@
# Changelog
## [2.4.0](https://github.com/googleapis/gax-go/compare/v2.3.0...v2.4.0) (2022-05-09)
### Features
* **v2:** add OnHTTPCodes CallOption ([#188](https://github.com/googleapis/gax-go/issues/188)) ([ba7c534](https://github.com/googleapis/gax-go/commit/ba7c5348363ab6c33e1cee3c03c0be68a46ca07c))
### Bug Fixes
* **v2/apierror:** use errors.As in FromError ([#189](https://github.com/googleapis/gax-go/issues/189)) ([f30f05b](https://github.com/googleapis/gax-go/commit/f30f05be583828f4c09cca4091333ea88ff8d79e))
### Miscellaneous Chores
* **v2:** bump release-please processing ([#192](https://github.com/googleapis/gax-go/issues/192)) ([56172f9](https://github.com/googleapis/gax-go/commit/56172f971d1141d7687edaac053ad3470af76719))

View file

@ -32,6 +32,7 @@
package apierror
import (
"errors"
"fmt"
"strings"
@ -215,7 +216,8 @@ func FromError(err error) (*APIError, bool) {
ae := APIError{err: err}
st, isStatus := status.FromError(err)
herr, isHTTPErr := err.(*googleapi.Error)
var herr *googleapi.Error
isHTTPErr := errors.As(err, &herr)
switch {
case isStatus:

View file

@ -30,9 +30,11 @@
package gax
import (
"errors"
"math/rand"
"time"
"google.golang.org/api/googleapi"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@ -119,6 +121,41 @@ func (r *boRetryer) Retry(err error) (time.Duration, bool) {
return 0, false
}
// OnHTTPCodes returns a Retryer that retries if and only if
// the previous attempt returns a googleapi.Error whose status code is listed in
// cc. Pause times between retries are specified by bo.
//
// bo is only used for its parameters; each Retryer has its own copy.
func OnHTTPCodes(bo Backoff, cc ...int) Retryer {
codes := make(map[int]bool, len(cc))
for _, c := range cc {
codes[c] = true
}
return &httpRetryer{
backoff: bo,
codes: codes,
}
}
type httpRetryer struct {
backoff Backoff
codes map[int]bool
}
func (r *httpRetryer) Retry(err error) (time.Duration, bool) {
var gerr *googleapi.Error
if !errors.As(err, &gerr) {
return 0, false
}
if r.codes[gerr.Code] {
return r.backoff.Pause(), true
}
return 0, false
}
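
A sketch of plugging the new retryer into gax.Invoke; the RPC body is a placeholder, and 429/503 are merely example retryable codes.

package main

import (
	"context"
	"time"

	gax "github.com/googleapis/gax-go/v2"
)

func callWithRetry(ctx context.Context, doRPC func(context.Context) error) error {
	return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
		return doRPC(ctx)
	}, gax.WithRetry(func() gax.Retryer {
		// Retry only on 429/503, backing off 250ms -> 5s with jitter.
		return gax.OnHTTPCodes(gax.Backoff{
			Initial:    250 * time.Millisecond,
			Max:        5 * time.Second,
			Multiplier: 2,
		}, 429, 503)
	}))
}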
// Backoff implements exponential backoff. The wait time between retries is a
// random value between 0 and the "retry period" - the time between retries. The
// retry period starts at Initial and increases by the factor of Multiplier

View file

@ -35,5 +35,7 @@
// to simplify code generation and to provide more convenient and idiomatic API surfaces.
package gax
import "github.com/googleapis/gax-go/v2/internal"
// Version specifies the gax-go version being used.
const Version = "2.3.0"
const Version = internal.Version

View file

@ -0,0 +1,33 @@
// Copyright 2022, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package internal
// Version is the current tagged release of the library.
const Version = "2.4.0"

View file

@ -0,0 +1,10 @@
{
"release-type": "go-yoshi",
"separate-pull-requests": true,
"include-component-in-tag": false,
"packages": {
"v2": {
"component": "v2"
}
}
}

View file

@ -17,6 +17,10 @@ This package provides various compression algorithms.
# changelog
* May 5, 2022 (v1.15.3)
* zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572)
* s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575)
* Apr 26, 2022 (v1.15.2)
* zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537)
* zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539)

View file

@ -24,7 +24,7 @@ func (f *decompressor) huffmanBytesBuffer() {
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
fnb, fb := f.nb, f.b
fnb, fb, dict := f.nb, f.b, &f.dict
switch f.stepState {
case stateInit:
@ -82,9 +82,9 @@ readLiteral:
var length int
switch {
case v < 256:
f.dict.writeByte(byte(v))
if f.dict.availWrite() == 0 {
f.toRead = f.dict.readFlush()
dict.writeByte(byte(v))
if dict.availWrite() == 0 {
f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanBytesBuffer
f.stepState = stateInit
f.b, f.nb = fb, fnb
@ -227,10 +227,10 @@ readLiteral:
}
// No check on length; encoding can be prescient.
if dist > uint32(f.dict.histSize()) {
if dist > uint32(dict.histSize()) {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
fmt.Println("dist > dict.histSize():", dist, dict.histSize())
}
f.err = CorruptInputError(f.roffset)
return
@ -243,14 +243,14 @@ readLiteral:
copyHistory:
// Perform a backwards copy according to RFC section 3.2.3.
{
cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
if cnt == 0 {
cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
cnt = dict.writeCopy(f.copyDist, f.copyLen)
}
f.copyLen -= cnt
if f.dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = f.dict.readFlush()
if dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work
f.stepState = stateDict
f.b, f.nb = fb, fnb
@ -275,7 +275,7 @@ func (f *decompressor) huffmanBytesReader() {
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
fnb, fb := f.nb, f.b
fnb, fb, dict := f.nb, f.b, &f.dict
switch f.stepState {
case stateInit:
@ -333,9 +333,9 @@ readLiteral:
var length int
switch {
case v < 256:
f.dict.writeByte(byte(v))
if f.dict.availWrite() == 0 {
f.toRead = f.dict.readFlush()
dict.writeByte(byte(v))
if dict.availWrite() == 0 {
f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanBytesReader
f.stepState = stateInit
f.b, f.nb = fb, fnb
@ -478,10 +478,10 @@ readLiteral:
}
// No check on length; encoding can be prescient.
if dist > uint32(f.dict.histSize()) {
if dist > uint32(dict.histSize()) {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
fmt.Println("dist > dict.histSize():", dist, dict.histSize())
}
f.err = CorruptInputError(f.roffset)
return
@ -494,14 +494,14 @@ readLiteral:
copyHistory:
// Perform a backwards copy according to RFC section 3.2.3.
{
cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
if cnt == 0 {
cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
cnt = dict.writeCopy(f.copyDist, f.copyLen)
}
f.copyLen -= cnt
if f.dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = f.dict.readFlush()
if dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanBytesReader // We need to continue this work
f.stepState = stateDict
f.b, f.nb = fb, fnb
@ -526,7 +526,7 @@ func (f *decompressor) huffmanBufioReader() {
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
fnb, fb := f.nb, f.b
fnb, fb, dict := f.nb, f.b, &f.dict
switch f.stepState {
case stateInit:
@ -584,9 +584,9 @@ readLiteral:
var length int
switch {
case v < 256:
f.dict.writeByte(byte(v))
if f.dict.availWrite() == 0 {
f.toRead = f.dict.readFlush()
dict.writeByte(byte(v))
if dict.availWrite() == 0 {
f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanBufioReader
f.stepState = stateInit
f.b, f.nb = fb, fnb
@ -729,10 +729,10 @@ readLiteral:
}
// No check on length; encoding can be prescient.
if dist > uint32(f.dict.histSize()) {
if dist > uint32(dict.histSize()) {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
fmt.Println("dist > dict.histSize():", dist, dict.histSize())
}
f.err = CorruptInputError(f.roffset)
return
@ -745,14 +745,14 @@ readLiteral:
copyHistory:
// Perform a backwards copy according to RFC section 3.2.3.
{
cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
if cnt == 0 {
cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
cnt = dict.writeCopy(f.copyDist, f.copyLen)
}
f.copyLen -= cnt
if f.dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = f.dict.readFlush()
if dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanBufioReader // We need to continue this work
f.stepState = stateDict
f.b, f.nb = fb, fnb
@ -777,7 +777,7 @@ func (f *decompressor) huffmanStringsReader() {
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
fnb, fb := f.nb, f.b
fnb, fb, dict := f.nb, f.b, &f.dict
switch f.stepState {
case stateInit:
@ -835,9 +835,9 @@ readLiteral:
var length int
switch {
case v < 256:
f.dict.writeByte(byte(v))
if f.dict.availWrite() == 0 {
f.toRead = f.dict.readFlush()
dict.writeByte(byte(v))
if dict.availWrite() == 0 {
f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanStringsReader
f.stepState = stateInit
f.b, f.nb = fb, fnb
@ -980,10 +980,10 @@ readLiteral:
}
// No check on length; encoding can be prescient.
if dist > uint32(f.dict.histSize()) {
if dist > uint32(dict.histSize()) {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
fmt.Println("dist > dict.histSize():", dist, dict.histSize())
}
f.err = CorruptInputError(f.roffset)
return
@ -996,14 +996,14 @@ readLiteral:
copyHistory:
// Perform a backwards copy according to RFC section 3.2.3.
{
cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
if cnt == 0 {
cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
cnt = dict.writeCopy(f.copyDist, f.copyLen)
}
f.copyLen -= cnt
if f.dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = f.dict.readFlush()
if dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanStringsReader // We need to continue this work
f.stepState = stateDict
f.b, f.nb = fb, fnb
@ -1028,7 +1028,7 @@ func (f *decompressor) huffmanGenericReader() {
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
fnb, fb := f.nb, f.b
fnb, fb, dict := f.nb, f.b, &f.dict
switch f.stepState {
case stateInit:
@ -1086,9 +1086,9 @@ readLiteral:
var length int
switch {
case v < 256:
f.dict.writeByte(byte(v))
if f.dict.availWrite() == 0 {
f.toRead = f.dict.readFlush()
dict.writeByte(byte(v))
if dict.availWrite() == 0 {
f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanGenericReader
f.stepState = stateInit
f.b, f.nb = fb, fnb
@ -1231,10 +1231,10 @@ readLiteral:
}
// No check on length; encoding can be prescient.
if dist > uint32(f.dict.histSize()) {
if dist > uint32(dict.histSize()) {
f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
fmt.Println("dist > dict.histSize():", dist, dict.histSize())
}
f.err = CorruptInputError(f.roffset)
return
@ -1247,14 +1247,14 @@ readLiteral:
copyHistory:
// Perform a backwards copy according to RFC section 3.2.3.
{
cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
if cnt == 0 {
cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
cnt = dict.writeCopy(f.copyDist, f.copyLen)
}
f.copyLen -= cnt
if f.dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = f.dict.readFlush()
if dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanGenericReader // We need to continue this work
f.stepState = stateDict
f.b, f.nb = fb, fnb

View file

@ -1,5 +0,0 @@
package huff0
//go:generate go run generate.go
//go:generate asmfmt -w decompress_amd64.s
//go:generate asmfmt -w decompress_8b_amd64.s

View file

@ -1,488 +0,0 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
// const stream = 0
// br0.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
MOVQ bitReaderShifted_value(br0), br_value
MOVQ bitReaderShifted_off(br0), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill0
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br0), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br0.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill0:
// val0 := br0.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br0.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br0.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br0.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 0(buffer)(off*1)
// SECOND PART:
// val2 := br0.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br0.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br0.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br0.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 0+2(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
MOVQ br_value, bitReaderShifted_value(br0)
MOVQ br_offset, bitReaderShifted_off(br0)
// const stream = 1
// br1.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
MOVQ bitReaderShifted_value(br1), br_value
MOVQ bitReaderShifted_off(br1), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill1
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br1), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br1.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill1:
// val0 := br1.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br1.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br1.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br1.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 256(buffer)(off*1)
// SECOND PART:
// val2 := br1.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br1.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br1.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br1.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 256+2(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
MOVQ br_value, bitReaderShifted_value(br1)
MOVQ br_offset, bitReaderShifted_off(br1)
// const stream = 2
// br2.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
MOVQ bitReaderShifted_value(br2), br_value
MOVQ bitReaderShifted_off(br2), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill2
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br2), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br2.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill2:
// val0 := br2.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br2.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br2.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br2.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 512(buffer)(off*1)
// SECOND PART:
// val2 := br2.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br2.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br2.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br2.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 512+2(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
MOVQ br_value, bitReaderShifted_value(br2)
MOVQ br_offset, bitReaderShifted_off(br2)
// const stream = 3
// br3.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
MOVQ bitReaderShifted_value(br3), br_value
MOVQ bitReaderShifted_off(br3), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill3
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br3), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br3.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill3:
// val0 := br3.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br3.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br3.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br3.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 768(buffer)(off*1)
// SECOND PART:
// val2 := br3.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br3.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br3.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br3.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 768+2(buffer)(off*1)
// update the bit reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
MOVQ br_value, bitReaderShifted_value(br3)
MOVQ br_offset, bitReaderShifted_off(br3)
ADDQ $4, off // off += 4
TESTB DH, DH // any br[i].ofs < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3

View file

@ -1,197 +0,0 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
{{ define "decode_2_values_x86" }}
// const stream = {{ var "id" }}
// br{{ var "id"}}.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill{{ var "id" }}
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br{{ var "id"}}.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill{{ var "id" }}:
// val0 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br{{ var "id"}}.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br{{ var "id"}}.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
// SECOND PART:
// val2 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val2&mask]
MOVW 0(table)(AX*2), AX // AX - v2
// br{{ var "id"}}.advance(uint8(v2.entry))
MOVB AH, BL // BL = uint8(v2.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val3&mask]
MOVW 0(table)(AX*2), AX // AX - v3
// br{{ var "id"}}.advance(uint8(v3.entry))
MOVB AH, BH // BH = uint8(v3.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, {{ var "bufofs" }}+2(buffer)(off*1)
// update the bitReader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
{{ end }}
{{ set "id" "0" }}
{{ set "ofs" "0" }}
{{ set "bufofs" "0" }} {{/* id * bufoff */}}
{{ template "decode_2_values_x86" . }}
{{ set "id" "1" }}
{{ set "ofs" "8" }}
{{ set "bufofs" "256" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "2" }}
{{ set "ofs" "16" }}
{{ set "bufofs" "512" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "3" }}
{{ set "ofs" "24" }}
{{ set "bufofs" "768" }}
{{ template "decode_2_values_x86" . }}
ADDQ $4, off // off += 4
TESTB DH, DH // any br[i].ofs < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3


@ -13,19 +13,30 @@ import (
// decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog > 8.
//go:noescape
func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
func decompress4x_main_loop_amd64(ctx *decompress4xContext)
// decompress4x_8b_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog <= 8 which decodes 4 entries
// per loop.
//go:noescape
func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
// fallback8BitSize is the size where using Go version is faster.
const fallback8BitSize = 800
type decompress4xContext struct {
pbr0 *bitReaderShifted
pbr1 *bitReaderShifted
pbr2 *bitReaderShifted
pbr3 *bitReaderShifted
peekBits uint8
out *byte
dstEvery int
tbl *dEntrySingle
decoded int
limit *byte
}
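Passing one *decompress4xContext instead of eight scalar arguments keeps the assembler ABI simple: the asm addresses fields via offsets emitted into go_asm.h, and peekBits is precomputed on the Go side. A minimal sketch of that precomputation (it mirrors the expression used in the struct literal below; not a new API):
// With a 64-bit bit container, shifting right by (64 - actualTableLog)
// leaves exactly the next table index in the low bits.
func peekShift(actualTableLog uint8) uint8 {
	return (64 - actualTableLog) & 63
}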
// Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
@ -42,6 +53,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
if cap(dst) < fallback8BitSize && use8BitTables {
return d.decompress4X8bit(dst, src)
}
var br [4]bitReaderShifted
// Decode "jump table"
start := 6
@ -71,70 +83,28 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
const tlMask = tlSize - 1
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
buf := d.buffer()
var off uint8
var decoded int
const debug = false
// see: bitReaderShifted.peekBitsFast()
peekBits := uint8((64 - d.actualTableLog) & 63)
// Decode 2 values from each decoder/loop.
const bufoff = 256
for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break
if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) {
ctx := decompress4xContext{
pbr0: &br[0],
pbr1: &br[1],
pbr2: &br[2],
pbr3: &br[3],
peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
out: &out[0],
dstEvery: dstEvery,
tbl: &single[0],
limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last.
}
if use8BitTables {
off = decompress4x_8b_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
decompress4x_8b_main_loop_amd64(&ctx)
} else {
off = decompress4x_main_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
}
if debug {
fmt.Print("DEBUG: ")
fmt.Printf("off=%d,", off)
for i := 0; i < 4; i++ {
fmt.Printf(" br[%d]={bitsRead=%d, value=%x, off=%d}",
i, br[i].bitsRead, br[i].value, br[i].off)
}
fmt.Println("")
decompress4x_main_loop_amd64(&ctx)
}
if off != 0 {
break
}
if bufoff > dstEvery {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
copy(out, buf[0][:])
copy(out[dstEvery:], buf[1][:])
copy(out[dstEvery*2:], buf[2][:])
copy(out[dstEvery*3:], buf[3][:])
out = out[bufoff:]
decoded += bufoff * 4
// There must at least be 3 buffers left.
if len(out) < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
}
if off > 0 {
ioff := int(off)
if len(out) < dstEvery*3+ioff {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 3")
}
copy(out, buf[0][:off])
copy(out[dstEvery:], buf[1][:off])
copy(out[dstEvery*2:], buf[2][:off])
copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4
out = out[off:]
decoded = ctx.decoded
out = out[decoded/4:]
}
// Decode remaining.
@ -150,7 +120,6 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
for bitsLeft > 0 {
br.fill()
if offset >= endsAt {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4")
}
@ -164,7 +133,6 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
offset++
}
if offset != endsAt {
d.bufs.Put(buf)
return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
}
decoded += offset - dstEvery*i
@ -173,7 +141,6 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
return nil, err
}
}
d.bufs.Put(buf)
if dstSize != decoded {
return nil, errors.New("corruption detected: short output block")
}
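For context, a hedged round-trip through the public huff0 API that exercises the patched path (sketch only; handling of ErrIncompressible/ErrUseRLE omitted):
package main

import "github.com/klauspost/compress/huff0"

func roundTrip(in []byte) ([]byte, error) {
	comp, _, err := huff0.Compress4X(in, nil)
	if err != nil {
		return nil, err
	}
	var s huff0.Scratch
	sc, data, err := huff0.ReadTable(comp, &s)
	if err != nil {
		return nil, err
	}
	// Decompress4X requires dst capacity to equal the uncompressed size.
	dst := make([]byte, 0, len(in))
	return sc.Decoder().Decompress4X(dst, data)
}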

File diff suppressed because it is too large


@ -1,195 +0,0 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#ifdef GOAMD64_v4
#ifndef GOAMD64_v3
#define GOAMD64_v3
#endif
#endif
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
{{ define "decode_2_values_x86" }}
// const stream = {{ var "id" }}
// br{{ var "id"}}.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
// We must have at least 2 * max tablelog left
CMPQ br_bits_read, $64-22
JBE skip_fill{{ var "id" }}
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
MOVQ br_bits_read, CX
SHLQ CL, AX
#endif
ORQ AX, br_value
// exhausted = exhausted || (br{{ var "id"}}.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill{{ var "id" }}:
// val0 := br{{ var "id"}}.peekTopBits(peekBits)
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br{{ var "id"}}.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
// val1 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br{{ var "id"}}.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
// update the bitReader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
{{ end }}
{{ set "id" "0" }}
{{ set "ofs" "0" }}
{{ set "bufofs" "0" }} {{/* id * bufoff */}}
{{ template "decode_2_values_x86" . }}
{{ set "id" "1" }}
{{ set "ofs" "8" }}
{{ set "bufofs" "256" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "2" }}
{{ set "ofs" "16" }}
{{ set "bufofs" "512" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "3" }}
{{ set "ofs" "24" }}
{{ set "bufofs" "768" }}
{{ template "decode_2_values_x86" . }}
ADDQ $2, off // off += 2
TESTB DH, DH // any br[i].ofs < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3
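The GOAMD64_v3 branches above use BMI2 SHLXQ/SHRXQ, which take the shift count in an arbitrary register and avoid shuttling it through CL. Which branch is assembled is decided at build time; since Go 1.18 the selected microarchitecture level defines the matching macro for the assembler, and only the exact level, hence the v4-implies-v3 shim at the top of the file:
// GOAMD64=v3 go build ./...   -> defines GOAMD64_v3 for .s files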


@ -451,7 +451,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
got := d.current.crc.Sum64()
var tmp [4]byte
binary.LittleEndian.PutUint32(tmp[:], uint32(got))
if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) && !ignoreCRC {
if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) {
if debugDecoder {
println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)")
}
@ -534,10 +534,16 @@ func (d *Decoder) nextBlockSync() (ok bool) {
}
// Update/Check CRC
if !d.o.ignoreChecksum && d.frame.HasCheckSum {
d.frame.crc.Write(d.current.b)
if d.frame.HasCheckSum {
if !d.o.ignoreChecksum {
d.frame.crc.Write(d.current.b)
}
if d.current.d.Last {
d.current.err = d.frame.checkCRC()
if !d.o.ignoreChecksum {
d.current.err = d.frame.checkCRC()
} else {
d.current.err = d.frame.consumeCRC()
}
if d.current.err != nil {
println("CRC error:", d.current.err)
return false


@ -310,7 +310,7 @@ func (d *frameDec) checkCRC() error {
tmp[2] = byte(got >> 16)
tmp[3] = byte(got >> 24)
if !bytes.Equal(tmp[:], want) && !ignoreCRC {
if !bytes.Equal(tmp[:], want) {
if debugDecoder {
println("CRC Check Failed:", tmp[:], "!=", want)
}


@ -1,11 +0,0 @@
//go:build ignorecrc
// +build ignorecrc
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
// ignoreCRC can be used for fuzz testing to ignore CRC values...
const ignoreCRC = true


@ -1,11 +0,0 @@
//go:build !ignorecrc
// +build !ignorecrc
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
// ignoreCRC can be used for fuzz testing to ignore CRC values...
const ignoreCRC = false


@ -1326,30 +1326,30 @@ copy_match:
JA copy_overlapping_match
// Copy non-overlapping match
XORQ R12, R12
ADDQ R13, DI
MOVQ BX, R12
ADDQ R13, BX
copy_2:
MOVUPS (R11)(R12*1), X0
MOVUPS X0, (BX)(R12*1)
MOVUPS (R11), X0
MOVUPS X0, (R12)
ADDQ $0x10, R11
ADDQ $0x10, R12
CMPQ R12, R13
JB copy_2
ADDQ R13, BX
ADDQ R13, DI
SUBQ $0x10, R13
JHI copy_2
JMP handle_loop
// Copy overlapping match
copy_overlapping_match:
XORQ R12, R12
ADDQ R13, DI
copy_slow_3:
MOVB (R11)(R12*1), R14
MOVB R14, (BX)(R12*1)
INCQ R12
CMPQ R12, R13
JB copy_slow_3
ADDQ R13, BX
ADDQ R13, DI
MOVB (R11), R12
MOVB R12, (BX)
INCQ R11
INCQ BX
DECQ R13
JNZ copy_slow_3
handle_loop:
ADDQ $0x18, AX
@ -1826,30 +1826,30 @@ copy_match:
JA copy_overlapping_match
// Copy non-overlapping match
XORQ CX, CX
ADDQ R13, R12
MOVQ R10, CX
ADDQ R13, R10
copy_2:
MOVUPS (AX)(CX*1), X0
MOVUPS X0, (R10)(CX*1)
MOVUPS (AX), X0
MOVUPS X0, (CX)
ADDQ $0x10, AX
ADDQ $0x10, CX
CMPQ CX, R13
JB copy_2
ADDQ R13, R10
ADDQ R13, R12
SUBQ $0x10, R13
JHI copy_2
JMP handle_loop
// Copy overlapping match
copy_overlapping_match:
XORQ CX, CX
ADDQ R13, R12
copy_slow_3:
MOVB (AX)(CX*1), R14
MOVB R14, (R10)(CX*1)
INCQ CX
CMPQ CX, R13
JB copy_slow_3
ADDQ R13, R10
ADDQ R13, R12
MOVB (AX), CL
MOVB CL, (R10)
INCQ AX
INCQ R10
DECQ R13
JNZ copy_slow_3
handle_loop:
MOVQ ctx+16(FP), AX
@ -2333,30 +2333,30 @@ copy_match:
JA copy_overlapping_match
// Copy non-overlapping match
XORQ R12, R12
ADDQ R13, R11
MOVQ R9, R12
ADDQ R13, R9
copy_2:
MOVUPS (CX)(R12*1), X0
MOVUPS X0, (R9)(R12*1)
MOVUPS (CX), X0
MOVUPS X0, (R12)
ADDQ $0x10, CX
ADDQ $0x10, R12
CMPQ R12, R13
JB copy_2
ADDQ R13, R9
ADDQ R13, R11
SUBQ $0x10, R13
JHI copy_2
JMP handle_loop
// Copy overlapping match
copy_overlapping_match:
XORQ R12, R12
ADDQ R13, R11
copy_slow_3:
MOVB (CX)(R12*1), R14
MOVB R14, (R9)(R12*1)
INCQ R12
CMPQ R12, R13
JB copy_slow_3
ADDQ R13, R9
ADDQ R13, R11
MOVB (CX), R12
MOVB R12, (R9)
INCQ CX
INCQ R9
DECQ R13
JNZ copy_slow_3
handle_loop:
MOVQ ctx+16(FP), CX
@ -2862,6 +2862,7 @@ copy_match:
JA copy_overlapping_match
// Copy non-overlapping match
ADDQ R13, R12
XORQ CX, CX
TESTQ $0x00000001, R13
JZ copy_2_word
@ -2900,21 +2901,19 @@ copy_2_test:
CMPQ CX, R13
JB copy_2
ADDQ R13, R10
ADDQ R13, R12
JMP handle_loop
// Copy overlapping match
copy_overlapping_match:
XORQ CX, CX
ADDQ R13, R12
copy_slow_3:
MOVB (AX)(CX*1), R14
MOVB R14, (R10)(CX*1)
INCQ CX
CMPQ CX, R13
JB copy_slow_3
ADDQ R13, R10
ADDQ R13, R12
MOVB (AX), CL
MOVB CL, (R10)
INCQ AX
INCQ R10
DECQ R13
JNZ copy_slow_3
handle_loop:
MOVQ ctx+16(FP), AX
@ -3398,6 +3397,7 @@ copy_match:
JA copy_overlapping_match
// Copy non-overlapping match
ADDQ R13, R11
XORQ R12, R12
TESTQ $0x00000001, R13
JZ copy_2_word
@ -3436,21 +3436,19 @@ copy_2_test:
CMPQ R12, R13
JB copy_2
ADDQ R13, R9
ADDQ R13, R11
JMP handle_loop
// Copy overlapping match
copy_overlapping_match:
XORQ R12, R12
ADDQ R13, R11
copy_slow_3:
MOVB (CX)(R12*1), R14
MOVB R14, (R9)(R12*1)
INCQ R12
CMPQ R12, R13
JB copy_slow_3
ADDQ R13, R9
ADDQ R13, R11
MOVB (CX), R12
MOVB R12, (R9)
INCQ CX
INCQ R9
DECQ R13
JNZ copy_slow_3
handle_loop:
MOVQ ctx+16(FP), CX
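Both rewritten loops implement the usual match-copy split: bulk 16-byte MOVUPS chunks when source and destination cannot overlap (deliberately over-copying into reserved space), and a byte-at-a-time loop when they can. In Go terms (illustrative, not the generated code):
// copyMatch appends length bytes starting offset bytes back in out.
func copyMatch(out []byte, offset, length int) []byte {
	start := len(out) - offset
	if offset >= length {
		// Non-overlapping: a single bulk copy is safe.
		return append(out, out[start:start+length]...)
	}
	// Overlapping: copy byte by byte so freshly written output
	// feeds the bytes that follow.
	for i := 0; i < length; i++ {
		out = append(out, out[start+i])
	}
	return out
}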


@ -197,14 +197,6 @@ func goRuntimeMemStats() memStatsMetrics {
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("gc_cpu_fraction"),
"The fraction of this program's available CPU time used by the GC since the program started.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
valType: GaugeValue,
},
}
}
@ -268,7 +260,6 @@ func (c *baseGoCollector) Collect(ch chan<- Metric) {
quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9)
ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
}
@ -278,6 +269,7 @@ func memstatNamespace(s string) string {
// memStatsMetrics provide description, evaluator, runtime/metrics name, and
// value type for memstat metrics.
// TODO(bwplotka): Remove with end Go 1.16 EOL and replace with runtime/metrics.Description
type memStatsMetrics []struct {
desc *Desc
eval func(*runtime.MemStats) float64


@ -40,13 +40,28 @@ type goCollector struct {
//
// Deprecated: Use collectors.NewGoCollector instead.
func NewGoCollector() Collector {
msMetrics := goRuntimeMemStats()
msMetrics = append(msMetrics, struct {
desc *Desc
eval func(*runtime.MemStats) float64
valType ValueType
}{
// This metric is omitted in Go1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
desc: NewDesc(
memstatNamespace("gc_cpu_fraction"),
"The fraction of this program's available CPU time used by the GC since the program started.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
valType: GaugeValue,
})
return &goCollector{
base: newBaseGoCollector(),
msLast: &runtime.MemStats{},
msRead: runtime.ReadMemStats,
msMaxWait: time.Second,
msMaxAge: 5 * time.Minute,
msMetrics: goRuntimeMemStats(),
msMetrics: msMetrics,
}
}


@ -25,11 +25,71 @@ import (
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/client_golang/prometheus/internal"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/client_golang/prometheus/internal"
)
const (
goGCHeapTinyAllocsObjects = "/gc/heap/tiny/allocs:objects"
goGCHeapAllocsObjects = "/gc/heap/allocs:objects"
goGCHeapFreesObjects = "/gc/heap/frees:objects"
goGCHeapAllocsBytes = "/gc/heap/allocs:bytes"
goGCHeapObjects = "/gc/heap/objects:objects"
goGCHeapGoalBytes = "/gc/heap/goal:bytes"
goMemoryClassesTotalBytes = "/memory/classes/total:bytes"
goMemoryClassesHeapObjectsBytes = "/memory/classes/heap/objects:bytes"
goMemoryClassesHeapUnusedBytes = "/memory/classes/heap/unused:bytes"
goMemoryClassesHeapReleasedBytes = "/memory/classes/heap/released:bytes"
goMemoryClassesHeapFreeBytes = "/memory/classes/heap/free:bytes"
goMemoryClassesHeapStacksBytes = "/memory/classes/heap/stacks:bytes"
goMemoryClassesOSStacksBytes = "/memory/classes/os-stacks:bytes"
goMemoryClassesMetadataMSpanInuseBytes = "/memory/classes/metadata/mspan/inuse:bytes"
goMemoryClassesMetadataMSPanFreeBytes = "/memory/classes/metadata/mspan/free:bytes"
goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes"
goMemoryClassesMetadataMCacheFreeBytes = "/memory/classes/metadata/mcache/free:bytes"
goMemoryClassesProfilingBucketsBytes = "/memory/classes/profiling/buckets:bytes"
goMemoryClassesMetadataOtherBytes = "/memory/classes/metadata/other:bytes"
goMemoryClassesOtherBytes = "/memory/classes/other:bytes"
)
// runtime/metrics names required for runtimeMemStats-like logic.
var rmForMemStats = []string{goGCHeapTinyAllocsObjects,
goGCHeapAllocsObjects,
goGCHeapFreesObjects,
goGCHeapAllocsBytes,
goGCHeapObjects,
goGCHeapGoalBytes,
goMemoryClassesTotalBytes,
goMemoryClassesHeapObjectsBytes,
goMemoryClassesHeapUnusedBytes,
goMemoryClassesHeapReleasedBytes,
goMemoryClassesHeapFreeBytes,
goMemoryClassesHeapStacksBytes,
goMemoryClassesOSStacksBytes,
goMemoryClassesMetadataMSpanInuseBytes,
goMemoryClassesMetadataMSPanFreeBytes,
goMemoryClassesMetadataMCacheInuseBytes,
goMemoryClassesMetadataMCacheFreeBytes,
goMemoryClassesProfilingBucketsBytes,
goMemoryClassesMetadataOtherBytes,
goMemoryClassesOtherBytes,
}
func bestEffortLookupRM(lookup []string) []metrics.Description {
ret := make([]metrics.Description, 0, len(lookup))
for _, rm := range metrics.All() {
for _, m := range lookup {
if m == rm.Name {
ret = append(ret, rm)
}
}
}
return ret
}
type goCollector struct {
opt GoCollectorOptions
base baseGoCollector
// mu protects updates to all fields ensuring a consistent
@ -51,12 +111,46 @@ type goCollector struct {
msMetrics memStatsMetrics
}
const (
// Those are not exposed because the Go collector needs to be moved to another package in v2.
// See issue https://github.com/prometheus/client_golang/issues/1030.
goRuntimeMemStatsCollection uint32 = 1 << iota
goRuntimeMetricsCollection
)
// GoCollectorOptions should not be used directly by anything except the `collectors` package.
// Use it via collectors package instead. See issue
// https://github.com/prometheus/client_golang/issues/1030.
//
// Deprecated: Use collectors.WithGoCollections
type GoCollectorOptions struct {
// EnabledCollections sets which collections the collector exposes on top of the base collection.
// By default it is goRuntimeMemStatsCollection (see defaultGoCollections below).
EnabledCollections uint32
}
func (c GoCollectorOptions) isEnabled(flag uint32) bool {
return c.EnabledCollections&flag != 0
}
const defaultGoCollections = goRuntimeMemStatsCollection
// NewGoCollector is the obsolete version of collectors.NewGoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewGoCollector instead.
func NewGoCollector() Collector {
descriptions := metrics.All()
func NewGoCollector(opts ...func(o *GoCollectorOptions)) Collector {
opt := GoCollectorOptions{EnabledCollections: defaultGoCollections}
for _, o := range opts {
o(&opt)
}
var descriptions []metrics.Description
if opt.isEnabled(goRuntimeMetricsCollection) {
descriptions = metrics.All()
} else if opt.isEnabled(goRuntimeMemStatsCollection) {
descriptions = bestEffortLookupRM(rmForMemStats)
}
// Collect all histogram samples so that we can get their buckets.
// The API guarantees that the buckets are always fixed for the lifetime
@ -67,7 +161,11 @@ func NewGoCollector() Collector {
histograms = append(histograms, metrics.Sample{Name: d.Name})
}
}
metrics.Read(histograms)
if len(histograms) > 0 {
metrics.Read(histograms)
}
bucketsMap := make(map[string][]float64)
for i := range histograms {
bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
@ -83,7 +181,7 @@ func NewGoCollector() Collector {
if !ok {
// Just ignore this metric; we can't do anything with it here.
// If a user decides to use the latest version of Go, we don't want
// to fail here. This condition is tested elsewhere.
// to fail here. This condition is tested in TestExpectedRuntimeMetrics.
continue
}
@ -123,12 +221,18 @@ func NewGoCollector() Collector {
}
metricSet = append(metricSet, m)
}
var msMetrics memStatsMetrics
if opt.isEnabled(goRuntimeMemStatsCollection) {
msMetrics = goRuntimeMemStats()
}
return &goCollector{
opt: opt,
base: newBaseGoCollector(),
rmSampleBuf: sampleBuf,
rmSampleMap: sampleMap,
rmMetrics: metricSet,
msMetrics: goRuntimeMemStats(),
msMetrics: msMetrics,
}
}
@ -163,40 +267,47 @@ func (c *goCollector) Collect(ch chan<- Metric) {
c.mu.Lock()
defer c.mu.Unlock()
// Populate runtime/metrics sample buffer.
metrics.Read(c.rmSampleBuf)
if len(c.rmSampleBuf) > 0 {
// Populate runtime/metrics sample buffer.
metrics.Read(c.rmSampleBuf)
}
// Update all our metrics from rmSampleBuf.
for i, sample := range c.rmSampleBuf {
// N.B. switch on concrete type because it's significantly more efficient
// than checking for the Counter and Gauge interface implementations. In
// this case, we control all the types here.
switch m := c.rmMetrics[i].(type) {
case *counter:
// Guard against decreases. This should never happen, but a failure
// to do so will result in a panic, which is a harsh consequence for
// a metrics collection bug.
v0, v1 := m.get(), unwrapScalarRMValue(sample.Value)
if v1 > v0 {
m.Add(unwrapScalarRMValue(sample.Value) - m.get())
if c.opt.isEnabled(goRuntimeMetricsCollection) {
// Collect all our metrics from rmSampleBuf.
for i, sample := range c.rmSampleBuf {
// N.B. switch on concrete type because it's significantly more efficient
// than checking for the Counter and Gauge interface implementations. In
// this case, we control all the types here.
switch m := c.rmMetrics[i].(type) {
case *counter:
// Guard against decreases. This should never happen, but a failure
// to do so will result in a panic, which is a harsh consequence for
// a metrics collection bug.
v0, v1 := m.get(), unwrapScalarRMValue(sample.Value)
if v1 > v0 {
m.Add(unwrapScalarRMValue(sample.Value) - m.get())
}
m.Collect(ch)
case *gauge:
m.Set(unwrapScalarRMValue(sample.Value))
m.Collect(ch)
case *batchHistogram:
m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))
m.Collect(ch)
default:
panic("unexpected metric type")
}
m.Collect(ch)
case *gauge:
m.Set(unwrapScalarRMValue(sample.Value))
m.Collect(ch)
case *batchHistogram:
m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))
m.Collect(ch)
default:
panic("unexpected metric type")
}
}
// ms is a dummy MemStats that we populate ourselves so that we can
// populate the old metrics from it.
var ms runtime.MemStats
memStatsFromRM(&ms, c.rmSampleMap)
for _, i := range c.msMetrics {
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
// populate the old metrics from it if goMemStatsCollection is enabled.
if c.opt.isEnabled(goRuntimeMemStatsCollection) {
var ms runtime.MemStats
memStatsFromRM(&ms, c.rmSampleMap)
for _, i := range c.msMetrics {
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
}
}
}
@ -261,35 +372,30 @@ func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) {
// while having Mallocs - Frees still represent a live object count.
// Unfortunately, MemStats doesn't actually export a large allocation count,
// so it's impossible to pull this number out directly.
tinyAllocs := lookupOrZero("/gc/heap/tiny/allocs:objects")
ms.Mallocs = lookupOrZero("/gc/heap/allocs:objects") + tinyAllocs
ms.Frees = lookupOrZero("/gc/heap/frees:objects") + tinyAllocs
tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects)
ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs
ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs
ms.TotalAlloc = lookupOrZero("/gc/heap/allocs:bytes")
ms.Sys = lookupOrZero("/memory/classes/total:bytes")
ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes)
ms.Sys = lookupOrZero(goMemoryClassesTotalBytes)
ms.Lookups = 0 // Already always zero.
ms.HeapAlloc = lookupOrZero("/memory/classes/heap/objects:bytes")
ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes)
ms.Alloc = ms.HeapAlloc
ms.HeapInuse = ms.HeapAlloc + lookupOrZero("/memory/classes/heap/unused:bytes")
ms.HeapReleased = lookupOrZero("/memory/classes/heap/released:bytes")
ms.HeapIdle = ms.HeapReleased + lookupOrZero("/memory/classes/heap/free:bytes")
ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes)
ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes)
ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes)
ms.HeapSys = ms.HeapInuse + ms.HeapIdle
ms.HeapObjects = lookupOrZero("/gc/heap/objects:objects")
ms.StackInuse = lookupOrZero("/memory/classes/heap/stacks:bytes")
ms.StackSys = ms.StackInuse + lookupOrZero("/memory/classes/os-stacks:bytes")
ms.MSpanInuse = lookupOrZero("/memory/classes/metadata/mspan/inuse:bytes")
ms.MSpanSys = ms.MSpanInuse + lookupOrZero("/memory/classes/metadata/mspan/free:bytes")
ms.MCacheInuse = lookupOrZero("/memory/classes/metadata/mcache/inuse:bytes")
ms.MCacheSys = ms.MCacheInuse + lookupOrZero("/memory/classes/metadata/mcache/free:bytes")
ms.BuckHashSys = lookupOrZero("/memory/classes/profiling/buckets:bytes")
ms.GCSys = lookupOrZero("/memory/classes/metadata/other:bytes")
ms.OtherSys = lookupOrZero("/memory/classes/other:bytes")
ms.NextGC = lookupOrZero("/gc/heap/goal:bytes")
// N.B. LastGC is omitted because runtime.GCStats already has this.
// See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
// for more details.
ms.LastGC = 0
ms.HeapObjects = lookupOrZero(goGCHeapObjects)
ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes)
ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes)
ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes)
ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes)
ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes)
ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes)
ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes)
ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes)
ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes)
ms.NextGC = lookupOrZero(goGCHeapGoalBytes)
// N.B. GCCPUFraction is intentionally omitted. This metric is not useful,
// and often misleading due to the fact that it's an average over the lifetime
@ -324,6 +430,11 @@ type batchHistogram struct {
// buckets must always be from the runtime/metrics package, following
// the same conventions.
func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram {
// We need to remove -Inf values. runtime/metrics keeps them around.
// But -Inf bucket should not be allowed for prometheus histograms.
if buckets[0] == math.Inf(-1) {
buckets = buckets[1:]
}
h := &batchHistogram{
desc: desc,
buckets: buckets,
@ -382,8 +493,10 @@ func (h *batchHistogram) Write(out *dto.Metric) error {
for i, count := range h.counts {
totalCount += count
if !h.hasSum {
// N.B. This computed sum is an underestimate.
sum += h.buckets[i] * float64(count)
if count != 0 {
// N.B. This computed sum is an underestimate.
sum += h.buckets[i] * float64(count)
}
}
// Skip the +Inf bucket, but only for the bucket list.
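On the consumer side these options are reached through the collectors package, as the deprecation notes above indicate. A sketch (identifier names as exported by client_golang v1.12.2's collectors package; treat the exact names as an assumption):
package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

func newRegistry() *prometheus.Registry {
	reg := prometheus.NewRegistry()
	// Opt in to full runtime/metrics collection instead of the
	// default MemStats-only view.
	reg.MustRegister(collectors.NewGoCollector(
		collectors.WithGoCollections(collectors.GoRuntimeMetricsCollection),
	))
	return reg
}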


@ -62,7 +62,7 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool)
// other data.
name = strings.ReplaceAll(name, "-", "_")
name = name + "_" + unit
if d.Cumulative {
if d.Cumulative && d.Kind != metrics.KindFloat64Histogram {
name = name + "_total"
}
@ -84,12 +84,12 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool)
func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
switch unit {
case "bytes":
// Rebucket as powers of 2.
return rebucketExp(buckets, 2)
// Re-bucket as powers of 2.
return reBucketExp(buckets, 2)
case "seconds":
// Rebucket as powers of 10 and then merge all buckets greater
// Re-bucket as powers of 10 and then merge all buckets greater
// than 1 second into the +Inf bucket.
b := rebucketExp(buckets, 10)
b := reBucketExp(buckets, 10)
for i := range b {
if b[i] <= 1 {
continue
@ -103,11 +103,11 @@ func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
return buckets
}
// rebucketExp takes a list of bucket boundaries (lower bound inclusive) and
// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and
// downsamples the buckets to those a multiple of base apart. The end result
// is a roughly exponential (in many cases, perfectly exponential) bucketing
// scheme.
func rebucketExp(buckets []float64, base float64) []float64 {
func reBucketExp(buckets []float64, base float64) []float64 {
bucket := buckets[0]
var newBuckets []float64
// We may see a -Inf here, in which case, add it and skip it
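The effect of reBucketExp is easiest to see on concrete boundaries (illustrative values): the first boundary is kept, and each later boundary is kept only once it reaches the previous kept boundary times base.
in := []float64{0.001, 0.002, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10}
out := reBucketExp(in, 10)
// out == []float64{0.001, 0.01, 0.1, 1, 10}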


@ -1,10 +1,10 @@
*.coverprofile
*.orig
node_modules/
vendor
.idea
internal/*/built-example
coverage.txt
/.local/
/site/
*.exe

vendor/github.com/urfave/cli/v2/Makefile generated vendored Normal file

@ -0,0 +1,40 @@
# NOTE: this Makefile is meant to provide a simplified entry point for humans to
# run all of the critical steps to verify one's changes are harmonious in
# nature. Keeping target bodies to one line each and abstaining from make magic
# are very important so that maintainers and contributors can focus their
# attention on files that are primarily Go.
.PHONY: all
all: generate vet tag-test test check-binary-size tag-check-binary-size gfmrun v2diff
# NOTE: this is a special catch-all rule to run any of the commands
# defined in internal/build/build.go with optional arguments passed
# via GFLAGS (global flags) and FLAGS (command-specific flags), e.g.:
#
# $ make test GFLAGS='--packages cli'
%:
go run internal/build/build.go $(GFLAGS) $* $(FLAGS)
.PHONY: tag-test
tag-test:
go run internal/build/build.go -tags urfave_cli_no_docs test
.PHONY: tag-check-binary-size
tag-check-binary-size:
go run internal/build/build.go -tags urfave_cli_no_docs check-binary-size
.PHONY: gfmrun
gfmrun:
go run internal/build/build.go gfmrun docs/v2/manual.md
.PHONY: docs
docs:
mkdocs build
.PHONY: docs-deps
docs-deps:
pip install -r mkdocs-requirements.txt
.PHONY: serve-docs
serve-docs:
mkdocs serve


@ -63,7 +63,7 @@ You can use the following build tags:
When set, this removes `ToMarkdown` and `ToMan` methods, so your application
won't be able to call those. This reduces the resulting binary size by about
300-400 KB (measured using Go 1.18.1 on Linux/amd64), due to less dependencies.
300-400 KB (measured using Go 1.18.1 on Linux/amd64), due to fewer dependencies.
### GOPATH


@ -94,6 +94,8 @@ type App struct {
// single-character bool arguments into one
// i.e. foobar -o -v -> foobar -ov
UseShortOptionHandling bool
// Enable suggestions for commands and flags
Suggest bool
didSetup bool
}
@ -264,6 +266,11 @@ func (a *App) RunContext(ctx context.Context, arguments []string) (err error) {
return err
}
_, _ = fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error())
if a.Suggest {
if suggestion, err := a.suggestFlagFromError(err, ""); err == nil {
fmt.Fprintf(a.Writer, suggestion)
}
}
_ = ShowAppHelp(cCtx)
return err
}
@ -383,6 +390,11 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) {
return err
}
_, _ = fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error())
if a.Suggest {
if suggestion, err := a.suggestFlagFromError(err, cCtx.Command.Name); err == nil {
fmt.Fprintf(a.Writer, suggestion)
}
}
_ = ShowSubcommandHelp(cCtx)
return err
}
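Enabling the feature is a one-field opt-in on the App, as wired in the hunks above (sketch):
package main

import (
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name:    "greet",
		Suggest: true, // print "did you mean ..." hints on flag typos
		Flags:   []cli.Flag{&cli.BoolFlag{Name: "verbose"}},
	}
	// e.g. `greet --verbos` now prints a suggestion before the help text.
	_ = app.Run(os.Args)
}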


@ -20,4 +20,4 @@
// }
package cli
//go:generate go run flag-gen/main.go flag-gen/assets_vfsdata.go
//go:generate go run internal/genflags/cmd/genflags/main.go


@ -119,6 +119,11 @@ func (c *Command) Run(ctx *Context) (err error) {
}
_, _ = fmt.Fprintln(cCtx.App.Writer, "Incorrect Usage:", err.Error())
_, _ = fmt.Fprintln(cCtx.App.Writer)
if ctx.App.Suggest {
if suggestion, err := ctx.App.suggestFlagFromError(err, c.Name); err == nil {
fmt.Fprintf(cCtx.App.Writer, suggestion)
}
}
_ = ShowCommandHelp(cCtx, c.Name)
return err
}
@ -249,6 +254,7 @@ func (c *Command) startApp(ctx *Context) error {
app.ErrWriter = ctx.App.ErrWriter
app.ExitErrHandler = ctx.App.ExitErrHandler
app.UseShortOptionHandling = ctx.App.UseShortOptionHandling
app.Suggest = ctx.App.Suggest
app.categories = newCommandCategories()
for _, command := range c.Subcommands {

vendor/github.com/urfave/cli/v2/flag-spec.yaml generated vendored Normal file

@ -0,0 +1,50 @@
# NOTE: this file is used by the tool defined in
# ./internal/genflags/cmd/genflags/main.go which uses the
# `genflags.Spec` type that maps to this file structure.
flag_types:
bool: {}
float64: {}
int64: {}
int: {}
time.Duration: {}
uint64: {}
uint: {}
string:
struct_fields:
- { name: TakesFile, type: bool }
Generic:
struct_fields:
- { name: TakesFile, type: bool }
Path:
struct_fields:
- { name: TakesFile, type: bool }
Float64Slice:
value_pointer: true
skip_interfaces:
- fmt.Stringer
Int64Slice:
value_pointer: true
skip_interfaces:
- fmt.Stringer
IntSlice:
value_pointer: true
skip_interfaces:
- fmt.Stringer
StringSlice:
value_pointer: true
skip_interfaces:
- fmt.Stringer
struct_fields:
- { name: TakesFile, type: bool }
Timestamp:
value_pointer: true
struct_fields:
- { name: Layout, type: string }
# TODO: enable UintSlice
# UintSlice: {}
# TODO: enable Uint64Slice once #1334 lands
# Uint64Slice: {}
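This spec drives the internal/genflags generator, which now emits the boilerplate being deleted from the flag_*.go files below (the struct definitions plus IsSet, Names, IsVisible, and friends). Roughly, per flag type it generates methods of this shape within package cli (sketch inferred from the removed code; the generator owns the real output file and its contents):
func (f *BoolFlag) IsSet() bool      { return f.HasBeenSet }
func (f *BoolFlag) Names() []string  { return FlagNames(f.Name, f.Aliases) }
func (f *BoolFlag) IsVisible() bool  { return !f.Hidden }
func (f *BoolFlag) IsRequired() bool { return f.Required }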


@ -258,7 +258,7 @@ func withEnvHint(envVars []string, str string) string {
return str + envText
}
func flagNames(name string, aliases []string) []string {
func FlagNames(name string, aliases []string) []string {
var ret []string
for _, part := range append([]string{name}, aliases...) {


@ -6,42 +6,6 @@ import (
"strconv"
)
// BoolFlag is a flag with type bool
type BoolFlag struct {
Name string
Aliases []string
Usage string
EnvVars []string
FilePath string
Required bool
Hidden bool
Value bool
DefaultText string
Destination *bool
HasBeenSet bool
}
// IsSet returns whether or not the flag has been set through env or file
func (f *BoolFlag) IsSet() bool {
return f.HasBeenSet
}
// String returns a readable representation of this value
// (for usage defaults)
func (f *BoolFlag) String() string {
return FlagStringer(f)
}
// Names returns the names of the flag
func (f *BoolFlag) Names() []string {
return flagNames(f.Name, f.Aliases)
}
// IsRequired returns whether or not the flag is required
func (f *BoolFlag) IsRequired() bool {
return f.Required
}
// TakesValue returns true if the flag takes a value, otherwise false
func (f *BoolFlag) TakesValue() bool {
return false
@ -58,11 +22,6 @@ func (f *BoolFlag) GetValue() string {
return ""
}
// IsVisible returns true if the flag is not hidden, otherwise false
func (f *BoolFlag) IsVisible() bool {
return !f.Hidden
}
// GetDefaultText returns the default text for this flag
func (f *BoolFlag) GetDefaultText() string {
if f.DefaultText != "" {


@ -6,42 +6,6 @@ import (
"time"
)
// DurationFlag is a flag with type time.Duration (see https://golang.org/pkg/time/#ParseDuration)
type DurationFlag struct {
Name string
Aliases []string
Usage string
EnvVars []string
FilePath string
Required bool
Hidden bool
Value time.Duration
DefaultText string
Destination *time.Duration
HasBeenSet bool
}
// IsSet returns whether or not the flag has been set through env or file
func (f *DurationFlag) IsSet() bool {
return f.HasBeenSet
}
// String returns a readable representation of this value
// (for usage defaults)
func (f *DurationFlag) String() string {
return FlagStringer(f)
}
// Names returns the names of the flag
func (f *DurationFlag) Names() []string {
return flagNames(f.Name, f.Aliases)
}
// IsRequired returns whether or not the flag is required
func (f *DurationFlag) IsRequired() bool {
return f.Required
}
// TakesValue returns true if the flag takes a value, otherwise false
func (f *DurationFlag) TakesValue() bool {
return true
@ -58,11 +22,6 @@ func (f *DurationFlag) GetValue() string {
return f.Value.String()
}
// IsVisible returns true if the flag is not hidden, otherwise false
func (f *DurationFlag) IsVisible() bool {
return !f.Hidden
}
// GetDefaultText returns the default text for this flag
func (f *DurationFlag) GetDefaultText() string {
if f.DefaultText != "" {


@ -6,42 +6,6 @@ import (
"strconv"
)
// Float64Flag is a flag with type float64
type Float64Flag struct {
Name string
Aliases []string
Usage string
EnvVars []string
FilePath string
Required bool
Hidden bool
Value float64
DefaultText string
Destination *float64
HasBeenSet bool
}
// IsSet returns whether or not the flag has been set through env or file
func (f *Float64Flag) IsSet() bool {
return f.HasBeenSet
}
// String returns a readable representation of this value
// (for usage defaults)
func (f *Float64Flag) String() string {
return FlagStringer(f)
}
// Names returns the names of the flag
func (f *Float64Flag) Names() []string {
return flagNames(f.Name, f.Aliases)
}
// IsRequired returns whether or not the flag is required
func (f *Float64Flag) IsRequired() bool {
return f.Required
}
// TakesValue returns true if the flag takes a value, otherwise false
func (f *Float64Flag) TakesValue() bool {
return true
@ -71,11 +35,6 @@ func (f *Float64Flag) GetEnvVars() []string {
return f.EnvVars
}
// IsVisible returns true if the flag is not hidden, otherwise false
func (f *Float64Flag) IsVisible() bool {
return !f.Hidden
}
// Apply populates the flag given the flag set and environment
func (f *Float64Flag) Apply(set *flag.FlagSet) error {
if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {


@ -75,41 +75,12 @@ func (f *Float64Slice) Get() interface{} {
return *f
}
// Float64SliceFlag is a flag with type *Float64Slice
type Float64SliceFlag struct {
Name string
Aliases []string
Usage string
EnvVars []string
FilePath string
Required bool
Hidden bool
Value *Float64Slice
DefaultText string
HasBeenSet bool
}
// IsSet returns whether or not the flag has been set through env or file
func (f *Float64SliceFlag) IsSet() bool {
return f.HasBeenSet
}
// String returns a readable representation of this value
// (for usage defaults)
func (f *Float64SliceFlag) String() string {
return withEnvHint(f.GetEnvVars(), stringifyFloat64SliceFlag(f))
}
// Names returns the names of the flag
func (f *Float64SliceFlag) Names() []string {
return flagNames(f.Name, f.Aliases)
}
// IsRequired returns whether or not the flag is required
func (f *Float64SliceFlag) IsRequired() bool {
return f.Required
}
// TakesValue returns true if the flag takes a value, otherwise false
func (f *Float64SliceFlag) TakesValue() bool {
return true
@ -129,11 +100,6 @@ func (f *Float64SliceFlag) GetValue() string {
return ""
}
// IsVisible returns true if the flag is not hidden, otherwise false
func (f *Float64SliceFlag) IsVisible() bool {
return !f.Hidden
}
// GetDefaultText returns the default text for this flag
func (f *Float64SliceFlag) GetDefaultText() string {
if f.DefaultText != "" {


@ -11,42 +11,6 @@ type Generic interface {
String() string
}
// GenericFlag is a flag with type Generic
type GenericFlag struct {
Name string
Aliases []string
Usage string
EnvVars []string
FilePath string
Required bool
Hidden bool
TakesFile bool
Value Generic
DefaultText string
HasBeenSet bool
}
// IsSet returns whether or not the flag has been set through env or file
func (f *GenericFlag) IsSet() bool {
return f.HasBeenSet
}
// String returns a readable representation of this value
// (for usage defaults)
func (f *GenericFlag) String() string {
return FlagStringer(f)
}
// Names returns the names of the flag
func (f *GenericFlag) Names() []string {
return flagNames(f.Name, f.Aliases)
}
// IsRequired returns whether or not the flag is required
func (f *GenericFlag) IsRequired() bool {
return f.Required
}
// TakesValue returns true if the flag takes a value, otherwise false
func (f *GenericFlag) TakesValue() bool {
return true
@ -66,11 +30,6 @@ func (f *GenericFlag) GetValue() string {
return ""
}
// IsVisible returns true if the flag is not hidden, otherwise false
func (f *GenericFlag) IsVisible() bool {
return !f.Hidden
}
// GetDefaultText returns the default text for this flag
func (f *GenericFlag) GetDefaultText() string {
if f.DefaultText != "" {


@ -6,42 +6,6 @@ import (
"strconv"
)
// IntFlag is a flag with type int
type IntFlag struct {
Name string
Aliases []string
Usage string
EnvVars []string
FilePath string
Required bool
Hidden bool
Value int
DefaultText string
Destination *int
HasBeenSet bool
}
// IsSet returns whether or not the flag has been set through env or file
func (f *IntFlag) IsSet() bool {
return f.HasBeenSet
}
// String returns a readable representation of this value
// (for usage defaults)
func (f *IntFlag) String() string {
return FlagStringer(f)
}
// Names returns the names of the flag
func (f *IntFlag) Names() []string {
return flagNames(f.Name, f.Aliases)
}
// IsRequired returns whether or not the flag is required
func (f *IntFlag) IsRequired() bool {
return f.Required
}
// TakesValue returns true if the flag takes a value, otherwise false
func (f *IntFlag) TakesValue() bool {
return true
@ -58,11 +22,6 @@ func (f *IntFlag) GetValue() string {
return fmt.Sprintf("%d", f.Value)
}
// IsVisible returns true if the flag is not hidden, otherwise false
func (f *IntFlag) IsVisible() bool {
return !f.Hidden
}
// GetDefaultText returns the default text for this flag
func (f *IntFlag) GetDefaultText() string {
if f.DefaultText != "" {

Some files were not shown because too many files have changed in this diff