build(deps): bump cloud.google.com/go/storage from 1.16.0 to 1.16.1 (#1589)

Bumps [cloud.google.com/go/storage](https://github.com/googleapis/google-cloud-go) from 1.16.0 to 1.16.1.
- [Release notes](https://github.com/googleapis/google-cloud-go/releases)
- [Changelog](https://github.com/googleapis/google-cloud-go/blob/master/CHANGES.md)
- [Commits](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.16.0...storage/v1.16.1)

---
updated-dependencies:
- dependency-name: cloud.google.com/go/storage
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

Parent: 111ea89a7d
Commit: 1a6b9157e2

94 changed files with 24389 additions and 190 deletions
go.mod (2 changes)

@@ -1,7 +1,7 @@
module github.com/VictoriaMetrics/VictoriaMetrics

require (
-	cloud.google.com/go/storage v1.16.0
+	cloud.google.com/go/storage v1.16.1
	github.com/VictoriaMetrics/fastcache v1.6.0

	// Do not use the original github.com/valyala/fasthttp because of issues
go.sum (9 changes)

@@ -45,8 +45,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-cloud.google.com/go/storage v1.16.0 h1:1UwAux2OZP4310YXg5ohqBEpV16Y93uZG4+qOX7K2Kg=
-cloud.google.com/go/storage v1.16.0/go.mod h1:ieKBmUyzcftN5tbxwnXClMKH00CfcQ+xL6NN0r5QfmE=
+cloud.google.com/go/storage v1.16.1 h1:sMEIc4wxvoY3NXG7Rn9iP7jb/2buJgWR1vNXCR/UPfs=
+cloud.google.com/go/storage v1.16.1/go.mod h1:LaNorbty3ehnU3rEjXSNV/NRgQA0O8Y+uh6bPe5UOk4=
collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go v41.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=

@@ -1125,7 +1125,6 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210615190721-d04028783cf1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw=

@@ -1370,7 +1369,6 @@ google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBz
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
google.golang.org/api v0.49.0/go.mod h1:BECiH72wsfwUvOVn3+btPD5WHi0LzavZReBndi42L18=
google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=

@@ -1434,8 +1432,6 @@ google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQ
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210617175327-b9e0b3197ced/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
google.golang.org/genproto v0.0.0-20210624174822-c5cf32407d0a/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=

@@ -1443,6 +1439,7 @@ google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKr
google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210825212027-de86158e7fda/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 h1:NHN4wOCScVzKhPenJ2dt+BTs3X/XkBVI/Rh4iDt55T8=
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
vendor/cloud.google.com/go/storage/CHANGES.md (generated, vendored; 12 changes)

@@ -1,5 +1,17 @@
# Changes

+### [1.16.1](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.16.0...storage/v1.16.1) (2021-08-30)

+### Bug Fixes

+* **storage/internal:** Update encryption_key fields to "bytes" type. fix: Improve date/times and field name clarity in lifecycle conditions. ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758))
+* **storage:** accept emulator env var without scheme ([#4616](https://www.github.com/googleapis/google-cloud-go/issues/4616)) ([5f8cbb9](https://www.github.com/googleapis/google-cloud-go/commit/5f8cbb98070109e2a34409ac775ed63b94d37efd))
+* **storage:** preserve supplied endpoint's scheme ([#4609](https://www.github.com/googleapis/google-cloud-go/issues/4609)) ([ee2756f](https://www.github.com/googleapis/google-cloud-go/commit/ee2756fb0a335d591464a770c9fa4f8fe0ba2e01))
+* **storage:** remove unnecessary variable ([#4608](https://www.github.com/googleapis/google-cloud-go/issues/4608)) ([27fc784](https://www.github.com/googleapis/google-cloud-go/commit/27fc78456fb251652bdf5cdb493734a7e1e643e1))
+* **storage:** retry LockRetentionPolicy ([#4439](https://www.github.com/googleapis/google-cloud-go/issues/4439)) ([09879ea](https://www.github.com/googleapis/google-cloud-go/commit/09879ea80cb67f9bfd8fc9384b0fda335567cba9)), refs [#4437](https://www.github.com/googleapis/google-cloud-go/issues/4437)
+* **storage:** revise Reader to send XML preconditions ([#4479](https://www.github.com/googleapis/google-cloud-go/issues/4479)) ([e36b29a](https://www.github.com/googleapis/google-cloud-go/commit/e36b29a3d43bce5c1c044f7daf6e1db00b0a49e0)), refs [#4470](https://www.github.com/googleapis/google-cloud-go/issues/4470)

## [1.16.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.15.0...storage/v1.16.0) (2021-06-28)
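Most of these fixes surface only through the existing API of the package. As a minimal illustration of the Reader precondition entry above, the sketch below (bucket, object, and generation values are placeholders, not taken from this commit) attaches generation and metageneration preconditions to a download; per the changelog, v1.16.1 also forwards such conditions on the XML download path.

```go
package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Placeholder bucket/object/generation values, for illustration only.
	obj := client.Bucket("my-bucket").Object("my-object")

	// Attach preconditions to the handle before opening the reader.
	r, err := obj.Generation(1234567890).
		If(storage.Conditions{MetagenerationMatch: 1}).
		NewReader(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	n, err := io.Copy(io.Discard, r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("read bytes:", n)
}
```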
|
49
vendor/cloud.google.com/go/storage/acl.go
generated
vendored
49
vendor/cloud.google.com/go/storage/acl.go
generated
vendored
|
@ -22,6 +22,7 @@ import (
|
|||
"cloud.google.com/go/internal/trace"
|
||||
"google.golang.org/api/googleapi"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
storagepb "google.golang.org/genproto/googleapis/storage/v2"
|
||||
)
|
||||
|
||||
// ACLRole is the level of access to grant.
|
||||
|
@ -244,6 +245,14 @@ func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule {
|
|||
return rs
|
||||
}
|
||||
|
||||
func fromProtoToObjectACLRules(items []*storagepb.ObjectAccessControl) []ACLRule {
|
||||
var rs []ACLRule
|
||||
for _, item := range items {
|
||||
rs = append(rs, fromProtoToObjectACLRule(item))
|
||||
}
|
||||
return rs
|
||||
}
|
||||
|
||||
func toBucketACLRules(items []*raw.BucketAccessControl) []ACLRule {
|
||||
var rs []ACLRule
|
||||
for _, item := range items {
|
||||
|
@ -263,6 +272,17 @@ func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule {
|
|||
}
|
||||
}
|
||||
|
||||
func fromProtoToObjectACLRule(a *storagepb.ObjectAccessControl) ACLRule {
|
||||
return ACLRule{
|
||||
Entity: ACLEntity(a.GetEntity()),
|
||||
EntityID: a.GetEntityId(),
|
||||
Role: ACLRole(a.GetRole()),
|
||||
Domain: a.GetDomain(),
|
||||
Email: a.GetEmail(),
|
||||
ProjectTeam: fromProtoToObjectProjectTeam(a.GetProjectTeam()),
|
||||
}
|
||||
}
|
||||
|
||||
func toBucketACLRule(a *raw.BucketAccessControl) ACLRule {
|
||||
return ACLRule{
|
||||
Entity: ACLEntity(a.Entity),
|
||||
|
@ -285,6 +305,17 @@ func toRawObjectACL(rules []ACLRule) []*raw.ObjectAccessControl {
|
|||
return r
|
||||
}
|
||||
|
||||
func toProtoObjectACL(rules []ACLRule) []*storagepb.ObjectAccessControl {
|
||||
if len(rules) == 0 {
|
||||
return nil
|
||||
}
|
||||
r := make([]*storagepb.ObjectAccessControl, 0, len(rules))
|
||||
for _, rule := range rules {
|
||||
r = append(r, rule.toProtoObjectAccessControl("")) // bucket name unnecessary
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func toRawBucketACL(rules []ACLRule) []*raw.BucketAccessControl {
|
||||
if len(rules) == 0 {
|
||||
return nil
|
||||
|
@ -314,6 +345,14 @@ func (r ACLRule) toRawObjectAccessControl(bucket string) *raw.ObjectAccessContro
|
|||
}
|
||||
}
|
||||
|
||||
func (r ACLRule) toProtoObjectAccessControl(bucket string) *storagepb.ObjectAccessControl {
|
||||
return &storagepb.ObjectAccessControl{
|
||||
Entity: string(r.Entity),
|
||||
Role: string(r.Role),
|
||||
// The other fields are not settable.
|
||||
}
|
||||
}
|
||||
|
||||
func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam {
|
||||
if p == nil {
|
||||
return nil
|
||||
|
@ -333,3 +372,13 @@ func toObjectProjectTeam(p *raw.ObjectAccessControlProjectTeam) *ProjectTeam {
|
|||
Team: p.Team,
|
||||
}
|
||||
}
|
||||
|
||||
func fromProtoToObjectProjectTeam(p *storagepb.ProjectTeam) *ProjectTeam {
|
||||
if p == nil {
|
||||
return nil
|
||||
}
|
||||
return &ProjectTeam{
|
||||
ProjectNumber: p.GetProjectNumber(),
|
||||
Team: p.GetTeam(),
|
||||
}
|
||||
}
|
||||
|
|
vendor/cloud.google.com/go/storage/bucket.go (generated, vendored; 5 changes)

@@ -130,7 +130,8 @@ func (b *BucketHandle) DefaultObjectACL() *ACLHandle {
}

// Object returns an ObjectHandle, which provides operations on the named object.
-// This call does not perform any network operations.
+// This call does not perform any network operations such as fetching the object or verifying its existence.
+// Use methods on ObjectHandle to perform network operations.
//
// name must consist entirely of valid UTF-8-encoded runes. The full specification
// for valid object names can be found at:

@@ -949,8 +950,10 @@ func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error {
		metageneration = b.conds.MetagenerationMatch
	}
	req := b.c.raw.Buckets.LockRetentionPolicy(b.name, metageneration)
+	return runWithRetry(ctx, func() error {
		_, err := req.Context(ctx).Do()
		return err
+	})
}

// applyBucketConds modifies the provided call using the conditions in conds.
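The LockRetentionPolicy change above wraps the underlying API call in runWithRetry, so transient failures are retried instead of being returned immediately. A minimal caller-side sketch (bucket name and retention period are placeholders, not taken from this commit) of how the method is typically invoked with a metageneration condition:

```go
package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	bkt := client.Bucket("my-bucket") // placeholder bucket name

	// Set a retention policy, then read back the attributes to obtain the
	// current metageneration.
	if _, err := bkt.Update(ctx, storage.BucketAttrsToUpdate{
		RetentionPolicy: &storage.RetentionPolicy{RetentionPeriod: 24 * time.Hour},
	}); err != nil {
		log.Fatal(err)
	}
	attrs, err := bkt.Attrs(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// LockRetentionPolicy is typically called with a MetagenerationMatch
	// condition; with v1.16.1 the underlying call is retried on transient errors.
	cond := storage.BucketConditions{MetagenerationMatch: attrs.MetaGeneration}
	if err := bkt.If(cond).LockRetentionPolicy(ctx); err != nil {
		log.Fatal(err)
	}
}
```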
vendor/cloud.google.com/go/storage/doc.go (generated, vendored; 25 changes)

@@ -48,6 +48,31 @@ an unauthenticated client with

	client, err := storage.NewClient(ctx, option.WithoutAuthentication())

+To use an emulator with this library, you can set the STORAGE_EMULATOR_HOST
+environment variable to the address at which your emulator is running. This will
+send requests to that address instead of to Cloud Storage. You can then create
+and use a client as usual:

+	// Set STORAGE_EMULATOR_HOST environment variable.
+	err := os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9000")
+	if err != nil {
+		// TODO: Handle error.
+	}

+	// Create client as usual.
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}

+	// This request is now directed to http://localhost:9000/storage/v1/b
+	// instead of https://storage.googleapis.com/storage/v1/b
+	if err := client.Bucket("my-bucket").Create(ctx, projectID, nil); err != nil {
+		// TODO: Handle error.
+	}

+Please note that there is no official emulator for Cloud Storage.

Buckets

A Google Cloud Storage bucket is a collection of objects. To work with a
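Two of the 1.16.1 fixes listed in CHANGES.md touch exactly this configuration path: STORAGE_EMULATOR_HOST is now accepted with or without a scheme, and an explicitly supplied endpoint keeps its scheme. A minimal sketch of both options (the localhost address, port, and endpoint path are placeholders, assuming a local emulator is listening there):

```go
package main

import (
	"context"
	"log"
	"os"

	"cloud.google.com/go/storage"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()

	// Option 1: environment variable. With v1.16.1 a bare host:port
	// (no scheme) is accepted.
	os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9000")
	c1, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c1.Close()

	// Option 2: explicit endpoint. With v1.16.1 the supplied scheme is
	// preserved rather than being rewritten.
	c2, err := storage.NewClient(ctx,
		option.WithEndpoint("http://localhost:9000/storage/v1/"),
		option.WithoutAuthentication(),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer c2.Close()
}
```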
vendor/cloud.google.com/go/storage/go.mod (generated, vendored; 11 changes)

@@ -3,13 +3,14 @@ module cloud.google.com/go/storage
go 1.11

require (
-	cloud.google.com/go v0.84.0
+	cloud.google.com/go v0.93.3
	github.com/golang/protobuf v1.5.2
	github.com/google/go-cmp v0.5.6
	github.com/googleapis/gax-go/v2 v2.0.5
-	golang.org/x/oauth2 v0.0.0-20210615190721-d04028783cf1
+	golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a
	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
-	google.golang.org/api v0.49.0
-	google.golang.org/genproto v0.0.0-20210624174822-c5cf32407d0a
-	google.golang.org/grpc v1.38.0
+	google.golang.org/api v0.54.0
+	google.golang.org/genproto v0.0.0-20210825212027-de86158e7fda
+	google.golang.org/grpc v1.40.0
+	google.golang.org/protobuf v1.27.1
)
vendor/cloud.google.com/go/storage/go.sum (generated, vendored; 61 changes)
@ -19,8 +19,11 @@ cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECH
|
|||
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
|
||||
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
|
||||
cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
|
||||
cloud.google.com/go v0.84.0 h1:hVhK90DwCdOAYGME/FJd9vNIZye9HBR6Yy3fu4js3N8=
|
||||
cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
|
||||
cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
|
||||
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
|
||||
cloud.google.com/go v0.93.3 h1:wPBktZFzYBcCZVARvwVKqH1uEj+aLXofJEtrb4oOsio=
|
||||
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
|
@ -41,7 +44,10 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9
|
|||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
|
@ -49,6 +55,7 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
|
|||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
|
@ -56,7 +63,9 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
|
|||
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
|
@ -73,6 +82,7 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
|
|||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
|
||||
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
|
@ -126,17 +136,19 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe
|
|||
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
|
@ -144,7 +156,9 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
|||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
|
@ -162,6 +176,7 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
|||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
|
||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
|
@ -190,7 +205,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu
|
|||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
|
||||
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
|
@ -202,7 +216,6 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
@ -252,8 +265,9 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ
|
|||
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210615190721-d04028783cf1 h1:x622Z2o4hgCr/4CiKWc51jHVKaWdtVpBNmEI8wI9Qns=
|
||||
golang.org/x/oauth2 v0.0.0-20210615190721-d04028783cf1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a h1:4Kd8OPUx1xgUwrHDaviWZO8MsgoZTZYC3g+8m16RBww=
|
||||
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@ -305,8 +319,10 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069 h1:siQdpVirKtzPhKl3lZWozZraCFObP8S1v6PRp0bLrtU=
|
||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -368,8 +384,9 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f
|
|||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.3 h1:L69ShwSZEyCsLKoAxDKeMvLDZkumEe8gXUZAjab0tX8=
|
||||
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
@ -398,8 +415,10 @@ google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBz
|
|||
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
|
||||
google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
|
||||
google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
|
||||
google.golang.org/api v0.49.0 h1:gjIBDxlTG7vnzMmEnYwTnvLTF8Rjzo+ETCgEX1YZ/fY=
|
||||
google.golang.org/api v0.49.0/go.mod h1:BECiH72wsfwUvOVn3+btPD5WHi0LzavZReBndi42L18=
|
||||
google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
|
||||
google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
|
||||
google.golang.org/api v0.54.0 h1:ECJUVngj71QI6XEm7b1sAf8BljU5inEhMbKPR8Lxhhk=
|
||||
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
|
@ -431,6 +450,7 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG
|
|||
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||
|
@ -451,9 +471,14 @@ google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQ
|
|||
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/genproto v0.0.0-20210617175327-b9e0b3197ced/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
|
||||
google.golang.org/genproto v0.0.0-20210624174822-c5cf32407d0a h1:b5Bhxmy6Tppar7Yl4J6c6xF33YSBhkm2FtV9/ZQuBkQ=
|
||||
google.golang.org/genproto v0.0.0-20210624174822-c5cf32407d0a/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
|
||||
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
|
||||
google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
|
||||
google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
|
||||
google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
|
||||
google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
|
||||
google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
|
||||
google.golang.org/genproto v0.0.0-20210825212027-de86158e7fda h1:iT5uhT54PtbqUsWddv/nnEWdE5e/MTr+Nv3vjxlBP1A=
|
||||
google.golang.org/genproto v0.0.0-20210825212027-de86158e7fda/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
|
@ -467,6 +492,7 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
|
|||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
|
||||
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
|
@ -474,8 +500,11 @@ google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
|
|||
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
|
||||
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
|
||||
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
|
||||
google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q=
|
||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
|
@ -488,12 +517,14 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
|
|||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
|
vendor/cloud.google.com/go/storage/go110.go (generated, vendored; 56 changes, file deleted)
@ -1,56 +0,0 @@
|
|||
// Copyright 2017 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build go1.10
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
func shouldRetry(err error) bool {
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
return true
|
||||
}
|
||||
switch e := err.(type) {
|
||||
case *googleapi.Error:
|
||||
// Retry on 429 and 5xx, according to
|
||||
// https://cloud.google.com/storage/docs/exponential-backoff.
|
||||
return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
|
||||
case *url.Error:
|
||||
// Retry socket-level errors ECONNREFUSED and ENETUNREACH (from syscall).
|
||||
// Unfortunately the error type is unexported, so we resort to string
|
||||
// matching.
|
||||
retriable := []string{"connection refused", "connection reset"}
|
||||
for _, s := range retriable {
|
||||
if strings.Contains(e.Error(), s) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
case interface{ Temporary() bool }:
|
||||
if e.Temporary() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
// Unwrap is only supported in go1.13.x+
|
||||
if e, ok := err.(interface{ Unwrap() error }); ok {
|
||||
return shouldRetry(e.Unwrap())
|
||||
}
|
||||
return false
|
||||
}
|
vendor/cloud.google.com/go/storage/go_mod_tidy_hack.go (generated, vendored; 1 change)

@@ -14,6 +14,7 @@

// This file, and the cloud.google.com/go import, won't actually become part of
// the resultant binary.
+//go:build modhack
// +build modhack

package storage
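The one-line change above adds the Go 1.17 `//go:build` form of the existing `// +build modhack` constraint. The file exists only to keep the cloud.google.com/go module requirement in go.mod; because the `modhack` tag is never enabled, the file is excluded from every normal build. A minimal sketch of the same pattern for a hypothetical dependency `example.com/somedep` (not part of this commit):

```go
//go:build modhack
// +build modhack

// This file pins a module requirement for `go mod tidy` without pulling the
// dependency into any real build, since the modhack tag is never set.
package keep

import _ "example.com/somedep" // hypothetical module kept in go.mod
```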
vendor/cloud.google.com/go/storage/internal/apiv2/doc.go (generated, vendored; new file, 140 lines)
@ -0,0 +1,140 @@
|
|||
// Copyright 2021 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
|
||||
|
||||
// Package storage is an auto-generated package for the
|
||||
// Cloud Storage API.
|
||||
//
|
||||
// Lets you store and retrieve potentially-large, immutable data objects.
|
||||
//
|
||||
// NOTE: This package is in alpha. It is not stable, and is likely to change.
|
||||
//
|
||||
// Example usage
|
||||
//
|
||||
// To get started with this package, create a client.
|
||||
// ctx := context.Background()
|
||||
// c, err := storage.NewClient(ctx)
|
||||
// if err != nil {
|
||||
// // TODO: Handle error.
|
||||
// }
|
||||
// defer c.Close()
|
||||
//
|
||||
// The client will use your default application credentials. Clients should be reused instead of created as needed.
|
||||
// The methods of Client are safe for concurrent use by multiple goroutines.
|
||||
// The returned client must be Closed when it is done being used.
|
||||
//
|
||||
// Using the Client
|
||||
//
|
||||
// The following is an example of making an API call with the newly created client.
|
||||
//
|
||||
// Use of Context
|
||||
//
|
||||
// The ctx passed to NewClient is used for authentication requests and
|
||||
// for creating the underlying connection, but is not used for subsequent calls.
|
||||
// Individual methods on the client use the ctx given to them.
|
||||
//
|
||||
// To close the open connection, use the Close() method.
|
||||
//
|
||||
// For information about setting deadlines, reusing contexts, and more
|
||||
// please visit https://pkg.go.dev/cloud.google.com/go.
|
||||
package storage // import "cloud.google.com/go/storage/internal/apiv2"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
// For more information on implementing a client constructor hook, see
|
||||
// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
|
||||
type clientHookParams struct{}
|
||||
type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
|
||||
|
||||
const versionClient = "20210821"
|
||||
|
||||
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
|
||||
out, _ := metadata.FromOutgoingContext(ctx)
|
||||
out = out.Copy()
|
||||
for _, md := range mds {
|
||||
for k, v := range md {
|
||||
out[k] = append(out[k], v...)
|
||||
}
|
||||
}
|
||||
return metadata.NewOutgoingContext(ctx, out)
|
||||
}
|
||||
|
||||
func checkDisableDeadlines() (bool, error) {
|
||||
raw, ok := os.LookupEnv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE")
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
b, err := strconv.ParseBool(raw)
|
||||
return b, err
|
||||
}
|
||||
|
||||
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
|
||||
func DefaultAuthScopes() []string {
|
||||
return []string{
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/cloud-platform.read-only",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control",
|
||||
"https://www.googleapis.com/auth/devstorage.read_only",
|
||||
"https://www.googleapis.com/auth/devstorage.read_write",
|
||||
}
|
||||
}
|
||||
|
||||
// versionGo returns the Go runtime version. The returned string
|
||||
// has no whitespace, suitable for reporting in header.
|
||||
func versionGo() string {
|
||||
const develPrefix = "devel +"
|
||||
|
||||
s := runtime.Version()
|
||||
if strings.HasPrefix(s, develPrefix) {
|
||||
s = s[len(develPrefix):]
|
||||
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
|
||||
s = s[:p]
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
notSemverRune := func(r rune) bool {
|
||||
return !strings.ContainsRune("0123456789.", r)
|
||||
}
|
||||
|
||||
if strings.HasPrefix(s, "go1") {
|
||||
s = s[2:]
|
||||
var prerelease string
|
||||
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
|
||||
s, prerelease = s[:p], s[p:]
|
||||
}
|
||||
if strings.HasSuffix(s, ".") {
|
||||
s += "0"
|
||||
} else if strings.Count(s, ".") < 2 {
|
||||
s += ".0"
|
||||
}
|
||||
if prerelease != "" {
|
||||
s += "-" + prerelease
|
||||
}
|
||||
return s
|
||||
}
|
||||
return "UNKNOWN"
|
||||
}
|
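The package documentation above stresses that the ctx passed to NewClient covers only authentication and dialing, while each method uses the ctx it is given. A minimal sketch of that split, assuming the generated client from this commit (the import path is internal to the cloud.google.com/go module, so code outside it cannot actually import it, and the request field values are placeholders following the google.storage.v2 proto):

```go
package main

import (
	"context"
	"log"
	"time"

	storage "cloud.google.com/go/storage/internal/apiv2"
	storagepb "google.golang.org/genproto/googleapis/storage/v2"
)

func main() {
	// Used for credential discovery and for dialing the gRPC connection.
	ctx := context.Background()
	c, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close() // the connection stays open until Close is called

	// Each call gets its own context, so deadlines apply per call.
	callCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	// Placeholder request; ReadObject streams the object's data back.
	stream, err := c.ReadObject(callCtx, &storagepb.ReadObjectRequest{
		Bucket: "my-bucket",
		Object: "my-object",
	})
	if err != nil {
		log.Fatal(err)
	}
	for {
		resp, err := stream.Recv()
		if err != nil {
			break // io.EOF ends the stream; other errors need real handling
		}
		_ = resp // each response carries a chunk of the object's data
	}
}
```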
vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json (generated, vendored; new file, 38 lines)
@ -0,0 +1,38 @@
|
|||
{
|
||||
"schema": "1.0",
|
||||
"comment": "This file maps proto services/RPCs to the corresponding library clients/methods.",
|
||||
"language": "go",
|
||||
"protoPackage": "google.storage.v2",
|
||||
"libraryPackage": "cloud.google.com/go/storage/internal/apiv2",
|
||||
"services": {
|
||||
"Storage": {
|
||||
"clients": {
|
||||
"grpc": {
|
||||
"libraryClient": "Client",
|
||||
"rpcs": {
|
||||
"QueryWriteStatus": {
|
||||
"methods": [
|
||||
"QueryWriteStatus"
|
||||
]
|
||||
},
|
||||
"ReadObject": {
|
||||
"methods": [
|
||||
"ReadObject"
|
||||
]
|
||||
},
|
||||
"StartResumableWrite": {
|
||||
"methods": [
|
||||
"StartResumableWrite"
|
||||
]
|
||||
},
|
||||
"WriteObject": {
|
||||
"methods": [
|
||||
"WriteObject"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go (generated, vendored; new file, 359 lines)
@ -0,0 +1,359 @@
|
|||
// Copyright 2021 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"time"
|
||||
|
||||
gax "github.com/googleapis/gax-go/v2"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/option/internaloption"
|
||||
gtransport "google.golang.org/api/transport/grpc"
|
||||
storagepb "google.golang.org/genproto/googleapis/storage/v2"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
var newClientHook clientHook
|
||||
|
||||
// CallOptions contains the retry settings for each method of Client.
|
||||
type CallOptions struct {
|
||||
ReadObject []gax.CallOption
|
||||
WriteObject []gax.CallOption
|
||||
StartResumableWrite []gax.CallOption
|
||||
QueryWriteStatus []gax.CallOption
|
||||
}
|
||||
|
||||
func defaultGRPCClientOptions() []option.ClientOption {
|
||||
return []option.ClientOption{
|
||||
internaloption.WithDefaultEndpoint("storage.googleapis.com:443"),
|
||||
internaloption.WithDefaultMTLSEndpoint("storage.mtls.googleapis.com:443"),
|
||||
internaloption.WithDefaultAudience("https://storage.googleapis.com/"),
|
||||
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
|
||||
internaloption.EnableJwtWithScope(),
|
||||
option.WithGRPCDialOption(grpc.WithDisableServiceConfig()),
|
||||
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
|
||||
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
|
||||
}
|
||||
}
|
||||
|
||||
func defaultCallOptions() *CallOptions {
|
||||
return &CallOptions{
|
||||
ReadObject: []gax.CallOption{
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 1000 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 2.00,
|
||||
})
|
||||
}),
|
||||
},
|
||||
WriteObject: []gax.CallOption{
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 1000 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 2.00,
|
||||
})
|
||||
}),
|
||||
},
|
||||
StartResumableWrite: []gax.CallOption{
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 1000 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 2.00,
|
||||
})
|
||||
}),
|
||||
},
|
||||
QueryWriteStatus: []gax.CallOption{
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 1000 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 2.00,
|
||||
})
|
||||
}),
|
||||
},
|
||||
}
|
||||
}
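defaultCallOptions above retries ReadObject, WriteObject, StartResumableWrite, and QueryWriteStatus on DeadlineExceeded and Unavailable with exponential backoff (1 s initial, 60 s cap, multiplier 2). Because every wrapper method accepts trailing gax.CallOption values, a caller can tighten or replace that policy per call. A small sketch under the same caveat as above (internal import shown only for illustration; the upload ID is a placeholder):

```go
package main

import (
	"context"
	"log"
	"time"

	storage "cloud.google.com/go/storage/internal/apiv2"
	gax "github.com/googleapis/gax-go/v2"
	storagepb "google.golang.org/genproto/googleapis/storage/v2"
	"google.golang.org/grpc/codes"
)

func main() {
	ctx := context.Background()
	c, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Per-call override of the default retry policy: retry only on
	// Unavailable, with a tighter backoff than the generated defaults.
	retryOpt := gax.WithRetry(func() gax.Retryer {
		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
			Initial:    200 * time.Millisecond,
			Max:        5 * time.Second,
			Multiplier: 1.5,
		})
	})

	// Placeholder upload ID; QueryWriteStatus is simply a convenient unary call.
	resp, err := c.QueryWriteStatus(ctx, &storagepb.QueryWriteStatusRequest{
		UploadId: "example-upload-id",
	}, retryOpt)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("write status: %v", resp)
}
```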
|
||||
|
||||
// internalClient is an interface that defines the methods availaible from Cloud Storage API.
|
||||
type internalClient interface {
|
||||
Close() error
|
||||
setGoogleClientInfo(...string)
|
||||
Connection() *grpc.ClientConn
|
||||
ReadObject(context.Context, *storagepb.ReadObjectRequest, ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error)
|
||||
WriteObject(context.Context, ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error)
|
||||
StartResumableWrite(context.Context, *storagepb.StartResumableWriteRequest, ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error)
|
||||
QueryWriteStatus(context.Context, *storagepb.QueryWriteStatusRequest, ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error)
|
||||
}
|
||||
|
||||
// Client is a client for interacting with Cloud Storage API.
|
||||
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
|
||||
//
|
||||
// Manages Google Cloud Storage resources.
|
||||
type Client struct {
|
||||
// The internal transport-dependent client.
|
||||
internalClient internalClient
|
||||
|
||||
// The call options for this service.
|
||||
CallOptions *CallOptions
|
||||
}
|
||||
|
||||
// Wrapper methods routed to the internal client.
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *Client) Close() error {
|
||||
return c.internalClient.Close()
|
||||
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *Client) setGoogleClientInfo(keyval ...string) {
|
||||
c.internalClient.setGoogleClientInfo(keyval...)
|
||||
}
|
||||
|
||||
// Connection returns a connection to the API service.
|
||||
//
|
||||
// Deprecated.
|
||||
func (c *Client) Connection() *grpc.ClientConn {
|
||||
return c.internalClient.Connection()
|
||||
}
|
||||
|
||||
// ReadObject reads an object’s data.
|
||||
func (c *Client) ReadObject(ctx context.Context, req *storagepb.ReadObjectRequest, opts ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) {
|
||||
return c.internalClient.ReadObject(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// WriteObject stores a new object and metadata.
|
||||
//
|
||||
// An object can be written either in a single message stream or in a
|
||||
// resumable sequence of message streams. To write using a single stream,
|
||||
// the client should include in the first message of the stream an
|
||||
// WriteObjectSpec describing the destination bucket, object, and any
|
||||
// preconditions. Additionally, the final message must set ‘finish_write’ to
|
||||
// true, or else it is an error.
|
||||
//
|
||||
// For a resumable write, the client should instead call
|
||||
// StartResumableWrite() and provide that method an WriteObjectSpec.
|
||||
// They should then attach the returned upload_id to the first message of
|
||||
// each following call to Create. If there is an error or the connection is
|
||||
// broken during the resumable Create(), the client should check the status
|
||||
// of the Create() by calling QueryWriteStatus() and continue writing from
|
||||
// the returned committed_size. This may be less than the amount of data the
|
||||
// client previously sent.
|
||||
//
|
||||
// The service will not view the object as complete until the client has
|
||||
// sent a WriteObjectRequest with finish_write set to true. Sending any
|
||||
// requests on a stream after sending a request with finish_write set to
|
||||
// true will cause an error. The client should check the response it
|
||||
// receives to determine how much data the service was able to commit and
|
||||
// whether the service views the object as complete.
|
||||
func (c *Client) WriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) {
|
||||
return c.internalClient.WriteObject(ctx, opts...)
|
||||
}
|
||||
|
||||
// StartResumableWrite starts a resumable write. How long the write operation remains valid, and
|
||||
// what happens when the write operation becomes invalid, are
|
||||
// service-dependent.
|
||||
func (c *Client) StartResumableWrite(ctx context.Context, req *storagepb.StartResumableWriteRequest, opts ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) {
|
||||
return c.internalClient.StartResumableWrite(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// QueryWriteStatus determines the committed_size for an object that is being written, which
|
||||
// can then be used as the write_offset for the next Write() call.
|
||||
//
|
||||
// If the object does not exist (i.e., the object has been deleted, or the
|
||||
// first Write() has not yet reached the service), this method returns the
|
||||
// error NOT_FOUND.
|
||||
//
|
||||
// The client may call QueryWriteStatus() at any time to determine how
|
||||
// much data has been processed for this object. This is useful if the
|
||||
// client is buffering data and needs to know which data can be safely
|
||||
// evicted. For any sequence of QueryWriteStatus() calls for a given
|
||||
// object name, the sequence of returned committed_size values will be
|
||||
// non-decreasing.
|
||||
func (c *Client) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWriteStatusRequest, opts ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) {
|
||||
return c.internalClient.QueryWriteStatus(ctx, req, opts...)
|
||||
}
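The WriteObject, StartResumableWrite, and QueryWriteStatus doc comments above describe the resumable flow: obtain an upload_id, stream WriteObjectRequests that reference it, set finish_write on the final message, and use QueryWriteStatus to learn the committed size after an interruption. A compressed sketch of that control flow (bucket and object names are placeholders, the proto field names follow the google.storage.v2 messages vendored by this commit and should be treated as illustrative, and the per-chunk streaming is elided):

```go
package main

import (
	"context"
	"log"

	storage "cloud.google.com/go/storage/internal/apiv2"
	storagepb "google.golang.org/genproto/googleapis/storage/v2"
)

func main() {
	ctx := context.Background()
	c, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// 1. Ask the service for an upload session; the spec names the
	//    destination bucket and object (placeholder values).
	start, err := c.StartResumableWrite(ctx, &storagepb.StartResumableWriteRequest{
		WriteObjectSpec: &storagepb.WriteObjectSpec{
			Resource: &storagepb.Object{
				Bucket: "my-bucket",
				Name:   "my-object",
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	uploadID := start.GetUploadId()

	// 2. Open a WriteObject stream and send messages that carry the upload_id,
	//    a write_offset, and finish_write=true on the final message. (Elided.)
	stream, err := c.WriteObject(ctx)
	if err != nil {
		log.Fatal(err)
	}
	_ = stream

	// 3. After an error or dropped connection, ask how much data was committed
	//    and resume writing from that offset.
	status, err := c.QueryWriteStatus(ctx, &storagepb.QueryWriteStatusRequest{
		UploadId: uploadID,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("query write status: %v", status)
}
```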
|
||||
|
||||
// gRPCClient is a client for interacting with Cloud Storage API over gRPC transport.
|
||||
//
|
||||
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
|
||||
type gRPCClient struct {
|
||||
// Connection pool of gRPC connections to the service.
|
||||
connPool gtransport.ConnPool
|
||||
|
||||
// flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE
|
||||
disableDeadlines bool
|
||||
|
||||
// Points back to the CallOptions field of the containing Client
|
||||
CallOptions **CallOptions
|
||||
|
||||
// The gRPC API client.
|
||||
client storagepb.StorageClient
|
||||
|
||||
// The x-goog-* metadata to be sent with each request.
|
||||
xGoogMetadata metadata.MD
|
||||
}
|
||||
|
||||
// NewClient creates a new storage client based on gRPC.
|
||||
// The returned client must be Closed when it is done being used to clean up its underlying connections.
|
||||
//
|
||||
// Manages Google Cloud Storage resources.
|
||||
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
|
||||
clientOpts := defaultGRPCClientOptions()
|
||||
if newClientHook != nil {
|
||||
hookOpts, err := newClientHook(ctx, clientHookParams{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clientOpts = append(clientOpts, hookOpts...)
|
||||
}
|
||||
|
||||
disableDeadlines, err := checkDisableDeadlines()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client := Client{CallOptions: defaultCallOptions()}
|
||||
|
||||
c := &gRPCClient{
|
||||
connPool: connPool,
|
||||
disableDeadlines: disableDeadlines,
|
||||
client: storagepb.NewStorageClient(connPool),
|
||||
CallOptions: &client.CallOptions,
|
||||
}
|
||||
c.setGoogleClientInfo()
|
||||
|
||||
client.internalClient = c
|
||||
|
||||
return &client, nil
|
||||
}
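// Illustrative sketch (not part of the generated client) of using the gRPC
// client created above to read an object. readAllSketch is a hypothetical
// helper; it assumes the Client.ReadObject wrapper generated alongside the
// methods above, an "io" import, and a bucket given in the
// "projects/_/buckets/<bucket>" resource form used elsewhere in this module.
func readAllSketch(ctx context.Context, bucket, object string) ([]byte, error) {
    c, err := NewClient(ctx)
    if err != nil {
        return nil, err
    }
    defer c.Close()

    stream, err := c.ReadObject(ctx, &storagepb.ReadObjectRequest{
        Bucket: bucket, // e.g. "projects/_/buckets/my-bucket"
        Object: object,
    })
    if err != nil {
        return nil, err
    }
    var buf []byte
    for {
        msg, err := stream.Recv()
        if err == io.EOF {
            return buf, nil
        }
        if err != nil {
            return nil, err
        }
        // Each streamed response carries a chunk of the object content.
        buf = append(buf, msg.GetChecksummedData().GetContent()...)
    }
}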
|
||||
|
||||
// Connection returns a connection to the API service.
|
||||
//
|
||||
// Deprecated.
|
||||
func (c *gRPCClient) Connection() *grpc.ClientConn {
|
||||
return c.connPool.Conn()
|
||||
}
|
||||
|
||||
// setGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *gRPCClient) setGoogleClientInfo(keyval ...string) {
|
||||
kv := append([]string{"gl-go", versionGo()}, keyval...)
|
||||
kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
|
||||
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
|
||||
}
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *gRPCClient) Close() error {
|
||||
return c.connPool.Close()
|
||||
}
|
||||
|
||||
func (c *gRPCClient) ReadObject(ctx context.Context, req *storagepb.ReadObjectRequest, opts ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
var resp storagepb.Storage_ReadObjectClient
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.ReadObject(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *gRPCClient) WriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
var resp storagepb.Storage_WriteObjectClient
|
||||
opts = append((*c.CallOptions).WriteObject[0:len((*c.CallOptions).WriteObject):len((*c.CallOptions).WriteObject)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.WriteObject(ctx, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *gRPCClient) StartResumableWrite(ctx context.Context, req *storagepb.StartResumableWriteRequest, opts ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) {
|
||||
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
|
||||
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
|
||||
defer cancel()
|
||||
ctx = cctx
|
||||
}
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append((*c.CallOptions).StartResumableWrite[0:len((*c.CallOptions).StartResumableWrite):len((*c.CallOptions).StartResumableWrite)], opts...)
|
||||
var resp *storagepb.StartResumableWriteResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.StartResumableWrite(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *gRPCClient) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWriteStatusRequest, opts ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) {
|
||||
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
|
||||
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
|
||||
defer cancel()
|
||||
ctx = cctx
|
||||
}
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append((*c.CallOptions).QueryWriteStatus[0:len((*c.CallOptions).QueryWriteStatus):len((*c.CallOptions).QueryWriteStatus)], opts...)
|
||||
var resp *storagepb.QueryWriteStatusResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.QueryWriteStatus(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
vendor/cloud.google.com/go/storage/invoke.go (generated, vendored, 44 changes)
@ -16,9 +16,15 @@ package storage
|
|||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"cloud.google.com/go/internal"
|
||||
gax "github.com/googleapis/gax-go/v2"
|
||||
"google.golang.org/api/googleapi"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// runWithRetry calls the function until it returns nil or a non-retryable error, or
|
||||
|
@ -35,3 +41,41 @@ func runWithRetry(ctx context.Context, call func() error) error {
|
|||
return true, err
|
||||
})
|
||||
}
|
||||
|
||||
func shouldRetry(err error) bool {
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
return true
|
||||
}
|
||||
switch e := err.(type) {
|
||||
case *googleapi.Error:
|
||||
// Retry on 429 and 5xx, according to
|
||||
// https://cloud.google.com/storage/docs/exponential-backoff.
|
||||
return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
|
||||
case *url.Error:
|
||||
// Retry socket-level errors ECONNREFUSED and ECONNRESET (from syscall).
|
||||
// Unfortunately the error type is unexported, so we resort to string
|
||||
// matching.
|
||||
retriable := []string{"connection refused", "connection reset"}
|
||||
for _, s := range retriable {
|
||||
if strings.Contains(e.Error(), s) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
case interface{ Temporary() bool }:
|
||||
if e.Temporary() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
// HTTP 429, 502, 503, and 504 all map to gRPC UNAVAILABLE per
|
||||
// https://grpc.github.io/grpc/core/md_doc_http-grpc-status-mapping.html.
|
||||
//
|
||||
// This is only necessary for the experimental gRPC-based media operations.
|
||||
if st, ok := status.FromError(err); ok && st.Code() == codes.Unavailable {
|
||||
return true
|
||||
}
|
||||
// Unwrap is only supported in go1.13.x+
|
||||
if e, ok := err.(interface{ Unwrap() error }); ok {
|
||||
return shouldRetry(e.Unwrap())
|
||||
}
|
||||
return false
|
||||
}
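// Illustrative sketch (not part of this file) of how runWithRetry and
// shouldRetry are combined: wrap an idempotent call and let the retry loop
// decide, via shouldRetry, whether a failure is transient. getBucketSketch is
// a hypothetical helper and assumes the raw JSON client
// (google.golang.org/api/storage/v1) imported elsewhere in this package.
func getBucketSketch(ctx context.Context, svc *raw.Service, bucket string) (*raw.Bucket, error) {
    var b *raw.Bucket
    err := runWithRetry(ctx, func() error {
        var err error
        // Retried on 429/5xx, connection resets, and gRPC Unavailable.
        b, err = svc.Buckets.Get(bucket).Context(ctx).Do()
        return err
    })
    return b, err
}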
|
||||
|
|
vendor/cloud.google.com/go/storage/not_go110.go (generated, vendored, 42 changes, file removed)
|
@ -1,42 +0,0 @@
|
|||
// Copyright 2017 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build !go1.10
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
func shouldRetry(err error) bool {
|
||||
switch e := err.(type) {
|
||||
case *googleapi.Error:
|
||||
// Retry on 429 and 5xx, according to
|
||||
// https://cloud.google.com/storage/docs/exponential-backoff.
|
||||
return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
|
||||
case *url.Error:
|
||||
// Retry on REFUSED_STREAM.
|
||||
// Unfortunately the error type is unexported, so we resort to string
|
||||
// matching.
|
||||
return strings.Contains(e.Error(), "REFUSED_STREAM")
|
||||
case interface{ Temporary() bool }:
|
||||
return e.Temporary()
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
vendor/cloud.google.com/go/storage/reader.go (generated, vendored, 320 changes)
|
@ -29,6 +29,8 @@ import (
|
|||
|
||||
"cloud.google.com/go/internal/trace"
|
||||
"google.golang.org/api/googleapi"
|
||||
storagepb "google.golang.org/genproto/googleapis/storage/v2"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
|
||||
|
@ -94,6 +96,10 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
|
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
if o.c.gc != nil {
|
||||
return o.newRangeReaderWithGRPC(ctx, offset, length)
|
||||
}
|
||||
|
||||
if err := o.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -149,7 +155,14 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
|
|||
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, offset+length-1))
|
||||
}
|
||||
// We wait to assign conditions here because the generation number can change in between reopen() runs.
|
||||
req.URL.RawQuery = conditionsQuery(gen, o.conds)
|
||||
if err := setConditionsHeaders(req.Header, o.conds); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// If an object generation is specified, include generation as query string parameters.
|
||||
if gen >= 0 {
|
||||
req.URL.RawQuery = fmt.Sprintf("generation=%d", gen)
|
||||
}
|
||||
|
||||
var res *http.Response
|
||||
err = runWithRetry(ctx, func() error {
|
||||
res, err = o.c.hc.Do(req)
|
||||
|
@ -220,6 +233,8 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
|
|||
if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
|
||||
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
|
||||
}
|
||||
// Content range is formatted <first byte>-<last byte>/<total size>. We take
|
||||
// the total size.
|
||||
size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
|
||||
|
@ -324,6 +339,24 @@ func parseCRC32c(res *http.Response) (uint32, bool) {
|
|||
return 0, false
|
||||
}
|
||||
|
||||
// setConditionsHeaders sets precondition request headers for downloads
|
||||
// using the XML API. It assumes that the conditions have been validated.
|
||||
func setConditionsHeaders(headers http.Header, conds *Conditions) error {
|
||||
if conds == nil {
|
||||
return nil
|
||||
}
|
||||
if conds.MetagenerationMatch != 0 {
|
||||
headers.Set("x-goog-if-metageneration-match", fmt.Sprint(conds.MetagenerationMatch))
|
||||
}
|
||||
switch {
|
||||
case conds.GenerationMatch != 0:
|
||||
headers.Set("x-goog-if-generation-match", fmt.Sprint(conds.GenerationMatch))
|
||||
case conds.DoesNotExist:
|
||||
headers.Set("x-goog-if-generation-match", "0")
|
||||
}
|
||||
return nil
|
||||
}
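// Illustrative sketch (not part of this file) of setConditionsHeaders: for
// Conditions{GenerationMatch: 1234, MetagenerationMatch: 5}, the call below
// sets "x-goog-if-generation-match: 1234" and
// "x-goog-if-metageneration-match: 5" on the download request headers.
// conditionsHeadersSketch is a hypothetical helper.
func conditionsHeadersSketch() (http.Header, error) {
    h := make(http.Header)
    err := setConditionsHeaders(h, &Conditions{GenerationMatch: 1234, MetagenerationMatch: 5})
    return h, err
}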
|
||||
|
||||
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
|
||||
|
||||
// Reader reads a Cloud Storage object.
|
||||
|
@ -340,15 +373,36 @@ type Reader struct {
|
|||
wantCRC uint32 // the CRC32c value the server sent in the header
|
||||
gotCRC uint32 // running crc
|
||||
reopen func(seen int64) (*http.Response, error)
|
||||
|
||||
// The following fields are only for use in the gRPC hybrid client.
|
||||
stream storagepb.Storage_ReadObjectClient
|
||||
reopenWithGRPC func(seen int64) (*readStreamResponse, context.CancelFunc, error)
|
||||
leftovers []byte
|
||||
cancelStream context.CancelFunc
|
||||
}
|
||||
|
||||
type readStreamResponse struct {
|
||||
stream storagepb.Storage_ReadObjectClient
|
||||
response *storagepb.ReadObjectResponse
|
||||
}
|
||||
|
||||
// Close closes the Reader. It must be called when done reading.
|
||||
func (r *Reader) Close() error {
|
||||
if r.body != nil {
|
||||
return r.body.Close()
|
||||
}
|
||||
|
||||
r.closeStream()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Reader) Read(p []byte) (int, error) {
|
||||
n, err := r.readWithRetry(p)
|
||||
read := r.readWithRetry
|
||||
if r.reopenWithGRPC != nil {
|
||||
read = r.readWithGRPC
|
||||
}
|
||||
|
||||
n, err := read(p)
|
||||
if r.remain != -1 {
|
||||
r.remain -= int64(n)
|
||||
}
|
||||
|
@ -367,6 +421,136 @@ func (r *Reader) Read(p []byte) (int, error) {
|
|||
return n, err
|
||||
}
|
||||
|
||||
// newRangeReaderWithGRPC creates a new Reader with the given range that uses
|
||||
// gRPC to read Object content.
|
||||
//
|
||||
// This is an experimental API and not intended for public use.
|
||||
func (o *ObjectHandle) newRangeReaderWithGRPC(ctx context.Context, offset, length int64) (r *Reader, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.newRangeReaderWithGRPC")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
if o.c.gc == nil {
|
||||
err = fmt.Errorf("handle doesn't have a gRPC client initialized")
|
||||
return
|
||||
}
|
||||
if err = o.validate(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// A negative length means "read to the end of the object", but the
|
||||
// read_limit field it corresponds to uses zero to mean the same thing. Thus
|
||||
// we coerce the length to 0 to read to the end of the object.
|
||||
if length < 0 {
|
||||
length = 0
|
||||
}
|
||||
|
||||
// For now, there are only globally unique buckets, and "_" is the alias
|
||||
// project ID for such buckets.
|
||||
b := bucketResourceName("_", o.bucket)
|
||||
req := &storagepb.ReadObjectRequest{
|
||||
Bucket: b,
|
||||
Object: o.object,
|
||||
}
|
||||
// The default is a negative value, which means latest.
|
||||
if o.gen >= 0 {
|
||||
req.Generation = o.gen
|
||||
}
|
||||
|
||||
// Define a function that initiates a Read with offset and length, assuming
|
||||
// we have already read seen bytes.
|
||||
reopen := func(seen int64) (*readStreamResponse, context.CancelFunc, error) {
|
||||
// If the context has already expired, return immediately without making
// the call.
|
||||
if err := ctx.Err(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
cc, cancel := context.WithCancel(ctx)
|
||||
|
||||
start := offset + seen
|
||||
// Only set a ReadLimit if length is greater than zero, because zero
|
||||
// means read it all.
|
||||
if length > 0 {
|
||||
req.ReadLimit = length - seen
|
||||
}
|
||||
req.ReadOffset = start
|
||||
|
||||
setRequestConditions(req, o.conds)
|
||||
|
||||
var stream storagepb.Storage_ReadObjectClient
|
||||
var msg *storagepb.ReadObjectResponse
|
||||
var err error
|
||||
|
||||
err = runWithRetry(cc, func() error {
|
||||
stream, err = o.c.gc.ReadObject(cc, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msg, err = stream.Recv()
|
||||
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
// Close the stream context we just created to ensure we don't leak
|
||||
// resources.
|
||||
cancel()
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return &readStreamResponse{stream, msg}, cancel, nil
|
||||
}
|
||||
|
||||
res, cancel, err := reopen(0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r = &Reader{
|
||||
stream: res.stream,
|
||||
reopenWithGRPC: reopen,
|
||||
cancelStream: cancel,
|
||||
}
|
||||
|
||||
// The first message was Recv'd on stream open, use it to populate the
|
||||
// object metadata.
|
||||
msg := res.response
|
||||
obj := msg.GetMetadata()
|
||||
// This is the size of the entire object, even if only a range was requested.
|
||||
size := obj.GetSize()
|
||||
|
||||
r.Attrs = ReaderObjectAttrs{
|
||||
Size: size,
|
||||
ContentType: obj.GetContentType(),
|
||||
ContentEncoding: obj.GetContentEncoding(),
|
||||
CacheControl: obj.GetCacheControl(),
|
||||
LastModified: obj.GetUpdateTime().AsTime(),
|
||||
Metageneration: obj.GetMetageneration(),
|
||||
Generation: obj.GetGeneration(),
|
||||
}
|
||||
|
||||
r.size = size
|
||||
cr := msg.GetContentRange()
|
||||
if cr != nil {
|
||||
r.Attrs.StartOffset = cr.GetStart()
|
||||
r.remain = cr.GetEnd() - cr.GetStart() + 1
|
||||
} else {
|
||||
r.remain = size
|
||||
}
|
||||
|
||||
// Only support checksums when reading an entire object, not a range.
|
||||
if msg.GetObjectChecksums().Crc32C != nil && offset == 0 && length == 0 {
|
||||
r.wantCRC = msg.GetObjectChecksums().GetCrc32C()
|
||||
r.checkCRC = true
|
||||
}
|
||||
|
||||
// Store the content from the first Recv in the client buffer for reading
|
||||
// later.
|
||||
r.leftovers = msg.GetChecksummedData().GetContent()
|
||||
|
||||
return r, nil
|
||||
}
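// Illustrative sketch (not part of this file) of how the gRPC read path above
// is reached through the public API: when the Client carries a gRPC sub-client
// (as set up by newHybridClient in storage.go), NewRangeReader routes the read
// through newRangeReaderWithGRPC and callers simply use the ordinary Reader.
// downloadSketch is a hypothetical helper.
func downloadSketch(ctx context.Context, c *Client, bucket, object string) ([]byte, error) {
    // A negative length means "read to the end of the object".
    r, err := c.Bucket(bucket).Object(object).NewRangeReader(ctx, 0, -1)
    if err != nil {
        return nil, err
    }
    defer r.Close()
    return ioutil.ReadAll(r)
}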
|
||||
|
||||
func (r *Reader) readWithRetry(p []byte) (int, error) {
|
||||
n := 0
|
||||
for len(p[n:]) > 0 {
|
||||
|
@ -390,6 +574,138 @@ func (r *Reader) readWithRetry(p []byte) (int, error) {
|
|||
return n, nil
|
||||
}
|
||||
|
||||
// closeStream cancels a stream's context in order for it to be closed and
|
||||
// collected.
|
||||
//
|
||||
// This is an experimental API and not intended for public use.
|
||||
func (r *Reader) closeStream() {
|
||||
if r.cancelStream != nil {
|
||||
r.cancelStream()
|
||||
}
|
||||
r.stream = nil
|
||||
}
|
||||
|
||||
// readWithGRPC reads bytes into the user's buffer from an open gRPC stream.
|
||||
//
|
||||
// This is an experimental API and not intended for public use.
|
||||
func (r *Reader) readWithGRPC(p []byte) (int, error) {
|
||||
// No stream to read from, either never initialized or Close was called.
|
||||
// Note: There is a potential concurrency issue if multiple routines are
|
||||
// using the same reader. One encounters an error and the stream is closed
|
||||
// and then reopened while the other routine attempts to read from it.
|
||||
if r.stream == nil {
|
||||
return 0, fmt.Errorf("reader has been closed")
|
||||
}
|
||||
|
||||
// The entire object has been read by this reader, return EOF.
|
||||
if r.size != 0 && r.size == r.seen {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
var n int
|
||||
// Read leftovers and return what was available to conform to the Reader
|
||||
// interface: https://pkg.go.dev/io#Reader.
|
||||
if len(r.leftovers) > 0 {
|
||||
n = copy(p, r.leftovers)
|
||||
r.seen += int64(n)
|
||||
r.leftovers = r.leftovers[n:]
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Attempt to Recv the next message on the stream.
|
||||
msg, err := r.recv()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// TODO: Determine if we need to capture incremental CRC32C for this
|
||||
// chunk. The Object CRC32C checksum is captured when directed to read
|
||||
// the entire Object. If directed to read a range, we may need to
|
||||
// calculate the range's checksum for verification if the checksum is
|
||||
// present in the response here.
|
||||
// TODO: Figure out if we need to support decompressive transcoding
|
||||
// https://cloud.google.com/storage/docs/transcoding.
|
||||
content := msg.GetChecksummedData().GetContent()
|
||||
n = copy(p[n:], content)
|
||||
leftover := len(content) - n
|
||||
if leftover > 0 {
|
||||
// Wasn't able to copy all of the data in the message, store for
|
||||
// future Read calls.
|
||||
// TODO: Instead of acquiring a new block of memory, should we reuse
|
||||
// the existing leftovers slice, expanding it if necessary?
|
||||
r.leftovers = make([]byte, leftover)
|
||||
copy(r.leftovers, content[n:])
|
||||
}
|
||||
r.seen += int64(n)
|
||||
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// recv attempts to Recv the next message on the stream. In the event
|
||||
// that a retryable error is encountered, the stream will be closed, reopened,
|
||||
// and Recv again. This will attempt to Recv until one of the following is true:
|
||||
//
|
||||
// * Recv is successful
|
||||
// * A non-retryable error is encountered
|
||||
// * The Reader's context is canceled
|
||||
//
|
||||
// The last error received is the one that is returned, which could be from
|
||||
// an attempt to reopen the stream.
|
||||
//
|
||||
// This is an experimental API and not intended for public use.
|
||||
func (r *Reader) recv() (*storagepb.ReadObjectResponse, error) {
|
||||
msg, err := r.stream.Recv()
|
||||
if err != nil && shouldRetry(err) {
|
||||
// This will "close" the existing stream and immediately attempt to
|
||||
// reopen the stream, but will backoff if further attempts are necessary.
|
||||
// Reopening the stream Recvs the first message, so if retrying is
|
||||
// successful, the next logical chunk will be returned.
|
||||
msg, err = r.reopenStream(r.seen)
|
||||
}
|
||||
|
||||
return msg, err
|
||||
}
|
||||
|
||||
// reopenStream "closes" the existing stream and attempts to reopen a stream and
|
||||
// sets the Reader's stream and cancelStream properties in the process.
|
||||
//
|
||||
// This is an experimental API and not intended for public use.
|
||||
func (r *Reader) reopenStream(seen int64) (*storagepb.ReadObjectResponse, error) {
|
||||
// Close existing stream and initialize new stream with updated offset.
|
||||
r.closeStream()
|
||||
|
||||
res, cancel, err := r.reopenWithGRPC(r.seen)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.stream = res.stream
|
||||
r.cancelStream = cancel
|
||||
return res.response, nil
|
||||
}
|
||||
|
||||
// setRequestConditions is used to apply the given Conditions to a gRPC request
|
||||
// message.
|
||||
//
|
||||
// This is an experimental API and not intended for public use.
|
||||
func setRequestConditions(req *storagepb.ReadObjectRequest, conds *Conditions) {
|
||||
if conds == nil {
|
||||
return
|
||||
}
|
||||
if conds.MetagenerationMatch != 0 {
|
||||
req.IfMetagenerationMatch = proto.Int64(conds.MetagenerationMatch)
|
||||
} else if conds.MetagenerationNotMatch != 0 {
|
||||
req.IfMetagenerationNotMatch = proto.Int64(conds.MetagenerationNotMatch)
|
||||
}
|
||||
switch {
|
||||
case conds.GenerationNotMatch != 0:
|
||||
req.IfGenerationNotMatch = proto.Int64(conds.GenerationNotMatch)
|
||||
case conds.GenerationMatch != 0:
|
||||
req.IfGenerationMatch = proto.Int64(conds.GenerationMatch)
|
||||
case conds.DoesNotExist:
|
||||
req.IfGenerationMatch = proto.Int64(0)
|
||||
}
|
||||
}
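// Illustrative sketch (not part of this file) of setRequestConditions: for
// Conditions{DoesNotExist: true} the request below ends up with
// IfGenerationMatch set to 0, which tells the service the object must not
// already exist. requestConditionsSketch is a hypothetical helper.
func requestConditionsSketch() *storagepb.ReadObjectRequest {
    req := &storagepb.ReadObjectRequest{}
    setRequestConditions(req, &Conditions{DoesNotExist: true})
    return req
}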
|
||||
|
||||
// Size returns the size of the object in bytes.
|
||||
// The returned value is always the same and is not affected by
|
||||
// calls to Read or Close.
|
||||
|
|
vendor/cloud.google.com/go/storage/storage.go (generated, vendored, 217 changes)
|
@ -33,7 +33,6 @@ import (
|
|||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
@ -41,11 +40,15 @@ import (
|
|||
"cloud.google.com/go/internal/optional"
|
||||
"cloud.google.com/go/internal/trace"
|
||||
"cloud.google.com/go/internal/version"
|
||||
gapic "cloud.google.com/go/storage/internal/apiv2"
|
||||
"google.golang.org/api/googleapi"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/option/internaloption"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
htransport "google.golang.org/api/transport/http"
|
||||
storagepb "google.golang.org/genproto/googleapis/storage/v2"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
)
|
||||
|
||||
// Methods which can be used in signed URLs.
|
||||
|
@ -91,10 +94,13 @@ type Client struct {
|
|||
raw *raw.Service
|
||||
// Scheme describes the scheme under the current host.
|
||||
scheme string
|
||||
// EnvHost is the host set on the STORAGE_EMULATOR_HOST variable.
|
||||
envHost string
|
||||
// ReadHost is the default host used on the reader.
|
||||
readHost string
|
||||
|
||||
// gc is an optional gRPC-based, GAPIC client.
|
||||
//
|
||||
// This is an experimental field and not intended for public use.
|
||||
gc *gapic.Client
|
||||
}
|
||||
|
||||
// NewClient creates a new Google Cloud Storage client.
|
||||
|
@ -104,7 +110,6 @@ type Client struct {
|
|||
// Clients should be reused instead of created as needed. The methods of Client
|
||||
// are safe for concurrent use by multiple goroutines.
|
||||
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
|
||||
var host, readHost, scheme string
|
||||
|
||||
// In general, it is recommended to use raw.NewService instead of htransport.NewClient
|
||||
// since raw.NewService configures the correct default endpoints when initializing the
|
||||
|
@ -113,23 +118,35 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
|
|||
// here so it can be re-used by both reader.go and raw.NewService. This means we need to
|
||||
// manually configure the default endpoint options on the http client. Furthermore, we
|
||||
// need to account for STORAGE_EMULATOR_HOST override when setting the default endpoints.
|
||||
if host = os.Getenv("STORAGE_EMULATOR_HOST"); host == "" {
|
||||
scheme = "https"
|
||||
readHost = "storage.googleapis.com"
|
||||
|
||||
if host := os.Getenv("STORAGE_EMULATOR_HOST"); host == "" {
|
||||
// Prepend default options to avoid overriding options passed by the user.
|
||||
opts = append([]option.ClientOption{option.WithScopes(ScopeFullControl), option.WithUserAgent(userAgent)}, opts...)
|
||||
|
||||
opts = append(opts, internaloption.WithDefaultEndpoint("https://storage.googleapis.com/storage/v1/"))
|
||||
opts = append(opts, internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/"))
|
||||
} else {
|
||||
scheme = "http"
|
||||
readHost = host
|
||||
var hostURL *url.URL
|
||||
|
||||
if strings.Contains(host, "://") {
|
||||
h, err := url.Parse(host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hostURL = h
|
||||
} else {
|
||||
// Add scheme for user if not supplied in STORAGE_EMULATOR_HOST
|
||||
// URL is only parsed correctly if it has a scheme, so we build it ourselves
|
||||
hostURL = &url.URL{Scheme: "http", Host: host}
|
||||
}
|
||||
|
||||
hostURL.Path = "storage/v1/"
|
||||
endpoint := hostURL.String()
|
||||
|
||||
// Append the emulator host as default endpoint for the user
|
||||
opts = append([]option.ClientOption{option.WithoutAuthentication()}, opts...)
|
||||
|
||||
opts = append(opts, internaloption.WithDefaultEndpoint(host))
|
||||
opts = append(opts, internaloption.WithDefaultMTLSEndpoint(host))
|
||||
opts = append(opts, internaloption.WithDefaultEndpoint(endpoint))
|
||||
opts = append(opts, internaloption.WithDefaultMTLSEndpoint(endpoint))
|
||||
}
|
||||
|
||||
// htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint.
|
||||
|
@ -142,22 +159,48 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("storage client: %v", err)
|
||||
}
|
||||
// Update readHost with the chosen endpoint.
|
||||
// Update readHost and scheme with the chosen endpoint.
|
||||
u, err := url.Parse(ep)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("supplied endpoint %q is not valid: %v", ep, err)
|
||||
}
|
||||
readHost = u.Host
|
||||
|
||||
return &Client{
|
||||
hc: hc,
|
||||
raw: rawService,
|
||||
scheme: scheme,
|
||||
envHost: host,
|
||||
readHost: readHost,
|
||||
scheme: u.Scheme,
|
||||
readHost: u.Host,
|
||||
}, nil
|
||||
}
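// Illustrative sketch (not part of this file) of the STORAGE_EMULATOR_HOST
// handling above: pointing the variable at a local emulator (with or without a
// scheme) makes NewClient build an unauthenticated client whose default
// endpoint becomes "<emulator>/storage/v1/". emulatorClientSketch and the
// example port are hypothetical.
func emulatorClientSketch(ctx context.Context) (*Client, error) {
    // Both "localhost:9023" and "http://localhost:9023" are accepted; the
    // scheme is added for the caller when it is missing.
    if err := os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9023"); err != nil {
        return nil, err
    }
    return NewClient(ctx)
}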
|
||||
|
||||
// hybridClientOptions carries the set of client options for HTTP and gRPC clients.
|
||||
type hybridClientOptions struct {
|
||||
HTTPOpts []option.ClientOption
|
||||
GRPCOpts []option.ClientOption
|
||||
}
|
||||
|
||||
// newHybridClient creates a new Storage client that initializes a gRPC-based client
|
||||
// for media upload and download operations.
|
||||
//
|
||||
// This is an experimental API and not intended for public use.
|
||||
func newHybridClient(ctx context.Context, opts *hybridClientOptions) (*Client, error) {
|
||||
if opts == nil {
|
||||
opts = &hybridClientOptions{}
|
||||
}
|
||||
c, err := NewClient(ctx, opts.HTTPOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
g, err := gapic.NewClient(ctx, opts.GRPCOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.gc = g
|
||||
|
||||
return c, nil
|
||||
}
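// Illustrative sketch (not part of this file) of constructing the hybrid
// client above from inside this package; newHybridClient is unexported and
// experimental. Passing nil options uses the defaults for both transports.
// hybridClientSketch is a hypothetical helper.
func hybridClientSketch(ctx context.Context) (*Client, error) {
    c, err := newHybridClient(ctx, nil)
    if err != nil {
        return nil, err
    }
    // Object reads on this client are routed through the gRPC-based GAPIC
    // client, while everything else continues to use the JSON/HTTP API.
    return c, nil
}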
|
||||
|
||||
// Close closes the Client.
|
||||
//
|
||||
// Close need not be called at program exit.
|
||||
|
@ -165,6 +208,9 @@ func (c *Client) Close() error {
|
|||
// Set fields to nil so that subsequent uses will panic.
|
||||
c.hc = nil
|
||||
c.raw = nil
|
||||
if c.gc != nil {
|
||||
return c.gc.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1089,6 +1135,42 @@ func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object {
|
|||
}
|
||||
}
|
||||
|
||||
// toProtoObject copies the editable attributes from o to the proto library's Object type.
|
||||
func (o *ObjectAttrs) toProtoObject(b string) *storagepb.Object {
|
||||
checksums := &storagepb.ObjectChecksums{Md5Hash: o.MD5}
|
||||
if o.CRC32C > 0 {
|
||||
checksums.Crc32C = proto.Uint32(o.CRC32C)
|
||||
}
|
||||
|
||||
// For now, there are only globally unique buckets, and "_" is the alias
|
||||
// project ID for such buckets.
|
||||
b = bucketResourceName("_", b)
|
||||
|
||||
return &storagepb.Object{
|
||||
Bucket: b,
|
||||
Name: o.Name,
|
||||
EventBasedHold: proto.Bool(o.EventBasedHold),
|
||||
TemporaryHold: o.TemporaryHold,
|
||||
ContentType: o.ContentType,
|
||||
ContentEncoding: o.ContentEncoding,
|
||||
ContentLanguage: o.ContentLanguage,
|
||||
CacheControl: o.CacheControl,
|
||||
ContentDisposition: o.ContentDisposition,
|
||||
StorageClass: o.StorageClass,
|
||||
Acl: toProtoObjectACL(o.ACL),
|
||||
Metadata: o.Metadata,
|
||||
CreateTime: toProtoTimestamp(o.Created),
|
||||
CustomTime: toProtoTimestamp(o.CustomTime),
|
||||
DeleteTime: toProtoTimestamp(o.Deleted),
|
||||
RetentionExpireTime: toProtoTimestamp(o.RetentionExpirationTime),
|
||||
UpdateTime: toProtoTimestamp(o.Updated),
|
||||
KmsKey: o.KMSKeyName,
|
||||
Generation: o.Generation,
|
||||
Size: o.Size,
|
||||
Checksums: checksums,
|
||||
}
|
||||
}
|
||||
|
||||
// ObjectAttrs represents the metadata for a Google Cloud Storage (GCS) object.
|
||||
type ObjectAttrs struct {
|
||||
// Bucket is the name of the bucket containing this GCS object.
|
||||
|
@ -1245,6 +1327,22 @@ func convertTime(t string) time.Time {
|
|||
return r
|
||||
}
|
||||
|
||||
func convertProtoTime(t *timestamppb.Timestamp) time.Time {
|
||||
var r time.Time
|
||||
if t != nil {
|
||||
r = t.AsTime()
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func toProtoTimestamp(t time.Time) *timestamppb.Timestamp {
|
||||
if t.IsZero() {
|
||||
return nil
|
||||
}
|
||||
|
||||
return timestamppb.New(t)
|
||||
}
|
||||
|
||||
func newObject(o *raw.Object) *ObjectAttrs {
|
||||
if o == nil {
|
||||
return nil
|
||||
|
@ -1290,6 +1388,40 @@ func newObject(o *raw.Object) *ObjectAttrs {
|
|||
}
|
||||
}
|
||||
|
||||
func newObjectFromProto(r *storagepb.WriteObjectResponse) *ObjectAttrs {
|
||||
o := r.GetResource()
|
||||
if r == nil || o == nil {
|
||||
return nil
|
||||
}
|
||||
return &ObjectAttrs{
|
||||
Bucket: parseBucketName(o.Bucket),
|
||||
Name: o.Name,
|
||||
ContentType: o.ContentType,
|
||||
ContentLanguage: o.ContentLanguage,
|
||||
CacheControl: o.CacheControl,
|
||||
EventBasedHold: o.GetEventBasedHold(),
|
||||
TemporaryHold: o.TemporaryHold,
|
||||
RetentionExpirationTime: convertProtoTime(o.GetRetentionExpireTime()),
|
||||
ACL: fromProtoToObjectACLRules(o.GetAcl()),
|
||||
Owner: o.GetOwner().GetEntity(),
|
||||
ContentEncoding: o.ContentEncoding,
|
||||
ContentDisposition: o.ContentDisposition,
|
||||
Size: int64(o.Size),
|
||||
MD5: o.GetChecksums().GetMd5Hash(),
|
||||
CRC32C: o.GetChecksums().GetCrc32C(),
|
||||
Metadata: o.Metadata,
|
||||
Generation: o.Generation,
|
||||
Metageneration: o.Metageneration,
|
||||
StorageClass: o.StorageClass,
|
||||
CustomerKeySHA256: o.GetCustomerEncryption().GetKeySha256(),
|
||||
KMSKeyName: o.GetKmsKey(),
|
||||
Created: convertProtoTime(o.GetCreateTime()),
|
||||
Deleted: convertProtoTime(o.GetDeleteTime()),
|
||||
Updated: convertProtoTime(o.GetUpdateTime()),
|
||||
CustomTime: convertProtoTime(o.GetCustomTime()),
|
||||
}
|
||||
}
|
||||
|
||||
// Decode a uint32 encoded in Base64 in big-endian byte order.
|
||||
func decodeUint32(b64 string) (uint32, error) {
|
||||
d, err := base64.StdEncoding.DecodeString(b64)
|
||||
|
@ -1596,44 +1728,6 @@ func setConditionField(call reflect.Value, name string, value interface{}) bool
|
|||
return true
|
||||
}
|
||||
|
||||
// conditionsQuery returns the generation and conditions as a URL query
|
||||
// string suitable for URL.RawQuery. It assumes that the conditions
|
||||
// have been validated.
|
||||
func conditionsQuery(gen int64, conds *Conditions) string {
|
||||
// URL escapes are elided because integer strings are URL-safe.
|
||||
var buf []byte
|
||||
|
||||
appendParam := func(s string, n int64) {
|
||||
if len(buf) > 0 {
|
||||
buf = append(buf, '&')
|
||||
}
|
||||
buf = append(buf, s...)
|
||||
buf = strconv.AppendInt(buf, n, 10)
|
||||
}
|
||||
|
||||
if gen >= 0 {
|
||||
appendParam("generation=", gen)
|
||||
}
|
||||
if conds == nil {
|
||||
return string(buf)
|
||||
}
|
||||
switch {
|
||||
case conds.GenerationMatch != 0:
|
||||
appendParam("ifGenerationMatch=", conds.GenerationMatch)
|
||||
case conds.GenerationNotMatch != 0:
|
||||
appendParam("ifGenerationNotMatch=", conds.GenerationNotMatch)
|
||||
case conds.DoesNotExist:
|
||||
appendParam("ifGenerationMatch=", 0)
|
||||
}
|
||||
switch {
|
||||
case conds.MetagenerationMatch != 0:
|
||||
appendParam("ifMetagenerationMatch=", conds.MetagenerationMatch)
|
||||
case conds.MetagenerationNotMatch != 0:
|
||||
appendParam("ifMetagenerationNotMatch=", conds.MetagenerationNotMatch)
|
||||
}
|
||||
return string(buf)
|
||||
}
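// Illustrative sketch (not part of this file) of conditionsQuery: with a
// generation of 1234 and Conditions{MetagenerationNotMatch: 7}, the function
// returns "generation=1234&ifMetagenerationNotMatch=7", ready for use as a
// URL.RawQuery value. conditionsQuerySketch is a hypothetical helper.
func conditionsQuerySketch() string {
    return conditionsQuery(1234, &Conditions{MetagenerationNotMatch: 7})
}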
|
||||
|
||||
// composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods
|
||||
// that modifyCall searches for by name.
|
||||
type composeSourceObj struct {
|
||||
|
@ -1681,3 +1775,16 @@ func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string,
|
|||
}
|
||||
return res.EmailAddress, nil
|
||||
}
|
||||
|
||||
// bucketResourceName formats the given project ID and bucket ID
|
||||
// into a Bucket resource name. This is the format necessary for the gRPC API as
|
||||
// it conforms to the Resource-oriented design practices in https://google.aip.dev/121.
|
||||
func bucketResourceName(p, b string) string {
|
||||
return fmt.Sprintf("projects/%s/buckets/%s", p, b)
|
||||
}
|
||||
|
||||
// parseBucketName strips the leading resource path segment and returns the
|
||||
// bucket ID, which is the simple Bucket name typical of the v1 API.
|
||||
func parseBucketName(b string) string {
|
||||
return strings.TrimPrefix(b, "projects/_/buckets/")
|
||||
}
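// Illustrative sketch (not part of this file) of the two helpers above:
// bucketResourceName("_", "my-bucket") yields "projects/_/buckets/my-bucket",
// and parseBucketName turns that resource name back into "my-bucket".
// bucketNameRoundTripSketch is a hypothetical helper.
func bucketNameRoundTripSketch() string {
    return parseBucketName(bucketResourceName("_", "my-bucket"))
}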
|
||||
|
|
vendor/cloud.google.com/go/storage/writer.go (generated, vendored, 3 changes)
|
@ -125,9 +125,6 @@ func (w *Writer) open() error {
|
|||
if w.MD5 != nil {
|
||||
rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5)
|
||||
}
|
||||
if w.o.c.envHost != "" {
|
||||
w.o.c.raw.BasePath = fmt.Sprintf("%s://%s", w.o.c.scheme, w.o.c.envHost)
|
||||
}
|
||||
call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj).
|
||||
Media(pr, mediaOpts...).
|
||||
Projection("full").
|
||||
|
|
vendor/github.com/google/go-cmp/LICENSE (generated, vendored, new file, 27 lines)
|
@ -0,0 +1,27 @@
|
|||
Copyright (c) 2017 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
vendor/github.com/google/go-cmp/cmp/compare.go (generated, vendored, new file, 682 lines)
|
@ -0,0 +1,682 @@
|
|||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package cmp determines equality of values.
|
||||
//
|
||||
// This package is intended to be a more powerful and safer alternative to
|
||||
// reflect.DeepEqual for comparing whether two values are semantically equal.
|
||||
// It is intended to only be used in tests, as performance is not a goal and
|
||||
// it may panic if it cannot compare the values. Its propensity towards
|
||||
// panicking means that it is unsuitable for production environments where a
|
||||
// spurious panic may be fatal.
|
||||
//
|
||||
// The primary features of cmp are:
|
||||
//
|
||||
// • When the default behavior of equality does not suit the needs of the test,
|
||||
// custom equality functions can override the equality operation.
|
||||
// For example, an equality function may report floats as equal so long as they
|
||||
// are within some tolerance of each other.
|
||||
//
|
||||
// • Types that have an Equal method may use that method to determine equality.
|
||||
// This allows package authors to determine the equality operation for the types
|
||||
// that they define.
|
||||
//
|
||||
// • If no custom equality functions are used and no Equal method is defined,
|
||||
// equality is determined by recursively comparing the primitive kinds on both
|
||||
// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported
|
||||
// fields are not compared by default; they result in panics unless suppressed
|
||||
// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly
|
||||
// compared using the Exporter option.
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/diff"
|
||||
"github.com/google/go-cmp/cmp/internal/flags"
|
||||
"github.com/google/go-cmp/cmp/internal/function"
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
// Equal reports whether x and y are equal by recursively applying the
|
||||
// following rules in the given order to x and y and all of their sub-values:
|
||||
//
|
||||
// • Let S be the set of all Ignore, Transformer, and Comparer options that
|
||||
// remain after applying all path filters, value filters, and type filters.
|
||||
// If at least one Ignore exists in S, then the comparison is ignored.
|
||||
// If the number of Transformer and Comparer options in S is greater than one,
|
||||
// then Equal panics because it is ambiguous which option to use.
|
||||
// If S contains a single Transformer, then use that to transform the current
|
||||
// values and recursively call Equal on the output values.
|
||||
// If S contains a single Comparer, then use that to compare the current values.
|
||||
// Otherwise, evaluation proceeds to the next rule.
|
||||
//
|
||||
// • If the values have an Equal method of the form "(T) Equal(T) bool" or
|
||||
// "(T) Equal(I) bool" where T is assignable to I, then use the result of
|
||||
// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
|
||||
// evaluation proceeds to the next rule.
|
||||
//
|
||||
// • Lastly, try to compare x and y based on their basic kinds.
|
||||
// Simple kinds like booleans, integers, floats, complex numbers, strings, and
|
||||
// channels are compared using the equivalent of the == operator in Go.
|
||||
// Functions are only equal if they are both nil, otherwise they are unequal.
|
||||
//
|
||||
// Structs are equal if recursively calling Equal on all fields reports equal.
|
||||
// If a struct contains unexported fields, Equal panics unless an Ignore option
|
||||
// (e.g., cmpopts.IgnoreUnexported) ignores that field or the Exporter option
|
||||
// explicitly permits comparing the unexported field.
|
||||
//
|
||||
// Slices are equal if they are both nil or both non-nil, where recursively
|
||||
// calling Equal on all non-ignored slice or array elements reports equal.
|
||||
// Empty non-nil slices and nil slices are not equal; to equate empty slices,
|
||||
// consider using cmpopts.EquateEmpty.
|
||||
//
|
||||
// Maps are equal if they are both nil or both non-nil, where recursively
|
||||
// calling Equal on all non-ignored map entries reports equal.
|
||||
// Map keys are equal according to the == operator.
|
||||
// To use custom comparisons for map keys, consider using cmpopts.SortMaps.
|
||||
// Empty non-nil maps and nil maps are not equal; to equate empty maps,
|
||||
// consider using cmpopts.EquateEmpty.
|
||||
//
|
||||
// Pointers and interfaces are equal if they are both nil or both non-nil,
|
||||
// where they have the same underlying concrete type and recursively
|
||||
// calling Equal on the underlying values reports equal.
|
||||
//
|
||||
// Before recursing into a pointer, slice element, or map, the current path
|
||||
// is checked to detect whether the address has already been visited.
|
||||
// If there is a cycle, then the pointed at values are considered equal
|
||||
// only if both addresses were previously visited in the same path step.
|
||||
func Equal(x, y interface{}, opts ...Option) bool {
|
||||
s := newState(opts)
|
||||
s.compareAny(rootStep(x, y))
|
||||
return s.result.Equal()
|
||||
}
|
||||
|
||||
// Diff returns a human-readable report of the differences between two values:
|
||||
// y - x. It returns an empty string if and only if Equal returns true for the
|
||||
// same input values and options.
|
||||
//
|
||||
// The output is displayed as a literal in pseudo-Go syntax.
|
||||
// At the start of each line, a "-" prefix indicates an element removed from x,
|
||||
// a "+" prefix to indicates an element added from y, and the lack of a prefix
|
||||
// indicates an element common to both x and y. If possible, the output
|
||||
// uses fmt.Stringer.String or error.Error methods to produce more humanly
|
||||
// readable outputs. In such cases, the string is prefixed with either an
|
||||
// 's' or 'e' character, respectively, to indicate that the method was called.
|
||||
//
|
||||
// Do not depend on this output being stable. If you need the ability to
|
||||
// programmatically interpret the difference, consider using a custom Reporter.
|
||||
func Diff(x, y interface{}, opts ...Option) string {
|
||||
s := newState(opts)
|
||||
|
||||
// Optimization: If there are no other reporters, we can optimize for the
|
||||
// common case where the result is equal (and thus no reported difference).
|
||||
// This avoids the expensive construction of a difference tree.
|
||||
if len(s.reporters) == 0 {
|
||||
s.compareAny(rootStep(x, y))
|
||||
if s.result.Equal() {
|
||||
return ""
|
||||
}
|
||||
s.result = diff.Result{} // Reset results
|
||||
}
|
||||
|
||||
r := new(defaultReporter)
|
||||
s.reporters = append(s.reporters, reporter{r})
|
||||
s.compareAny(rootStep(x, y))
|
||||
d := r.String()
|
||||
if (d == "") != s.result.Equal() {
|
||||
panic("inconsistent difference and equality results")
|
||||
}
|
||||
return d
|
||||
}
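// Illustrative sketch (not part of this file) of using Equal and Diff above;
// the type and values are hypothetical. Equal reports semantic equality, and
// Diff renders the differences in pseudo-Go syntax when the values differ.
func compareSketch() {
    type point struct{ X, Y int }
    a := point{X: 1, Y: 2}
    b := point{X: 1, Y: 3}
    if !Equal(a, b) {
        // Prints a report with a "-" line for a's value and a "+" line for b's.
        fmt.Println(Diff(a, b))
    }
}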
|
||||
|
||||
// rootStep constructs the first path step. If x and y have differing types,
|
||||
// then they are stored within an empty interface type.
|
||||
func rootStep(x, y interface{}) PathStep {
|
||||
vx := reflect.ValueOf(x)
|
||||
vy := reflect.ValueOf(y)
|
||||
|
||||
// If the inputs are different types, auto-wrap them in an empty interface
|
||||
// so that they have the same parent type.
|
||||
var t reflect.Type
|
||||
if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() {
|
||||
t = reflect.TypeOf((*interface{})(nil)).Elem()
|
||||
if vx.IsValid() {
|
||||
vvx := reflect.New(t).Elem()
|
||||
vvx.Set(vx)
|
||||
vx = vvx
|
||||
}
|
||||
if vy.IsValid() {
|
||||
vvy := reflect.New(t).Elem()
|
||||
vvy.Set(vy)
|
||||
vy = vvy
|
||||
}
|
||||
} else {
|
||||
t = vx.Type()
|
||||
}
|
||||
|
||||
return &pathStep{t, vx, vy}
|
||||
}
|
||||
|
||||
type state struct {
|
||||
// These fields represent the "comparison state".
|
||||
// Calling statelessCompare must not result in observable changes to these.
|
||||
result diff.Result // The current result of comparison
|
||||
curPath Path // The current path in the value tree
|
||||
curPtrs pointerPath // The current set of visited pointers
|
||||
reporters []reporter // Optional reporters
|
||||
|
||||
// recChecker checks for infinite cycles applying the same set of
|
||||
// transformers upon the output of itself.
|
||||
recChecker recChecker
|
||||
|
||||
// dynChecker triggers pseudo-random checks for option correctness.
|
||||
// It is safe for statelessCompare to mutate this value.
|
||||
dynChecker dynChecker
|
||||
|
||||
// These fields, once set by processOption, will not change.
|
||||
exporters []exporter // List of exporters for structs with unexported fields
|
||||
opts Options // List of all fundamental and filter options
|
||||
}
|
||||
|
||||
func newState(opts []Option) *state {
|
||||
// Always ensure a validator option exists to validate the inputs.
|
||||
s := &state{opts: Options{validator{}}}
|
||||
s.curPtrs.Init()
|
||||
s.processOption(Options(opts))
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *state) processOption(opt Option) {
|
||||
switch opt := opt.(type) {
|
||||
case nil:
|
||||
case Options:
|
||||
for _, o := range opt {
|
||||
s.processOption(o)
|
||||
}
|
||||
case coreOption:
|
||||
type filtered interface {
|
||||
isFiltered() bool
|
||||
}
|
||||
if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() {
|
||||
panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt))
|
||||
}
|
||||
s.opts = append(s.opts, opt)
|
||||
case exporter:
|
||||
s.exporters = append(s.exporters, opt)
|
||||
case reporter:
|
||||
s.reporters = append(s.reporters, opt)
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown option %T", opt))
|
||||
}
|
||||
}
|
||||
|
||||
// statelessCompare compares two values and returns the result.
|
||||
// This function is stateless in that it does not alter the current result,
|
||||
// or output to any registered reporters.
|
||||
func (s *state) statelessCompare(step PathStep) diff.Result {
|
||||
// We do not save and restore curPath and curPtrs because all of the
|
||||
// compareX methods should properly push and pop from them.
|
||||
// It is an implementation bug if the contents of the paths differ from
|
||||
// when calling this function to when returning from it.
|
||||
|
||||
oldResult, oldReporters := s.result, s.reporters
|
||||
s.result = diff.Result{} // Reset result
|
||||
s.reporters = nil // Remove reporters to avoid spurious printouts
|
||||
s.compareAny(step)
|
||||
res := s.result
|
||||
s.result, s.reporters = oldResult, oldReporters
|
||||
return res
|
||||
}
|
||||
|
||||
func (s *state) compareAny(step PathStep) {
|
||||
// Update the path stack.
|
||||
s.curPath.push(step)
|
||||
defer s.curPath.pop()
|
||||
for _, r := range s.reporters {
|
||||
r.PushStep(step)
|
||||
defer r.PopStep()
|
||||
}
|
||||
s.recChecker.Check(s.curPath)
|
||||
|
||||
// Cycle-detection for slice elements (see NOTE in compareSlice).
|
||||
t := step.Type()
|
||||
vx, vy := step.Values()
|
||||
if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() {
|
||||
px, py := vx.Addr(), vy.Addr()
|
||||
if eq, visited := s.curPtrs.Push(px, py); visited {
|
||||
s.report(eq, reportByCycle)
|
||||
return
|
||||
}
|
||||
defer s.curPtrs.Pop(px, py)
|
||||
}
|
||||
|
||||
// Rule 1: Check whether an option applies on this node in the value tree.
|
||||
if s.tryOptions(t, vx, vy) {
|
||||
return
|
||||
}
|
||||
|
||||
// Rule 2: Check whether the type has a valid Equal method.
|
||||
if s.tryMethod(t, vx, vy) {
|
||||
return
|
||||
}
|
||||
|
||||
// Rule 3: Compare based on the underlying kind.
|
||||
switch t.Kind() {
|
||||
case reflect.Bool:
|
||||
s.report(vx.Bool() == vy.Bool(), 0)
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
s.report(vx.Int() == vy.Int(), 0)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
s.report(vx.Uint() == vy.Uint(), 0)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
s.report(vx.Float() == vy.Float(), 0)
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
s.report(vx.Complex() == vy.Complex(), 0)
|
||||
case reflect.String:
|
||||
s.report(vx.String() == vy.String(), 0)
|
||||
case reflect.Chan, reflect.UnsafePointer:
|
||||
s.report(vx.Pointer() == vy.Pointer(), 0)
|
||||
case reflect.Func:
|
||||
s.report(vx.IsNil() && vy.IsNil(), 0)
|
||||
case reflect.Struct:
|
||||
s.compareStruct(t, vx, vy)
|
||||
case reflect.Slice, reflect.Array:
|
||||
s.compareSlice(t, vx, vy)
|
||||
case reflect.Map:
|
||||
s.compareMap(t, vx, vy)
|
||||
case reflect.Ptr:
|
||||
s.comparePtr(t, vx, vy)
|
||||
case reflect.Interface:
|
||||
s.compareInterface(t, vx, vy)
|
||||
default:
|
||||
panic(fmt.Sprintf("%v kind not handled", t.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool {
|
||||
// Evaluate all filters and apply the remaining options.
|
||||
if opt := s.opts.filter(s, t, vx, vy); opt != nil {
|
||||
opt.apply(s, vx, vy)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool {
|
||||
// Check if this type even has an Equal method.
|
||||
m, ok := t.MethodByName("Equal")
|
||||
if !ok || !function.IsType(m.Type, function.EqualAssignable) {
|
||||
return false
|
||||
}
|
||||
|
||||
eq := s.callTTBFunc(m.Func, vx, vy)
|
||||
s.report(eq, reportByMethod)
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value {
|
||||
v = sanitizeValue(v, f.Type().In(0))
|
||||
if !s.dynChecker.Next() {
|
||||
return f.Call([]reflect.Value{v})[0]
|
||||
}
|
||||
|
||||
// Run the function twice and ensure that we get the same results back.
|
||||
// We run in goroutines so that the race detector (if enabled) can detect
|
||||
// unsafe mutations to the input.
|
||||
c := make(chan reflect.Value)
|
||||
go detectRaces(c, f, v)
|
||||
got := <-c
|
||||
want := f.Call([]reflect.Value{v})[0]
|
||||
if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() {
|
||||
// To avoid false-positives with non-reflexive equality operations,
|
||||
// we sanity check whether a value is equal to itself.
|
||||
if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() {
|
||||
return want
|
||||
}
|
||||
panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f)))
|
||||
}
|
||||
return want
|
||||
}
|
||||
|
||||
func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
|
||||
x = sanitizeValue(x, f.Type().In(0))
|
||||
y = sanitizeValue(y, f.Type().In(1))
|
||||
if !s.dynChecker.Next() {
|
||||
return f.Call([]reflect.Value{x, y})[0].Bool()
|
||||
}
|
||||
|
||||
// Swapping the input arguments is sufficient to check that
|
||||
// f is symmetric and deterministic.
|
||||
// We run in goroutines so that the race detector (if enabled) can detect
|
||||
// unsafe mutations to the input.
|
||||
c := make(chan reflect.Value)
|
||||
go detectRaces(c, f, y, x)
|
||||
got := <-c
|
||||
want := f.Call([]reflect.Value{x, y})[0].Bool()
|
||||
if !got.IsValid() || got.Bool() != want {
|
||||
panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f)))
|
||||
}
|
||||
return want
|
||||
}
|
||||
|
||||
func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
|
||||
var ret reflect.Value
|
||||
defer func() {
|
||||
recover() // Ignore panics, let the other call to f panic instead
|
||||
c <- ret
|
||||
}()
|
||||
ret = f.Call(vs)[0]
|
||||
}
|
||||
|
||||
// sanitizeValue converts nil interfaces of type T to those of type R,
|
||||
// assuming that T is assignable to R.
|
||||
// Otherwise, it returns the input value as is.
|
||||
func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value {
|
||||
// TODO(≥go1.10): Workaround for reflect bug (https://golang.org/issue/22143).
|
||||
if !flags.AtLeastGo110 {
|
||||
if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t {
|
||||
return reflect.New(t).Elem()
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
|
||||
var addr bool
|
||||
var vax, vay reflect.Value // Addressable versions of vx and vy
|
||||
|
||||
var mayForce, mayForceInit bool
|
||||
step := StructField{&structField{}}
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
step.typ = t.Field(i).Type
|
||||
step.vx = vx.Field(i)
|
||||
step.vy = vy.Field(i)
|
||||
step.name = t.Field(i).Name
|
||||
step.idx = i
|
||||
step.unexported = !isExported(step.name)
|
||||
if step.unexported {
|
||||
if step.name == "_" {
|
||||
continue
|
||||
}
|
||||
// Defer checking of unexported fields until later to give an
|
||||
// Ignore a chance to ignore the field.
|
||||
if !vax.IsValid() || !vay.IsValid() {
|
||||
// For retrieveUnexportedField to work, the parent struct must
|
||||
// be addressable. Create a new copy of the values if
|
||||
// necessary to make them addressable.
|
||||
addr = vx.CanAddr() || vy.CanAddr()
|
||||
vax = makeAddressable(vx)
|
||||
vay = makeAddressable(vy)
|
||||
}
|
||||
if !mayForceInit {
|
||||
for _, xf := range s.exporters {
|
||||
mayForce = mayForce || xf(t)
|
||||
}
|
||||
mayForceInit = true
|
||||
}
|
||||
step.mayForce = mayForce
|
||||
step.paddr = addr
|
||||
step.pvx = vax
|
||||
step.pvy = vay
|
||||
step.field = t.Field(i)
|
||||
}
|
||||
s.compareAny(step)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) {
|
||||
isSlice := t.Kind() == reflect.Slice
|
||||
if isSlice && (vx.IsNil() || vy.IsNil()) {
|
||||
s.report(vx.IsNil() && vy.IsNil(), 0)
|
||||
return
|
||||
}
|
||||
|
||||
// NOTE: It is incorrect to call curPtrs.Push on the slice header pointer
|
||||
// since a slice represents a list of pointers, rather than a single pointer.
|
||||
// The pointer checking logic must be handled on a per-element basis
|
||||
// in compareAny.
|
||||
//
|
||||
// A slice header (see reflect.SliceHeader) in Go is a tuple of a starting
|
||||
// pointer P, a length N, and a capacity C. Supposing each slice element has
|
||||
// a memory size of M, then the slice is equivalent to the list of pointers:
|
||||
// [P+i*M for i in range(N)]
|
||||
//
|
||||
// For example, v[:0] and v[:1] are slices with the same starting pointer,
|
||||
// but they are clearly different values. Using the slice pointer alone
|
||||
// violates the assumption that equal pointers implies equal values.
|
||||
|
||||
step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}}
|
||||
withIndexes := func(ix, iy int) SliceIndex {
|
||||
if ix >= 0 {
|
||||
step.vx, step.xkey = vx.Index(ix), ix
|
||||
} else {
|
||||
step.vx, step.xkey = reflect.Value{}, -1
|
||||
}
|
||||
if iy >= 0 {
|
||||
step.vy, step.ykey = vy.Index(iy), iy
|
||||
} else {
|
||||
step.vy, step.ykey = reflect.Value{}, -1
|
||||
}
|
||||
return step
|
||||
}
|
||||
|
||||
// Ignore options are able to ignore missing elements in a slice.
|
||||
// However, detecting these reliably requires an optimal differencing
|
||||
// algorithm, which diff.Difference does not provide.
|
||||
//
|
||||
// Instead, we first iterate through both slices to detect which elements
|
||||
// would be ignored if standing alone. The indexes of non-discarded elements
// are stored in a separate slice, on which diffing is then performed.
|
||||
var indexesX, indexesY []int
|
||||
var ignoredX, ignoredY []bool
|
||||
for ix := 0; ix < vx.Len(); ix++ {
|
||||
ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0
|
||||
if !ignored {
|
||||
indexesX = append(indexesX, ix)
|
||||
}
|
||||
ignoredX = append(ignoredX, ignored)
|
||||
}
|
||||
for iy := 0; iy < vy.Len(); iy++ {
|
||||
ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0
|
||||
if !ignored {
|
||||
indexesY = append(indexesY, iy)
|
||||
}
|
||||
ignoredY = append(ignoredY, ignored)
|
||||
}
|
||||
|
||||
// Compute an edit-script for slices vx and vy (excluding ignored elements).
|
||||
edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result {
|
||||
return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy]))
|
||||
})
|
||||
|
||||
// Replay the ignore-scripts and the edit-script.
|
||||
var ix, iy int
|
||||
for ix < vx.Len() || iy < vy.Len() {
|
||||
var e diff.EditType
|
||||
switch {
|
||||
case ix < len(ignoredX) && ignoredX[ix]:
|
||||
e = diff.UniqueX
|
||||
case iy < len(ignoredY) && ignoredY[iy]:
|
||||
e = diff.UniqueY
|
||||
default:
|
||||
e, edits = edits[0], edits[1:]
|
||||
}
|
||||
switch e {
|
||||
case diff.UniqueX:
|
||||
s.compareAny(withIndexes(ix, -1))
|
||||
ix++
|
||||
case diff.UniqueY:
|
||||
s.compareAny(withIndexes(-1, iy))
|
||||
iy++
|
||||
default:
|
||||
s.compareAny(withIndexes(ix, iy))
|
||||
ix++
|
||||
iy++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) {
|
||||
if vx.IsNil() || vy.IsNil() {
|
||||
s.report(vx.IsNil() && vy.IsNil(), 0)
|
||||
return
|
||||
}
|
||||
|
||||
// Cycle-detection for maps.
|
||||
if eq, visited := s.curPtrs.Push(vx, vy); visited {
|
||||
s.report(eq, reportByCycle)
|
||||
return
|
||||
}
|
||||
defer s.curPtrs.Pop(vx, vy)
|
||||
|
||||
// We combine and sort the two map keys so that we can perform the
|
||||
// comparisons in a deterministic order.
|
||||
step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}}
|
||||
for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
|
||||
step.vx = vx.MapIndex(k)
|
||||
step.vy = vy.MapIndex(k)
|
||||
step.key = k
|
||||
if !step.vx.IsValid() && !step.vy.IsValid() {
|
||||
// It is possible for both vx and vy to be invalid if the
|
||||
// key contained a NaN value in it.
|
||||
//
|
||||
// Even with the ability to retrieve NaN keys in Go 1.12,
|
||||
// there still isn't a sensible way to compare the values since
|
||||
// a NaN key may map to multiple unordered values.
|
||||
// The most reasonable way to compare NaNs would be to compare the
|
||||
// set of values. However, this is impossible to do efficiently
|
||||
// since set equality is provably an O(n^2) operation given only
|
||||
// an Equal function. If we had a Less function or Hash function,
|
||||
// this could be done in O(n*log(n)) or O(n), respectively.
|
||||
//
|
||||
// Rather than adding complex logic to deal with NaNs, make it
|
||||
// the user's responsibility to compare such obscure maps.
|
||||
const help = "consider providing a Comparer to compare the map"
|
||||
panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help))
|
||||
}
|
||||
s.compareAny(step)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) {
|
||||
if vx.IsNil() || vy.IsNil() {
|
||||
s.report(vx.IsNil() && vy.IsNil(), 0)
|
||||
return
|
||||
}
|
||||
|
||||
// Cycle-detection for pointers.
|
||||
if eq, visited := s.curPtrs.Push(vx, vy); visited {
|
||||
s.report(eq, reportByCycle)
|
||||
return
|
||||
}
|
||||
defer s.curPtrs.Pop(vx, vy)
|
||||
|
||||
vx, vy = vx.Elem(), vy.Elem()
|
||||
s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}})
|
||||
}
|
||||
|
||||
func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) {
|
||||
if vx.IsNil() || vy.IsNil() {
|
||||
s.report(vx.IsNil() && vy.IsNil(), 0)
|
||||
return
|
||||
}
|
||||
vx, vy = vx.Elem(), vy.Elem()
|
||||
if vx.Type() != vy.Type() {
|
||||
s.report(false, 0)
|
||||
return
|
||||
}
|
||||
s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}})
|
||||
}
|
||||
|
||||
func (s *state) report(eq bool, rf resultFlags) {
|
||||
if rf&reportByIgnore == 0 {
|
||||
if eq {
|
||||
s.result.NumSame++
|
||||
rf |= reportEqual
|
||||
} else {
|
||||
s.result.NumDiff++
|
||||
rf |= reportUnequal
|
||||
}
|
||||
}
|
||||
for _, r := range s.reporters {
|
||||
r.Report(Result{flags: rf})
|
||||
}
|
||||
}
|
||||
|
||||
// recChecker tracks the state needed to periodically perform checks that
|
||||
// user provided transformers are not stuck in an infinitely recursive cycle.
|
||||
type recChecker struct{ next int }
|
||||
|
||||
// Check scans the Path for any recursive transformers and panics when any
|
||||
// recursive transformers are detected. Note that the presence of a
|
||||
// recursive Transformer does not necessarily imply an infinite cycle.
|
||||
// As such, this check only activates after some minimal number of path steps.
|
||||
func (rc *recChecker) Check(p Path) {
|
||||
const minLen = 1 << 16
|
||||
if rc.next == 0 {
|
||||
rc.next = minLen
|
||||
}
|
||||
if len(p) < rc.next {
|
||||
return
|
||||
}
|
||||
rc.next <<= 1
|
||||
|
||||
// Check whether the same transformer has appeared at least twice.
|
||||
var ss []string
|
||||
m := map[Option]int{}
|
||||
for _, ps := range p {
|
||||
if t, ok := ps.(Transform); ok {
|
||||
t := t.Option()
|
||||
if m[t] == 1 { // Transformer was used exactly once before
|
||||
tf := t.(*transformer).fnc.Type()
|
||||
ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0)))
|
||||
}
|
||||
m[t]++
|
||||
}
|
||||
}
|
||||
if len(ss) > 0 {
|
||||
const warning = "recursive set of Transformers detected"
|
||||
const help = "consider using cmpopts.AcyclicTransformer"
|
||||
set := strings.Join(ss, "\n\t")
|
||||
panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help))
|
||||
}
|
||||
}
|
||||
|
||||
// dynChecker tracks the state needed to periodically perform checks that
|
||||
// user provided functions are symmetric and deterministic.
|
||||
// The zero value is safe for immediate use.
|
||||
type dynChecker struct{ curr, next int }
|
||||
|
||||
// Next increments the state and reports whether a check should be performed.
|
||||
//
|
||||
// Checks occur every Nth function call, where N is a triangular number:
|
||||
// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
|
||||
// See https://en.wikipedia.org/wiki/Triangular_number
|
||||
//
|
||||
// This sequence ensures that the cost of checks drops significantly as
|
||||
// the number of functions calls grows larger.
|
||||
func (dc *dynChecker) Next() bool {
|
||||
ok := dc.curr == dc.next
|
||||
if ok {
|
||||
dc.curr = 0
|
||||
dc.next++
|
||||
}
|
||||
dc.curr++
|
||||
return ok
|
||||
}
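To make the cadence concrete, a minimal standalone sketch (hypothetical names, same counter logic as above) showing that the check fires on call numbers 0, 1, 3, 6, 10, ...:

package main

import "fmt"

// dynCheckerSketch mirrors the curr/next counter pair described above.
type dynCheckerSketch struct{ curr, next int }

func (dc *dynCheckerSketch) Next() bool {
	ok := dc.curr == dc.next
	if ok {
		dc.curr = 0
		dc.next++
	}
	dc.curr++
	return ok
}

func main() {
	var dc dynCheckerSketch
	for call := 0; call < 25; call++ {
		if dc.Next() {
			fmt.Println("check on call", call) // prints 0, 1, 3, 6, 10, 15, 21
		}
	}
}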
|
||||
|
||||
// makeAddressable returns a value that is always addressable.
|
||||
// It returns the input verbatim if it is already addressable,
|
||||
// otherwise it creates a new value and returns an addressable copy.
|
||||
func makeAddressable(v reflect.Value) reflect.Value {
|
||||
if v.CanAddr() {
|
||||
return v
|
||||
}
|
||||
vc := reflect.New(v.Type()).Elem()
|
||||
vc.Set(v)
|
||||
return vc
|
||||
}
|
vendor/github.com/google/go-cmp/cmp/export_panic.go (generated, vendored, new file: 15 lines)
@@ -0,0 +1,15 @@
|
|||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build purego
|
||||
|
||||
package cmp
|
||||
|
||||
import "reflect"
|
||||
|
||||
const supportExporters = false
|
||||
|
||||
func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value {
|
||||
panic("no support for forcibly accessing unexported fields")
|
||||
}
|
vendor/github.com/google/go-cmp/cmp/export_unsafe.go (generated, vendored, new file: 35 lines)
@@ -0,0 +1,35 @@
|
|||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !purego
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const supportExporters = true
|
||||
|
||||
// retrieveUnexportedField uses unsafe to forcibly retrieve any field from
|
||||
// a struct such that the value has read-write permissions.
|
||||
//
|
||||
// The parent struct, v, must be addressable, while f must be a StructField
|
||||
// describing the field to retrieve. If addr is false,
|
||||
// then the returned value will be shallow copied to be non-addressable.
|
||||
func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value {
|
||||
ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem()
|
||||
if !addr {
|
||||
// A field is addressable if and only if the struct is addressable.
|
||||
// If the original parent value was not addressable, shallow copy the
|
||||
// value to make it non-addressable to avoid leaking an implementation
|
||||
// detail of how forcibly exporting a field works.
|
||||
if ve.Kind() == reflect.Interface && ve.IsNil() {
|
||||
return reflect.Zero(f.Type)
|
||||
}
|
||||
return reflect.ValueOf(ve.Interface()).Convert(f.Type)
|
||||
}
|
||||
return ve
|
||||
}
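A self-contained sketch of the same reflect.NewAt + unsafe.Pointer pattern; the type and function names here are hypothetical and only illustrate the mechanism:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type secret struct{ hidden int }

// readHidden forcibly reads the unexported "hidden" field. The parent value
// must be addressable, which is why we start from a pointer and call Elem.
func readHidden(s *secret) int {
	v := reflect.ValueOf(s).Elem()
	f, _ := v.Type().FieldByName("hidden")
	fv := reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem()
	return int(fv.Int())
}

func main() {
	fmt.Println(readHidden(&secret{hidden: 42})) // 42
}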
|
vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go (generated, vendored, new file: 17 lines)
@@ -0,0 +1,17 @@
|
|||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !cmp_debug
|
||||
|
||||
package diff
|
||||
|
||||
var debug debugger
|
||||
|
||||
type debugger struct{}
|
||||
|
||||
func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc {
|
||||
return f
|
||||
}
|
||||
func (debugger) Update() {}
|
||||
func (debugger) Finish() {}
|
vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go (generated, vendored, new file: 122 lines)
@@ -0,0 +1,122 @@
|
|||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build cmp_debug
|
||||
|
||||
package diff
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// The algorithm can be seen running in real-time by enabling debugging:
|
||||
// go test -tags=cmp_debug -v
|
||||
//
|
||||
// Example output:
|
||||
// === RUN TestDifference/#34
|
||||
// ┌───────────────────────────────┐
|
||||
// │ \ · · · · · · · · · · · · · · │
|
||||
// │ · # · · · · · · · · · · · · · │
|
||||
// │ · \ · · · · · · · · · · · · · │
|
||||
// │ · · \ · · · · · · · · · · · · │
|
||||
// │ · · · X # · · · · · · · · · · │
|
||||
// │ · · · # \ · · · · · · · · · · │
|
||||
// │ · · · · · # # · · · · · · · · │
|
||||
// │ · · · · · # \ · · · · · · · · │
|
||||
// │ · · · · · · · \ · · · · · · · │
|
||||
// │ · · · · · · · · \ · · · · · · │
|
||||
// │ · · · · · · · · · \ · · · · · │
|
||||
// │ · · · · · · · · · · \ · · # · │
|
||||
// │ · · · · · · · · · · · \ # # · │
|
||||
// │ · · · · · · · · · · · # # # · │
|
||||
// │ · · · · · · · · · · # # # # · │
|
||||
// │ · · · · · · · · · # # # # # · │
|
||||
// │ · · · · · · · · · · · · · · \ │
|
||||
// └───────────────────────────────┘
|
||||
// [.Y..M.XY......YXYXY.|]
|
||||
//
|
||||
// The grid represents the edit-graph where the horizontal axis represents
|
||||
// list X and the vertical axis represents list Y. The start of the two lists
|
||||
// is the top-left, while the ends are the bottom-right. The '·' represents
|
||||
// an unexplored node in the graph. The '\' indicates that the two symbols
|
||||
// from list X and Y are equal. The 'X' indicates that two symbols are similar
|
||||
// (but not exactly equal) to each other. The '#' indicates that the two symbols
|
||||
// are different (and not similar). The algorithm traverses this graph trying to
|
||||
// make the paths starting in the top-left and the bottom-right connect.
|
||||
//
|
||||
// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents
|
||||
// the currently established path from the forward and reverse searches,
|
||||
// separated by a '|' character.
|
||||
|
||||
const (
|
||||
updateDelay = 100 * time.Millisecond
|
||||
finishDelay = 500 * time.Millisecond
|
||||
ansiTerminal = true // ANSI escape codes used to move terminal cursor
|
||||
)
|
||||
|
||||
var debug debugger
|
||||
|
||||
type debugger struct {
|
||||
sync.Mutex
|
||||
p1, p2 EditScript
|
||||
fwdPath, revPath *EditScript
|
||||
grid []byte
|
||||
lines int
|
||||
}
|
||||
|
||||
func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc {
|
||||
dbg.Lock()
|
||||
dbg.fwdPath, dbg.revPath = p1, p2
|
||||
top := "┌─" + strings.Repeat("──", nx) + "┐\n"
|
||||
row := "│ " + strings.Repeat("· ", nx) + "│\n"
|
||||
btm := "└─" + strings.Repeat("──", nx) + "┘\n"
|
||||
dbg.grid = []byte(top + strings.Repeat(row, ny) + btm)
|
||||
dbg.lines = strings.Count(dbg.String(), "\n")
|
||||
fmt.Print(dbg)
|
||||
|
||||
// Wrap the EqualFunc so that we can intercept each result.
|
||||
return func(ix, iy int) (r Result) {
|
||||
cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")]
|
||||
for i := range cell {
|
||||
cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot
|
||||
}
|
||||
switch r = f(ix, iy); {
|
||||
case r.Equal():
|
||||
cell[0] = '\\'
|
||||
case r.Similar():
|
||||
cell[0] = 'X'
|
||||
default:
|
||||
cell[0] = '#'
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (dbg *debugger) Update() {
|
||||
dbg.print(updateDelay)
|
||||
}
|
||||
|
||||
func (dbg *debugger) Finish() {
|
||||
dbg.print(finishDelay)
|
||||
dbg.Unlock()
|
||||
}
|
||||
|
||||
func (dbg *debugger) String() string {
|
||||
dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0]
|
||||
for i := len(*dbg.revPath) - 1; i >= 0; i-- {
|
||||
dbg.p2 = append(dbg.p2, (*dbg.revPath)[i])
|
||||
}
|
||||
return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2)
|
||||
}
|
||||
|
||||
func (dbg *debugger) print(d time.Duration) {
|
||||
if ansiTerminal {
|
||||
fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor
|
||||
}
|
||||
fmt.Print(dbg)
|
||||
time.Sleep(d)
|
||||
}
|
vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go (generated, vendored, new file: 398 lines)
@@ -0,0 +1,398 @@
|
|||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package diff implements an algorithm for producing edit-scripts.
|
||||
// The edit-script is a sequence of operations needed to transform one list
|
||||
// of symbols into another (or vice-versa). The edits allowed are insertions,
|
||||
// deletions, and modifications. The summation of all edits is called the
|
||||
// Levenshtein distance as this problem is well-known in computer science.
|
||||
//
|
||||
// This package prioritizes performance over accuracy. That is, the run time
|
||||
// is more important than obtaining a minimal Levenshtein distance.
|
||||
package diff
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/flags"
|
||||
)
|
||||
|
||||
// EditType represents a single operation within an edit-script.
|
||||
type EditType uint8
|
||||
|
||||
const (
|
||||
// Identity indicates that a symbol pair is identical in both list X and Y.
|
||||
Identity EditType = iota
|
||||
// UniqueX indicates that a symbol only exists in X and not Y.
|
||||
UniqueX
|
||||
// UniqueY indicates that a symbol only exists in Y and not X.
|
||||
UniqueY
|
||||
// Modified indicates that a symbol pair is a modification of each other.
|
||||
Modified
|
||||
)
|
||||
|
||||
// EditScript represents the series of differences between two lists.
|
||||
type EditScript []EditType
|
||||
|
||||
// String returns a human-readable string representing the edit-script where
|
||||
// Identity, UniqueX, UniqueY, and Modified are represented by the
|
||||
// '.', 'X', 'Y', and 'M' characters, respectively.
|
||||
func (es EditScript) String() string {
|
||||
b := make([]byte, len(es))
|
||||
for i, e := range es {
|
||||
switch e {
|
||||
case Identity:
|
||||
b[i] = '.'
|
||||
case UniqueX:
|
||||
b[i] = 'X'
|
||||
case UniqueY:
|
||||
b[i] = 'Y'
|
||||
case Modified:
|
||||
b[i] = 'M'
|
||||
default:
|
||||
panic("invalid edit-type")
|
||||
}
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
// stats returns a histogram of the number of each type of edit operation.
|
||||
func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) {
|
||||
for _, e := range es {
|
||||
switch e {
|
||||
case Identity:
|
||||
s.NI++
|
||||
case UniqueX:
|
||||
s.NX++
|
||||
case UniqueY:
|
||||
s.NY++
|
||||
case Modified:
|
||||
s.NM++
|
||||
default:
|
||||
panic("invalid edit-type")
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if
|
||||
// lists X and Y are equal.
|
||||
func (es EditScript) Dist() int { return len(es) - es.stats().NI }
|
||||
|
||||
// LenX is the length of the X list.
|
||||
func (es EditScript) LenX() int { return len(es) - es.stats().NY }
|
||||
|
||||
// LenY is the length of the Y list.
|
||||
func (es EditScript) LenY() int { return len(es) - es.stats().NX }
|
||||
|
||||
// EqualFunc reports whether the symbols at indexes ix and iy are equal.
|
||||
// When called by Difference, the index is guaranteed to be within nx and ny.
|
||||
type EqualFunc func(ix int, iy int) Result
|
||||
|
||||
// Result is the result of comparison.
|
||||
// NumSame is the number of sub-elements that are equal.
|
||||
// NumDiff is the number of sub-elements that are not equal.
|
||||
type Result struct{ NumSame, NumDiff int }
|
||||
|
||||
// BoolResult returns a Result that is either Equal or not Equal.
|
||||
func BoolResult(b bool) Result {
|
||||
if b {
|
||||
return Result{NumSame: 1} // Equal, Similar
|
||||
} else {
|
||||
return Result{NumDiff: 2} // Not Equal, not Similar
|
||||
}
|
||||
}
|
||||
|
||||
// Equal indicates whether the symbols are equal. Two symbols are equal
|
||||
// if and only if NumDiff == 0. If Equal, then they are also Similar.
|
||||
func (r Result) Equal() bool { return r.NumDiff == 0 }
|
||||
|
||||
// Similar indicates whether two symbols are similar and may be represented
|
||||
// by using the Modified type. As a special case, we consider binary comparisons
|
||||
// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar.
|
||||
//
|
||||
// The exact ratio of NumSame to NumDiff to determine similarity may change.
|
||||
func (r Result) Similar() bool {
|
||||
// Use NumSame+1 to offset NumSame so that binary comparisons are similar.
|
||||
return r.NumSame+1 >= r.NumDiff
|
||||
}
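For reference, how the NumSame+1 offset plays out on a few concrete Result values (a sketch that would sit inside this package):

var (
	_ = Result{NumSame: 0, NumDiff: 1}.Similar() // true:  0+1 >= 1 (binary comparison, Result{0, 1})
	_ = Result{NumSame: 0, NumDiff: 2}.Similar() // false: 0+1 <  2 (what BoolResult(false) returns)
	_ = Result{NumSame: 2, NumDiff: 3}.Similar() // true:  2+1 >= 3
)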
|
||||
|
||||
var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
|
||||
|
||||
// Difference reports whether two lists of lengths nx and ny are equal
|
||||
// given the definition of equality provided as f.
|
||||
//
|
||||
// This function returns an edit-script, which is a sequence of operations
|
||||
// needed to convert one list into the other. The following invariants for
|
||||
// the edit-script are maintained:
|
||||
// • eq == (es.Dist()==0)
|
||||
// • nx == es.LenX()
|
||||
// • ny == es.LenY()
|
||||
//
|
||||
// This algorithm is not guaranteed to be an optimal solution (i.e., one that
|
||||
// produces an edit-script with a minimal Levenshtein distance). This algorithm
|
||||
// favors performance over optimality. The exact output is not guaranteed to
|
||||
// be stable and may change over time.
|
||||
func Difference(nx, ny int, f EqualFunc) (es EditScript) {
|
||||
// This algorithm is based on traversing what is known as an "edit-graph".
|
||||
// See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
|
||||
// by Eugene W. Myers. Since D can be as large as N itself, this is
|
||||
// effectively O(N^2). Unlike the algorithm from that paper, we are not
|
||||
// interested in the optimal path, but at least some "decent" path.
|
||||
//
|
||||
// For example, let X and Y be lists of symbols:
|
||||
// X = [A B C A B B A]
|
||||
// Y = [C B A B A C]
|
||||
//
|
||||
// The edit-graph can be drawn as the following:
|
||||
// A B C A B B A
|
||||
// ┌─────────────┐
|
||||
// C │_|_|\|_|_|_|_│ 0
|
||||
// B │_|\|_|_|\|\|_│ 1
|
||||
// A │\|_|_|\|_|_|\│ 2
|
||||
// B │_|\|_|_|\|\|_│ 3
|
||||
// A │\|_|_|\|_|_|\│ 4
|
||||
// C │ | |\| | | | │ 5
|
||||
// └─────────────┘ 6
|
||||
// 0 1 2 3 4 5 6 7
|
||||
//
|
||||
// List X is written along the horizontal axis, while list Y is written
|
||||
// along the vertical axis. At any point on this grid, if the symbol in
|
||||
// list X matches the corresponding symbol in list Y, then a '\' is drawn.
|
||||
// The goal of any minimal edit-script algorithm is to find a path from the
|
||||
// top-left corner to the bottom-right corner, while traveling through the
|
||||
// fewest horizontal or vertical edges.
|
||||
// A horizontal edge is equivalent to inserting a symbol from list X.
|
||||
// A vertical edge is equivalent to inserting a symbol from list Y.
|
||||
// A diagonal edge is equivalent to a matching symbol between both X and Y.
|
||||
|
||||
// Invariants:
|
||||
// • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
|
||||
// • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
|
||||
//
|
||||
// In general:
|
||||
// • fwdFrontier.X < revFrontier.X
|
||||
// • fwdFrontier.Y < revFrontier.Y
|
||||
// Unless, it is time for the algorithm to terminate.
|
||||
fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
|
||||
revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
|
||||
fwdFrontier := fwdPath.point // Forward search frontier
|
||||
revFrontier := revPath.point // Reverse search frontier
|
||||
|
||||
// Search budget bounds the cost of searching for better paths.
|
||||
// The longest sequence of non-matching symbols that can be tolerated is
|
||||
// approximately the square-root of the search budget.
|
||||
searchBudget := 4 * (nx + ny) // O(n)
|
||||
|
||||
// Running the tests with the "cmp_debug" build tag prints a visualization
|
||||
// of the algorithm running in real-time. This is educational for
|
||||
// understanding how the algorithm works. See debug_enable.go.
|
||||
f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
|
||||
|
||||
// The algorithm below is a greedy, meet-in-the-middle algorithm for
|
||||
// computing sub-optimal edit-scripts between two lists.
|
||||
//
|
||||
// The algorithm is approximately as follows:
|
||||
// • Searching for differences switches back-and-forth between
|
||||
// a search that starts at the beginning (the top-left corner), and
|
||||
// a search that starts at the end (the bottom-right corner). The goal of
|
||||
// the search is to connect with the search from the opposite corner.
|
||||
// • As we search, we build a path in a greedy manner, where the first
|
||||
// match seen is added to the path (this is sub-optimal, but provides a
|
||||
// decent result in practice). When matches are found, we try the next pair
|
||||
// of symbols in the lists and follow all matches as far as possible.
|
||||
// • When searching for matches, we search along a diagonal going
|
||||
// through the "frontier" point. If no matches are found, we advance the
|
||||
// frontier towards the opposite corner.
|
||||
// • This algorithm terminates when either the X coordinates or the
|
||||
// Y coordinates of the forward and reverse frontier points ever intersect.
|
||||
|
||||
// This algorithm is correct even if searching only in the forward direction
|
||||
// or in the reverse direction. We do both because it is commonly observed
|
||||
// that two lists differ because elements were added to the front
|
||||
// or end of the other list.
|
||||
//
|
||||
// Non-deterministically start with either the forward or reverse direction
|
||||
// to introduce some deliberate instability so that we have the flexibility
|
||||
// to change this algorithm in the future.
|
||||
if flags.Deterministic || randBool {
|
||||
goto forwardSearch
|
||||
} else {
|
||||
goto reverseSearch
|
||||
}
|
||||
|
||||
forwardSearch:
|
||||
{
|
||||
// Forward search from the beginning.
|
||||
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
|
||||
goto finishSearch
|
||||
}
|
||||
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
|
||||
// Search in a diagonal pattern for a match.
|
||||
z := zigzag(i)
|
||||
p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
|
||||
switch {
|
||||
case p.X >= revPath.X || p.Y < fwdPath.Y:
|
||||
stop1 = true // Hit top-right corner
|
||||
case p.Y >= revPath.Y || p.X < fwdPath.X:
|
||||
stop2 = true // Hit bottom-left corner
|
||||
case f(p.X, p.Y).Equal():
|
||||
// Match found, so connect the path to this point.
|
||||
fwdPath.connect(p, f)
|
||||
fwdPath.append(Identity)
|
||||
// Follow sequence of matches as far as possible.
|
||||
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
|
||||
if !f(fwdPath.X, fwdPath.Y).Equal() {
|
||||
break
|
||||
}
|
||||
fwdPath.append(Identity)
|
||||
}
|
||||
fwdFrontier = fwdPath.point
|
||||
stop1, stop2 = true, true
|
||||
default:
|
||||
searchBudget-- // Match not found
|
||||
}
|
||||
debug.Update()
|
||||
}
|
||||
// Advance the frontier towards reverse point.
|
||||
if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
|
||||
fwdFrontier.X++
|
||||
} else {
|
||||
fwdFrontier.Y++
|
||||
}
|
||||
goto reverseSearch
|
||||
}
|
||||
|
||||
reverseSearch:
|
||||
{
|
||||
// Reverse search from the end.
|
||||
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
|
||||
goto finishSearch
|
||||
}
|
||||
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
|
||||
// Search in a diagonal pattern for a match.
|
||||
z := zigzag(i)
|
||||
p := point{revFrontier.X - z, revFrontier.Y + z}
|
||||
switch {
|
||||
case fwdPath.X >= p.X || revPath.Y < p.Y:
|
||||
stop1 = true // Hit bottom-left corner
|
||||
case fwdPath.Y >= p.Y || revPath.X < p.X:
|
||||
stop2 = true // Hit top-right corner
|
||||
case f(p.X-1, p.Y-1).Equal():
|
||||
// Match found, so connect the path to this point.
|
||||
revPath.connect(p, f)
|
||||
revPath.append(Identity)
|
||||
// Follow sequence of matches as far as possible.
|
||||
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
|
||||
if !f(revPath.X-1, revPath.Y-1).Equal() {
|
||||
break
|
||||
}
|
||||
revPath.append(Identity)
|
||||
}
|
||||
revFrontier = revPath.point
|
||||
stop1, stop2 = true, true
|
||||
default:
|
||||
searchBudget-- // Match not found
|
||||
}
|
||||
debug.Update()
|
||||
}
|
||||
// Advance the frontier towards forward point.
|
||||
if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
|
||||
revFrontier.X--
|
||||
} else {
|
||||
revFrontier.Y--
|
||||
}
|
||||
goto forwardSearch
|
||||
}
|
||||
|
||||
finishSearch:
|
||||
// Join the forward and reverse paths and then append the reverse path.
|
||||
fwdPath.connect(revPath.point, f)
|
||||
for i := len(revPath.es) - 1; i >= 0; i-- {
|
||||
t := revPath.es[i]
|
||||
revPath.es = revPath.es[:i]
|
||||
fwdPath.append(t)
|
||||
}
|
||||
debug.Finish()
|
||||
return fwdPath.es
|
||||
}
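A hedged usage sketch of Difference driven by a BoolResult-based EqualFunc; since the import path is internal, assume this sits inside the diff package itself:

func exampleDifference() {
	x := []string{"A", "B", "C", "A", "B", "B", "A"}
	y := []string{"C", "B", "A", "B", "A", "C"}
	es := Difference(len(x), len(y), func(ix, iy int) Result {
		return BoolResult(x[ix] == y[iy])
	})
	// es.Dist() counts the non-Identity edits; the exact script is not
	// guaranteed to be minimal or stable across versions.
	_ = es.Dist()
	_ = es.String() // a mix of '.', 'X', 'Y', 'M' characters
}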
|
||||
|
||||
type path struct {
|
||||
dir int // +1 if forward, -1 if reverse
|
||||
point // Leading point of the EditScript path
|
||||
es EditScript
|
||||
}
|
||||
|
||||
// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types
|
||||
// to the edit-script to connect p.point to dst.
|
||||
func (p *path) connect(dst point, f EqualFunc) {
|
||||
if p.dir > 0 {
|
||||
// Connect in forward direction.
|
||||
for dst.X > p.X && dst.Y > p.Y {
|
||||
switch r := f(p.X, p.Y); {
|
||||
case r.Equal():
|
||||
p.append(Identity)
|
||||
case r.Similar():
|
||||
p.append(Modified)
|
||||
case dst.X-p.X >= dst.Y-p.Y:
|
||||
p.append(UniqueX)
|
||||
default:
|
||||
p.append(UniqueY)
|
||||
}
|
||||
}
|
||||
for dst.X > p.X {
|
||||
p.append(UniqueX)
|
||||
}
|
||||
for dst.Y > p.Y {
|
||||
p.append(UniqueY)
|
||||
}
|
||||
} else {
|
||||
// Connect in reverse direction.
|
||||
for p.X > dst.X && p.Y > dst.Y {
|
||||
switch r := f(p.X-1, p.Y-1); {
|
||||
case r.Equal():
|
||||
p.append(Identity)
|
||||
case r.Similar():
|
||||
p.append(Modified)
|
||||
case p.Y-dst.Y >= p.X-dst.X:
|
||||
p.append(UniqueY)
|
||||
default:
|
||||
p.append(UniqueX)
|
||||
}
|
||||
}
|
||||
for p.X > dst.X {
|
||||
p.append(UniqueX)
|
||||
}
|
||||
for p.Y > dst.Y {
|
||||
p.append(UniqueY)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *path) append(t EditType) {
|
||||
p.es = append(p.es, t)
|
||||
switch t {
|
||||
case Identity, Modified:
|
||||
p.add(p.dir, p.dir)
|
||||
case UniqueX:
|
||||
p.add(p.dir, 0)
|
||||
case UniqueY:
|
||||
p.add(0, p.dir)
|
||||
}
|
||||
debug.Update()
|
||||
}
|
||||
|
||||
type point struct{ X, Y int }
|
||||
|
||||
func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy }
|
||||
|
||||
// zigzag maps a consecutive sequence of integers to a zig-zag sequence.
|
||||
// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
|
||||
func zigzag(x int) int {
|
||||
if x&1 != 0 {
|
||||
x = ^x
|
||||
}
|
||||
return x >> 1
|
||||
}
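The mapping can be checked directly; a tiny standalone sketch of the same bit trick:

package main

import "fmt"

// zigzag maps 0 1 2 3 4 5 ... onto 0 -1 +1 -2 +2 -3 ...
func zigzag(x int) int {
	if x&1 != 0 {
		x = ^x // for odd x, ^x == -(x+1)
	}
	return x >> 1
}

func main() {
	for i := 0; i < 6; i++ {
		fmt.Print(zigzag(i), " ") // 0 -1 1 -2 2 -3
	}
	fmt.Println()
}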
|
vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go (generated, vendored, new file: 9 lines)
@@ -0,0 +1,9 @@
|
|||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package flags
|
||||
|
||||
// Deterministic controls whether the output of Diff should be deterministic.
|
||||
// This is only used for testing.
|
||||
var Deterministic bool
|
vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go (generated, vendored, new file: 10 lines)
@@ -0,0 +1,10 @@
|
|||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.10
|
||||
|
||||
package flags
|
||||
|
||||
// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10.
|
||||
const AtLeastGo110 = false
|
vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go (generated, vendored, new file: 10 lines)
@@ -0,0 +1,10 @@
|
|||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.10
|
||||
|
||||
package flags
|
||||
|
||||
// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10.
|
||||
const AtLeastGo110 = true
|
vendor/github.com/google/go-cmp/cmp/internal/function/func.go (generated, vendored, new file: 99 lines)
@@ -0,0 +1,99 @@
|
|||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package function provides functionality for identifying function types.
|
||||
package function
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type funcType int
|
||||
|
||||
const (
|
||||
_ funcType = iota
|
||||
|
||||
tbFunc // func(T) bool
|
||||
ttbFunc // func(T, T) bool
|
||||
trbFunc // func(T, R) bool
|
||||
tibFunc // func(T, I) bool
|
||||
trFunc // func(T) R
|
||||
|
||||
Equal = ttbFunc // func(T, T) bool
|
||||
EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool
|
||||
Transformer = trFunc // func(T) R
|
||||
ValueFilter = ttbFunc // func(T, T) bool
|
||||
Less = ttbFunc // func(T, T) bool
|
||||
ValuePredicate = tbFunc // func(T) bool
|
||||
KeyValuePredicate = trbFunc // func(T, R) bool
|
||||
)
|
||||
|
||||
var boolType = reflect.TypeOf(true)
|
||||
|
||||
// IsType reports whether the reflect.Type is of the specified function type.
|
||||
func IsType(t reflect.Type, ft funcType) bool {
|
||||
if t == nil || t.Kind() != reflect.Func || t.IsVariadic() {
|
||||
return false
|
||||
}
|
||||
ni, no := t.NumIn(), t.NumOut()
|
||||
switch ft {
|
||||
case tbFunc: // func(T) bool
|
||||
if ni == 1 && no == 1 && t.Out(0) == boolType {
|
||||
return true
|
||||
}
|
||||
case ttbFunc: // func(T, T) bool
|
||||
if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
|
||||
return true
|
||||
}
|
||||
case trbFunc: // func(T, R) bool
|
||||
if ni == 2 && no == 1 && t.Out(0) == boolType {
|
||||
return true
|
||||
}
|
||||
case tibFunc: // func(T, I) bool
|
||||
if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType {
|
||||
return true
|
||||
}
|
||||
case trFunc: // func(T) R
|
||||
if ni == 1 && no == 1 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`)
|
||||
|
||||
// NameOf returns the name of the function value.
|
||||
func NameOf(v reflect.Value) string {
|
||||
fnc := runtime.FuncForPC(v.Pointer())
|
||||
if fnc == nil {
|
||||
return "<unknown>"
|
||||
}
|
||||
fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm"
|
||||
|
||||
// Method closures have a "-fm" suffix.
|
||||
fullName = strings.TrimSuffix(fullName, "-fm")
|
||||
|
||||
var name string
|
||||
for len(fullName) > 0 {
|
||||
inParen := strings.HasSuffix(fullName, ")")
|
||||
fullName = strings.TrimSuffix(fullName, ")")
|
||||
|
||||
s := lastIdentRx.FindString(fullName)
|
||||
if s == "" {
|
||||
break
|
||||
}
|
||||
name = s + "." + name
|
||||
fullName = strings.TrimSuffix(fullName, s)
|
||||
|
||||
if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 {
|
||||
fullName = fullName[:i]
|
||||
}
|
||||
fullName = strings.TrimSuffix(fullName, ".")
|
||||
}
|
||||
return strings.TrimSuffix(name, ".")
|
||||
}
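A standalone sketch of the underlying runtime.FuncForPC lookup that NameOf builds on (the trimming of the fully qualified name is simplified here):

package main

import (
	"fmt"
	"reflect"
	"runtime"
	"strings"
)

// rawNameOf returns the runtime name of a function value, e.g. "strings.ToUpper".
func rawNameOf(v reflect.Value) string {
	fn := runtime.FuncForPC(v.Pointer())
	if fn == nil {
		return "<unknown>"
	}
	return strings.TrimSuffix(fn.Name(), "-fm") // method closures carry a "-fm" suffix
}

func main() {
	fmt.Println(rawNameOf(reflect.ValueOf(strings.ToUpper))) // strings.ToUpper
}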
|
vendor/github.com/google/go-cmp/cmp/internal/value/name.go (generated, vendored, new file: 157 lines)
@@ -0,0 +1,157 @@
|
|||
// Copyright 2020, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package value
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// TypeString is nearly identical to reflect.Type.String,
|
||||
// but has an additional option to specify that full type names be used.
|
||||
func TypeString(t reflect.Type, qualified bool) string {
|
||||
return string(appendTypeName(nil, t, qualified, false))
|
||||
}
|
||||
|
||||
func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte {
|
||||
// BUG: Go reflection provides no way to disambiguate two named types
|
||||
// of the same name and within the same package,
|
||||
// but declared within the namespace of different functions.
|
||||
|
||||
// Named type.
|
||||
if t.Name() != "" {
|
||||
if qualified && t.PkgPath() != "" {
|
||||
b = append(b, '"')
|
||||
b = append(b, t.PkgPath()...)
|
||||
b = append(b, '"')
|
||||
b = append(b, '.')
|
||||
b = append(b, t.Name()...)
|
||||
} else {
|
||||
b = append(b, t.String()...)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// Unnamed type.
|
||||
switch k := t.Kind(); k {
|
||||
case reflect.Bool, reflect.String, reflect.UnsafePointer,
|
||||
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
|
||||
reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
|
||||
b = append(b, k.String()...)
|
||||
case reflect.Chan:
|
||||
if t.ChanDir() == reflect.RecvDir {
|
||||
b = append(b, "<-"...)
|
||||
}
|
||||
b = append(b, "chan"...)
|
||||
if t.ChanDir() == reflect.SendDir {
|
||||
b = append(b, "<-"...)
|
||||
}
|
||||
b = append(b, ' ')
|
||||
b = appendTypeName(b, t.Elem(), qualified, false)
|
||||
case reflect.Func:
|
||||
if !elideFunc {
|
||||
b = append(b, "func"...)
|
||||
}
|
||||
b = append(b, '(')
|
||||
for i := 0; i < t.NumIn(); i++ {
|
||||
if i > 0 {
|
||||
b = append(b, ", "...)
|
||||
}
|
||||
if i == t.NumIn()-1 && t.IsVariadic() {
|
||||
b = append(b, "..."...)
|
||||
b = appendTypeName(b, t.In(i).Elem(), qualified, false)
|
||||
} else {
|
||||
b = appendTypeName(b, t.In(i), qualified, false)
|
||||
}
|
||||
}
|
||||
b = append(b, ')')
|
||||
switch t.NumOut() {
|
||||
case 0:
|
||||
// Do nothing
|
||||
case 1:
|
||||
b = append(b, ' ')
|
||||
b = appendTypeName(b, t.Out(0), qualified, false)
|
||||
default:
|
||||
b = append(b, " ("...)
|
||||
for i := 0; i < t.NumOut(); i++ {
|
||||
if i > 0 {
|
||||
b = append(b, ", "...)
|
||||
}
|
||||
b = appendTypeName(b, t.Out(i), qualified, false)
|
||||
}
|
||||
b = append(b, ')')
|
||||
}
|
||||
case reflect.Struct:
|
||||
b = append(b, "struct{ "...)
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
if i > 0 {
|
||||
b = append(b, "; "...)
|
||||
}
|
||||
sf := t.Field(i)
|
||||
if !sf.Anonymous {
|
||||
if qualified && sf.PkgPath != "" {
|
||||
b = append(b, '"')
|
||||
b = append(b, sf.PkgPath...)
|
||||
b = append(b, '"')
|
||||
b = append(b, '.')
|
||||
}
|
||||
b = append(b, sf.Name...)
|
||||
b = append(b, ' ')
|
||||
}
|
||||
b = appendTypeName(b, sf.Type, qualified, false)
|
||||
if sf.Tag != "" {
|
||||
b = append(b, ' ')
|
||||
b = strconv.AppendQuote(b, string(sf.Tag))
|
||||
}
|
||||
}
|
||||
if b[len(b)-1] == ' ' {
|
||||
b = b[:len(b)-1]
|
||||
} else {
|
||||
b = append(b, ' ')
|
||||
}
|
||||
b = append(b, '}')
|
||||
case reflect.Slice, reflect.Array:
|
||||
b = append(b, '[')
|
||||
if k == reflect.Array {
|
||||
b = strconv.AppendUint(b, uint64(t.Len()), 10)
|
||||
}
|
||||
b = append(b, ']')
|
||||
b = appendTypeName(b, t.Elem(), qualified, false)
|
||||
case reflect.Map:
|
||||
b = append(b, "map["...)
|
||||
b = appendTypeName(b, t.Key(), qualified, false)
|
||||
b = append(b, ']')
|
||||
b = appendTypeName(b, t.Elem(), qualified, false)
|
||||
case reflect.Ptr:
|
||||
b = append(b, '*')
|
||||
b = appendTypeName(b, t.Elem(), qualified, false)
|
||||
case reflect.Interface:
|
||||
b = append(b, "interface{ "...)
|
||||
for i := 0; i < t.NumMethod(); i++ {
|
||||
if i > 0 {
|
||||
b = append(b, "; "...)
|
||||
}
|
||||
m := t.Method(i)
|
||||
if qualified && m.PkgPath != "" {
|
||||
b = append(b, '"')
|
||||
b = append(b, m.PkgPath...)
|
||||
b = append(b, '"')
|
||||
b = append(b, '.')
|
||||
}
|
||||
b = append(b, m.Name...)
|
||||
b = appendTypeName(b, m.Type, qualified, true)
|
||||
}
|
||||
if b[len(b)-1] == ' ' {
|
||||
b = b[:len(b)-1]
|
||||
} else {
|
||||
b = append(b, ' ')
|
||||
}
|
||||
b = append(b, '}')
|
||||
default:
|
||||
panic("invalid kind: " + k.String())
|
||||
}
|
||||
return b
|
||||
}
|
vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go (generated, vendored, new file: 33 lines)
@@ -0,0 +1,33 @@
|
|||
// Copyright 2018, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build purego
|
||||
|
||||
package value
|
||||
|
||||
import "reflect"
|
||||
|
||||
// Pointer is an opaque typed pointer and is guaranteed to be comparable.
|
||||
type Pointer struct {
|
||||
p uintptr
|
||||
t reflect.Type
|
||||
}
|
||||
|
||||
// PointerOf returns a Pointer from v, which must be a
|
||||
// reflect.Ptr, reflect.Slice, or reflect.Map.
|
||||
func PointerOf(v reflect.Value) Pointer {
|
||||
// NOTE: Storing a pointer as an uintptr is technically incorrect as it
|
||||
// assumes that the GC implementation does not use a moving collector.
|
||||
return Pointer{v.Pointer(), v.Type()}
|
||||
}
|
||||
|
||||
// IsNil reports whether the pointer is nil.
|
||||
func (p Pointer) IsNil() bool {
|
||||
return p.p == 0
|
||||
}
|
||||
|
||||
// Uintptr returns the pointer as a uintptr.
|
||||
func (p Pointer) Uintptr() uintptr {
|
||||
return p.p
|
||||
}
|
vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go (generated, vendored, new file: 36 lines)
@@ -0,0 +1,36 @@
|
|||
// Copyright 2018, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !purego
|
||||
|
||||
package value
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Pointer is an opaque typed pointer and is guaranteed to be comparable.
|
||||
type Pointer struct {
|
||||
p unsafe.Pointer
|
||||
t reflect.Type
|
||||
}
|
||||
|
||||
// PointerOf returns a Pointer from v, which must be a
|
||||
// reflect.Ptr, reflect.Slice, or reflect.Map.
|
||||
func PointerOf(v reflect.Value) Pointer {
|
||||
// The proper representation of a pointer is unsafe.Pointer,
|
||||
// which is necessary if the GC ever uses a moving collector.
|
||||
return Pointer{unsafe.Pointer(v.Pointer()), v.Type()}
|
||||
}
|
||||
|
||||
// IsNil reports whether the pointer is nil.
|
||||
func (p Pointer) IsNil() bool {
|
||||
return p.p == nil
|
||||
}
|
||||
|
||||
// Uintptr returns the pointer as a uintptr.
|
||||
func (p Pointer) Uintptr() uintptr {
|
||||
return uintptr(p.p)
|
||||
}
|
vendor/github.com/google/go-cmp/cmp/internal/value/sort.go (generated, vendored, new file: 106 lines)
@@ -0,0 +1,106 @@
|
|||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package value
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// SortKeys sorts a list of map keys, deduplicating keys if necessary.
|
||||
// The type of each value must be comparable.
|
||||
func SortKeys(vs []reflect.Value) []reflect.Value {
|
||||
if len(vs) == 0 {
|
||||
return vs
|
||||
}
|
||||
|
||||
// Sort the map keys.
|
||||
sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) })
|
||||
|
||||
// Deduplicate keys (fails for NaNs).
|
||||
vs2 := vs[:1]
|
||||
for _, v := range vs[1:] {
|
||||
if isLess(vs2[len(vs2)-1], v) {
|
||||
vs2 = append(vs2, v)
|
||||
}
|
||||
}
|
||||
return vs2
|
||||
}
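The same idea in a standalone form: sorting reflected map keys gives a deterministic iteration order (here only int keys, so a plain comparison suffices):

package main

import (
	"fmt"
	"reflect"
	"sort"
)

func main() {
	m := map[int]string{3: "c", 1: "a", 2: "b"}
	v := reflect.ValueOf(m)
	keys := v.MapKeys() // unordered
	sort.Slice(keys, func(i, j int) bool { return keys[i].Int() < keys[j].Int() })
	for _, k := range keys {
		fmt.Println(k.Int(), v.MapIndex(k).String()) // 1 a, 2 b, 3 c
	}
}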
|
||||
|
||||
// isLess is a generic function for sorting arbitrary map keys.
|
||||
// The inputs must be of the same type and must be comparable.
|
||||
func isLess(x, y reflect.Value) bool {
|
||||
switch x.Type().Kind() {
|
||||
case reflect.Bool:
|
||||
return !x.Bool() && y.Bool()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return x.Int() < y.Int()
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return x.Uint() < y.Uint()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
// NOTE: This does not sort -0 as less than +0
|
||||
// since Go maps treat -0 and +0 as equal keys.
|
||||
fx, fy := x.Float(), y.Float()
|
||||
return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy)
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
cx, cy := x.Complex(), y.Complex()
|
||||
rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy)
|
||||
if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) {
|
||||
return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy)
|
||||
}
|
||||
return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry)
|
||||
case reflect.Ptr, reflect.UnsafePointer, reflect.Chan:
|
||||
return x.Pointer() < y.Pointer()
|
||||
case reflect.String:
|
||||
return x.String() < y.String()
|
||||
case reflect.Array:
|
||||
for i := 0; i < x.Len(); i++ {
|
||||
if isLess(x.Index(i), y.Index(i)) {
|
||||
return true
|
||||
}
|
||||
if isLess(y.Index(i), x.Index(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return false
|
||||
case reflect.Struct:
|
||||
for i := 0; i < x.NumField(); i++ {
|
||||
if isLess(x.Field(i), y.Field(i)) {
|
||||
return true
|
||||
}
|
||||
if isLess(y.Field(i), x.Field(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return false
|
||||
case reflect.Interface:
|
||||
vx, vy := x.Elem(), y.Elem()
|
||||
if !vx.IsValid() || !vy.IsValid() {
|
||||
return !vx.IsValid() && vy.IsValid()
|
||||
}
|
||||
tx, ty := vx.Type(), vy.Type()
|
||||
if tx == ty {
|
||||
return isLess(x.Elem(), y.Elem())
|
||||
}
|
||||
if tx.Kind() != ty.Kind() {
|
||||
return vx.Kind() < vy.Kind()
|
||||
}
|
||||
if tx.String() != ty.String() {
|
||||
return tx.String() < ty.String()
|
||||
}
|
||||
if tx.PkgPath() != ty.PkgPath() {
|
||||
return tx.PkgPath() < ty.PkgPath()
|
||||
}
|
||||
// This can happen in rare situations, so we fallback to just comparing
|
||||
// the unique pointer for a reflect.Type. This guarantees deterministic
|
||||
// ordering within a program, but it is obviously not stable.
|
||||
return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer()
|
||||
default:
|
||||
// Must be Func, Map, or Slice; which are not comparable.
|
||||
panic(fmt.Sprintf("%T is not comparable", x.Type()))
|
||||
}
|
||||
}
|
vendor/github.com/google/go-cmp/cmp/internal/value/zero.go (generated, vendored, new file: 48 lines)
@@ -0,0 +1,48 @@
|
|||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package value
|
||||
|
||||
import (
|
||||
"math"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// IsZero reports whether v is the zero value.
|
||||
// This does not rely on Interface and so can be used on unexported fields.
|
||||
func IsZero(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Bool:
|
||||
return v.Bool() == false
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return math.Float64bits(v.Float()) == 0
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0
|
||||
case reflect.String:
|
||||
return v.String() == ""
|
||||
case reflect.UnsafePointer:
|
||||
return v.Pointer() == 0
|
||||
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
|
||||
return v.IsNil()
|
||||
case reflect.Array:
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if !IsZero(v.Index(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.Struct:
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
if !IsZero(v.Field(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
vendor/github.com/google/go-cmp/cmp/options.go (generated, vendored, new file: 552 lines)
@@ -0,0 +1,552 @@
|
|||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/function"
|
||||
)
|
||||
|
||||
// Option configures for specific behavior of Equal and Diff. In particular,
|
||||
// the fundamental Option functions (Ignore, Transformer, and Comparer),
|
||||
// configure how equality is determined.
|
||||
//
|
||||
// The fundamental options may be composed with filters (FilterPath and
|
||||
// FilterValues) to control the scope over which they are applied.
|
||||
//
|
||||
// The cmp/cmpopts package provides helper functions for creating options that
|
||||
// may be used with Equal and Diff.
|
||||
type Option interface {
|
||||
// filter applies all filters and returns the option that remains.
|
||||
// Each option may only read s.curPath and call s.callTTBFunc.
|
||||
//
|
||||
// An Options is returned only if multiple comparers or transformers
|
||||
// can apply simultaneously and will only contain values of those types
|
||||
// or sub-Options containing values of those types.
|
||||
filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption
|
||||
}
|
||||
|
||||
// applicableOption represents the following types:
|
||||
// Fundamental: ignore | validator | *comparer | *transformer
|
||||
// Grouping: Options
|
||||
type applicableOption interface {
|
||||
Option
|
||||
|
||||
// apply executes the option, which may mutate s or panic.
|
||||
apply(s *state, vx, vy reflect.Value)
|
||||
}
|
||||
|
||||
// coreOption represents the following types:
|
||||
// Fundamental: ignore | validator | *comparer | *transformer
|
||||
// Filters: *pathFilter | *valuesFilter
|
||||
type coreOption interface {
|
||||
Option
|
||||
isCore()
|
||||
}
|
||||
|
||||
type core struct{}
|
||||
|
||||
func (core) isCore() {}
|
||||
|
||||
// Options is a list of Option values that also satisfies the Option interface.
|
||||
// Helper comparison packages may return an Options value when packing multiple
|
||||
// Option values into a single Option. When this package processes an Options,
|
||||
// it will be implicitly expanded into a flat list.
|
||||
//
|
||||
// Applying a filter on an Options is equivalent to applying that same filter
|
||||
// on all individual options held within.
|
||||
type Options []Option
|
||||
|
||||
func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) {
|
||||
for _, opt := range opts {
|
||||
switch opt := opt.filter(s, t, vx, vy); opt.(type) {
|
||||
case ignore:
|
||||
return ignore{} // Only ignore can short-circuit evaluation
|
||||
case validator:
|
||||
out = validator{} // Takes precedence over comparer or transformer
|
||||
case *comparer, *transformer, Options:
|
||||
switch out.(type) {
|
||||
case nil:
|
||||
out = opt
|
||||
case validator:
|
||||
// Keep validator
|
||||
case *comparer, *transformer, Options:
|
||||
out = Options{out, opt} // Conflicting comparers or transformers
|
||||
}
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func (opts Options) apply(s *state, _, _ reflect.Value) {
|
||||
const warning = "ambiguous set of applicable options"
|
||||
const help = "consider using filters to ensure at most one Comparer or Transformer may apply"
|
||||
var ss []string
|
||||
for _, opt := range flattenOptions(nil, opts) {
|
||||
ss = append(ss, fmt.Sprint(opt))
|
||||
}
|
||||
set := strings.Join(ss, "\n\t")
|
||||
panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help))
|
||||
}
|
||||
|
||||
func (opts Options) String() string {
|
||||
var ss []string
|
||||
for _, opt := range opts {
|
||||
ss = append(ss, fmt.Sprint(opt))
|
||||
}
|
||||
return fmt.Sprintf("Options{%s}", strings.Join(ss, ", "))
|
||||
}
|
||||
|
||||
// FilterPath returns a new Option where opt is only evaluated if filter f
|
||||
// returns true for the current Path in the value tree.
|
||||
//
|
||||
// This filter is called even if a slice element or map entry is missing and
|
||||
// provides an opportunity to ignore such cases. The filter function must be
|
||||
// symmetric such that the filter result is identical regardless of whether the
|
||||
// missing value is from x or y.
|
||||
//
|
||||
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
|
||||
// a previously filtered Option.
|
||||
func FilterPath(f func(Path) bool, opt Option) Option {
|
||||
if f == nil {
|
||||
panic("invalid path filter function")
|
||||
}
|
||||
if opt := normalizeOption(opt); opt != nil {
|
||||
return &pathFilter{fnc: f, opt: opt}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type pathFilter struct {
|
||||
core
|
||||
fnc func(Path) bool
|
||||
opt Option
|
||||
}
|
||||
|
||||
func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
|
||||
if f.fnc(s.curPath) {
|
||||
return f.opt.filter(s, t, vx, vy)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f pathFilter) String() string {
|
||||
return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt)
|
||||
}
|
||||
|
||||
// FilterValues returns a new Option where opt is only evaluated if filter f,
|
||||
// which is a function of the form "func(T, T) bool", returns true for the
|
||||
// current pair of values being compared. If either value is invalid or
|
||||
// the type of the values is not assignable to T, then this filter implicitly
|
||||
// returns false.
|
||||
//
|
||||
// The filter function must be
|
||||
// symmetric (i.e., agnostic to the order of the inputs) and
|
||||
// deterministic (i.e., produces the same result when given the same inputs).
|
||||
// If T is an interface, it is possible that f is called with two values with
|
||||
// different concrete types that both implement T.
|
||||
//
|
||||
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
|
||||
// a previously filtered Option.
|
||||
func FilterValues(f interface{}, opt Option) Option {
|
||||
v := reflect.ValueOf(f)
|
||||
if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() {
|
||||
panic(fmt.Sprintf("invalid values filter function: %T", f))
|
||||
}
|
||||
if opt := normalizeOption(opt); opt != nil {
|
||||
vf := &valuesFilter{fnc: v, opt: opt}
|
||||
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
|
||||
vf.typ = ti
|
||||
}
|
||||
return vf
|
||||
}
|
||||
return nil
|
||||
}
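Illustrative sketch (separate from the vendored source): FilterValues can restrict a permissive Comparer to one specific pair of values, here treating two NaNs as equal (the same idea as cmpopts.EquateNaNs, written out by hand).

package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// The inner Comparer only applies when both values are NaN.
	equateNaNs := cmp.FilterValues(func(x, y float64) bool {
		return math.IsNaN(x) && math.IsNaN(y)
	}, cmp.Comparer(func(x, y float64) bool { return true }))

	a := []float64{1.0, math.NaN()}
	b := []float64{1.0, math.NaN()}
	fmt.Println(cmp.Equal(a, b, equateNaNs)) // true
}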
|
||||
|
||||
type valuesFilter struct {
|
||||
core
|
||||
typ reflect.Type // T
|
||||
fnc reflect.Value // func(T, T) bool
|
||||
opt Option
|
||||
}
|
||||
|
||||
func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
|
||||
if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() {
|
||||
return nil
|
||||
}
|
||||
if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) {
|
||||
return f.opt.filter(s, t, vx, vy)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f valuesFilter) String() string {
|
||||
return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt)
|
||||
}
|
||||
|
||||
// Ignore is an Option that causes all comparisons to be ignored.
|
||||
// This value is intended to be combined with FilterPath or FilterValues.
|
||||
// It is an error to pass an unfiltered Ignore option to Equal.
|
||||
func Ignore() Option { return ignore{} }
|
||||
|
||||
type ignore struct{ core }
|
||||
|
||||
func (ignore) isFiltered() bool { return false }
|
||||
func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} }
|
||||
func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) }
|
||||
func (ignore) String() string { return "Ignore()" }
|
||||
|
||||
// validator is a sentinel Option type to indicate that some options could not
|
||||
// be evaluated due to unexported fields, missing slice elements, or
|
||||
// missing map entries. Both values are validator only for unexported fields.
|
||||
type validator struct{ core }
|
||||
|
||||
func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption {
|
||||
if !vx.IsValid() || !vy.IsValid() {
|
||||
return validator{}
|
||||
}
|
||||
if !vx.CanInterface() || !vy.CanInterface() {
|
||||
return validator{}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (validator) apply(s *state, vx, vy reflect.Value) {
|
||||
// Implies missing slice element or map entry.
|
||||
if !vx.IsValid() || !vy.IsValid() {
|
||||
s.report(vx.IsValid() == vy.IsValid(), 0)
|
||||
return
|
||||
}
|
||||
|
||||
// Unable to Interface implies unexported field without visibility access.
|
||||
if !vx.CanInterface() || !vy.CanInterface() {
|
||||
help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported"
|
||||
var name string
|
||||
if t := s.curPath.Index(-2).Type(); t.Name() != "" {
|
||||
// Named type with unexported fields.
|
||||
name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
|
||||
if _, ok := reflect.New(t).Interface().(error); ok {
|
||||
help = "consider using cmpopts.EquateErrors to compare error values"
|
||||
}
|
||||
} else {
|
||||
// Unnamed type with unexported fields. Derive PkgPath from field.
|
||||
var pkgPath string
|
||||
for i := 0; i < t.NumField() && pkgPath == ""; i++ {
|
||||
pkgPath = t.Field(i).PkgPath
|
||||
}
|
||||
name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int })
|
||||
}
|
||||
panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help))
|
||||
}
|
||||
|
||||
panic("not reachable")
|
||||
}
|
||||
|
||||
// identRx represents a valid identifier according to the Go specification.
|
||||
const identRx = `[_\p{L}][_\p{L}\p{N}]*`
|
||||
|
||||
var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`)
|
||||
|
||||
// Transformer returns an Option that applies a transformation function that
|
||||
// converts values of a certain type into that of another.
|
||||
//
|
||||
// The transformer f must be a function "func(T) R" that converts values of
|
||||
// type T to those of type R and is implicitly filtered to input values
|
||||
// assignable to T. The transformer must not mutate T in any way.
|
||||
//
|
||||
// To help prevent some cases of infinite recursive cycles applying the
|
||||
// same transform to the output of itself (e.g., in the case where the
|
||||
// input and output types are the same), an implicit filter is added such that
|
||||
// a transformer is applicable only if that exact transformer is not already
|
||||
// in the tail of the Path since the last non-Transform step.
|
||||
// For situations where the implicit filter is still insufficient,
|
||||
// consider using cmpopts.AcyclicTransformer, which adds a filter
|
||||
// to prevent the transformer from being recursively applied upon itself.
|
||||
//
|
||||
// The name is a user provided label that is used as the Transform.Name in the
|
||||
// transformation PathStep (and eventually shown in the Diff output).
|
||||
// The name must be a valid identifier or qualified identifier in Go syntax.
|
||||
// If empty, an arbitrary name is used.
|
||||
func Transformer(name string, f interface{}) Option {
|
||||
v := reflect.ValueOf(f)
|
||||
if !function.IsType(v.Type(), function.Transformer) || v.IsNil() {
|
||||
panic(fmt.Sprintf("invalid transformer function: %T", f))
|
||||
}
|
||||
if name == "" {
|
||||
name = function.NameOf(v)
|
||||
if !identsRx.MatchString(name) {
|
||||
name = "λ" // Lambda-symbol as placeholder name
|
||||
}
|
||||
} else if !identsRx.MatchString(name) {
|
||||
panic(fmt.Sprintf("invalid name: %q", name))
|
||||
}
|
||||
tr := &transformer{name: name, fnc: reflect.ValueOf(f)}
|
||||
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
|
||||
tr.typ = ti
|
||||
}
|
||||
return tr
|
||||
}
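Illustrative sketch (separate from the vendored source): a Transformer can normalize values before comparison; the "SplitLines" name and the sample strings are arbitrary choices for this example.

package main

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Convert strings into slices of lines so differences are reported per line.
	splitLines := cmp.Transformer("SplitLines", func(s string) []string {
		return strings.Split(s, "\n")
	})

	x := "alpha\nbeta"
	y := "alpha\nbeta"
	fmt.Println(cmp.Equal(x, y, splitLines)) // true
}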
|
||||
|
||||
type transformer struct {
|
||||
core
|
||||
name string
|
||||
typ reflect.Type // T
|
||||
fnc reflect.Value // func(T) R
|
||||
}
|
||||
|
||||
func (tr *transformer) isFiltered() bool { return tr.typ != nil }
|
||||
|
||||
func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption {
|
||||
for i := len(s.curPath) - 1; i >= 0; i-- {
|
||||
if t, ok := s.curPath[i].(Transform); !ok {
|
||||
break // Hit most recent non-Transform step
|
||||
} else if tr == t.trans {
|
||||
return nil // Cannot directly use same Transform
|
||||
}
|
||||
}
|
||||
if tr.typ == nil || t.AssignableTo(tr.typ) {
|
||||
return tr
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tr *transformer) apply(s *state, vx, vy reflect.Value) {
|
||||
step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}}
|
||||
vvx := s.callTRFunc(tr.fnc, vx, step)
|
||||
vvy := s.callTRFunc(tr.fnc, vy, step)
|
||||
step.vx, step.vy = vvx, vvy
|
||||
s.compareAny(step)
|
||||
}
|
||||
|
||||
func (tr transformer) String() string {
|
||||
return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc))
|
||||
}
|
||||
|
||||
// Comparer returns an Option that determines whether two values are equal
|
||||
// to each other.
|
||||
//
|
||||
// The comparer f must be a function "func(T, T) bool" and is implicitly
|
||||
// filtered to input values assignable to T. If T is an interface, it is
|
||||
// possible that f is called with two values of different concrete types that
|
||||
// both implement T.
|
||||
//
|
||||
// The equality function must be:
|
||||
// • Symmetric: equal(x, y) == equal(y, x)
|
||||
// • Deterministic: equal(x, y) == equal(x, y)
|
||||
// • Pure: equal(x, y) does not modify x or y
|
||||
func Comparer(f interface{}) Option {
|
||||
v := reflect.ValueOf(f)
|
||||
if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
|
||||
panic(fmt.Sprintf("invalid comparer function: %T", f))
|
||||
}
|
||||
cm := &comparer{fnc: v}
|
||||
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
|
||||
cm.typ = ti
|
||||
}
|
||||
return cm
|
||||
}
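Illustrative sketch (separate from the vendored source): a Comparer that satisfies the symmetry, determinism, and purity rules above, using an arbitrary tolerance for approximate float64 equality.

package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Two float64 values are equal when within a relative tolerance of 1e-9.
	approx := cmp.Comparer(func(x, y float64) bool {
		return x == y || math.Abs(x-y) <= 1e-9*math.Max(math.Abs(x), math.Abs(y))
	})

	fmt.Println(cmp.Equal(1.0, 1.0+1e-12, approx)) // true
}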
|
||||
|
||||
type comparer struct {
|
||||
core
|
||||
typ reflect.Type // T
|
||||
fnc reflect.Value // func(T, T) bool
|
||||
}
|
||||
|
||||
func (cm *comparer) isFiltered() bool { return cm.typ != nil }
|
||||
|
||||
func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption {
|
||||
if cm.typ == nil || t.AssignableTo(cm.typ) {
|
||||
return cm
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cm *comparer) apply(s *state, vx, vy reflect.Value) {
|
||||
eq := s.callTTBFunc(cm.fnc, vx, vy)
|
||||
s.report(eq, reportByFunc)
|
||||
}
|
||||
|
||||
func (cm comparer) String() string {
|
||||
return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc))
|
||||
}
|
||||
|
||||
// Exporter returns an Option that specifies whether Equal is allowed to
|
||||
// introspect into the unexported fields of certain struct types.
|
||||
//
|
||||
// Users of this option must understand that comparing on unexported fields
|
||||
// from external packages is not safe since changes in the internal
|
||||
// implementation of some external package may cause the result of Equal
|
||||
// to unexpectedly change. However, it may be valid to use this option on types
|
||||
// defined in an internal package where the semantic meaning of an unexported
|
||||
// field is in the control of the user.
|
||||
//
|
||||
// In many cases, a custom Comparer should be used instead that defines
|
||||
// equality as a function of the public API of a type rather than the underlying
|
||||
// unexported implementation.
|
||||
//
|
||||
// For example, the reflect.Type documentation defines equality to be determined
|
||||
// by the == operator on the interface (essentially performing a shallow pointer
|
||||
// comparison) and most attempts to compare *regexp.Regexp types are interested
|
||||
// in only checking that the regular expression strings are equal.
|
||||
// Both of these are accomplished using Comparers:
|
||||
//
|
||||
// Comparer(func(x, y reflect.Type) bool { return x == y })
|
||||
// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
|
||||
//
|
||||
// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore
|
||||
// all unexported fields on specified struct types.
|
||||
func Exporter(f func(reflect.Type) bool) Option {
|
||||
if !supportExporters {
|
||||
panic("Exporter is not supported on purego builds")
|
||||
}
|
||||
return exporter(f)
|
||||
}
|
||||
|
||||
type exporter func(reflect.Type) bool
|
||||
|
||||
func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
// AllowUnexported returns an Options that allows Equal to forcibly introspect
|
||||
// unexported fields of the specified struct types.
|
||||
//
|
||||
// See Exporter for the proper use of this option.
|
||||
func AllowUnexported(types ...interface{}) Option {
|
||||
m := make(map[reflect.Type]bool)
|
||||
for _, typ := range types {
|
||||
t := reflect.TypeOf(typ)
|
||||
if t.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("invalid struct type: %T", typ))
|
||||
}
|
||||
m[t] = true
|
||||
}
|
||||
return exporter(func(t reflect.Type) bool { return m[t] })
|
||||
}
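Illustrative sketch (separate from the vendored source): AllowUnexported lets Equal descend into unexported fields of a type under your control; the myTime type below is hypothetical.

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// myTime is a hypothetical type with only unexported fields.
type myTime struct {
	sec  int64
	nsec int32
}

func main() {
	x := myTime{sec: 1, nsec: 2}
	y := myTime{sec: 1, nsec: 2}

	// Without the option, Equal would panic on the unexported fields.
	fmt.Println(cmp.Equal(x, y, cmp.AllowUnexported(myTime{}))) // true
}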
|
||||
|
||||
// Result represents the comparison result for a single node and
|
||||
// is provided by cmp when calling Report (see Reporter).
|
||||
type Result struct {
|
||||
_ [0]func() // Make Result incomparable
|
||||
flags resultFlags
|
||||
}
|
||||
|
||||
// Equal reports whether the node was determined to be equal or not.
|
||||
// As a special case, ignored nodes are considered equal.
|
||||
func (r Result) Equal() bool {
|
||||
return r.flags&(reportEqual|reportByIgnore) != 0
|
||||
}
|
||||
|
||||
// ByIgnore reports whether the node is equal because it was ignored.
|
||||
// This never reports true if Equal reports false.
|
||||
func (r Result) ByIgnore() bool {
|
||||
return r.flags&reportByIgnore != 0
|
||||
}
|
||||
|
||||
// ByMethod reports whether the Equal method determined equality.
|
||||
func (r Result) ByMethod() bool {
|
||||
return r.flags&reportByMethod != 0
|
||||
}
|
||||
|
||||
// ByFunc reports whether a Comparer function determined equality.
|
||||
func (r Result) ByFunc() bool {
|
||||
return r.flags&reportByFunc != 0
|
||||
}
|
||||
|
||||
// ByCycle reports whether a reference cycle was detected.
|
||||
func (r Result) ByCycle() bool {
|
||||
return r.flags&reportByCycle != 0
|
||||
}
|
||||
|
||||
type resultFlags uint
|
||||
|
||||
const (
|
||||
_ resultFlags = (1 << iota) / 2
|
||||
|
||||
reportEqual
|
||||
reportUnequal
|
||||
reportByIgnore
|
||||
reportByMethod
|
||||
reportByFunc
|
||||
reportByCycle
|
||||
)
|
||||
|
||||
// Reporter is an Option that can be passed to Equal. When Equal traverses
|
||||
// the value trees, it calls PushStep as it descends into each node in the
|
||||
// tree and PopStep as it ascends out of the node. The leaves of the tree are
|
||||
// either compared (determined to be equal or not equal) or ignored and reported
|
||||
// as such by calling the Report method.
|
||||
func Reporter(r interface {
|
||||
// PushStep is called when a tree-traversal operation is performed.
|
||||
// The PathStep itself is only valid until the step is popped.
|
||||
// The PathStep.Values are valid for the duration of the entire traversal
|
||||
// and must not be mutated.
|
||||
//
|
||||
// Equal always calls PushStep at the start to provide an operation-less
|
||||
// PathStep used to report the root values.
|
||||
//
|
||||
// Within a slice, the exact set of inserted, removed, or modified elements
|
||||
// is unspecified and may change in future implementations.
|
||||
// The entries of a map are iterated through in an unspecified order.
|
||||
PushStep(PathStep)
|
||||
|
||||
// Report is called exactly once on leaf nodes to report whether the
|
||||
// comparison identified the node as equal, unequal, or ignored.
|
||||
// A leaf node is one that is immediately preceded by and followed by
|
||||
// a pair of PushStep and PopStep calls.
|
||||
Report(Result)
|
||||
|
||||
// PopStep ascends back up the value tree.
|
||||
// There is always a matching pop call for every push call.
|
||||
PopStep()
|
||||
}) Option {
|
||||
return reporter{r}
|
||||
}
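Illustrative sketch (separate from the vendored source): a minimal Reporter that records one line per unequal leaf, using only the PushStep/Report/PopStep contract described above.

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// diffReporter collects a description of every differing leaf node.
type diffReporter struct {
	path  cmp.Path
	diffs []string
}

func (r *diffReporter) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }

func (r *diffReporter) Report(rs cmp.Result) {
	if !rs.Equal() {
		vx, vy := r.path.Last().Values()
		r.diffs = append(r.diffs, fmt.Sprintf("%#v:\n\t-: %+v\n\t+: %+v\n", r.path, vx, vy))
	}
}

func (r *diffReporter) PopStep() { r.path = r.path[:len(r.path)-1] }

func main() {
	var r diffReporter
	cmp.Equal([]int{1, 2, 3}, []int{1, 0, 3}, cmp.Reporter(&r))
	for _, d := range r.diffs {
		fmt.Print(d)
	}
}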
|
||||
|
||||
type reporter struct{ reporterIface }
|
||||
type reporterIface interface {
|
||||
PushStep(PathStep)
|
||||
Report(Result)
|
||||
PopStep()
|
||||
}
|
||||
|
||||
func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
// normalizeOption normalizes the input options such that all Options groups
|
||||
// are flattened and groups with a single element are reduced to that element.
|
||||
// Only coreOptions and Options containing coreOptions are allowed.
|
||||
func normalizeOption(src Option) Option {
|
||||
switch opts := flattenOptions(nil, Options{src}); len(opts) {
|
||||
case 0:
|
||||
return nil
|
||||
case 1:
|
||||
return opts[0]
|
||||
default:
|
||||
return opts
|
||||
}
|
||||
}
|
||||
|
||||
// flattenOptions copies all options in src to dst as a flat list.
|
||||
// Only coreOptions and Options containing coreOptions are allowed.
|
||||
func flattenOptions(dst, src Options) Options {
|
||||
for _, opt := range src {
|
||||
switch opt := opt.(type) {
|
||||
case nil:
|
||||
continue
|
||||
case Options:
|
||||
dst = flattenOptions(dst, opt)
|
||||
case coreOption:
|
||||
dst = append(dst, opt)
|
||||
default:
|
||||
panic(fmt.Sprintf("invalid option type: %T", opt))
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
378 vendor/github.com/google/go-cmp/cmp/path.go generated vendored Normal file
@ -0,0 +1,378 @@
|
|||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
// Path is a list of PathSteps describing the sequence of operations to get
|
||||
// from some root type to the current position in the value tree.
|
||||
// The first Path element is always an operation-less PathStep that exists
|
||||
// simply to identify the initial type.
|
||||
//
|
||||
// When traversing structs with embedded structs, the embedded struct will
|
||||
// always be accessed as a field before traversing the fields of the
|
||||
// embedded struct themselves. That is, an exported field from the
|
||||
// embedded struct will never be accessed directly from the parent struct.
|
||||
type Path []PathStep
|
||||
|
||||
// PathStep is a union-type for specific operations to traverse
|
||||
// a value's tree structure. Users of this package never need to implement
|
||||
// these types as values of this type will be returned by this package.
|
||||
//
|
||||
// Implementations of this interface are
|
||||
// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform.
|
||||
type PathStep interface {
|
||||
String() string
|
||||
|
||||
// Type is the resulting type after performing the path step.
|
||||
Type() reflect.Type
|
||||
|
||||
// Values is the resulting values after performing the path step.
|
||||
// The type of each valid value is guaranteed to be identical to Type.
|
||||
//
|
||||
// In some cases, one or both may be invalid or have restrictions:
|
||||
// • For StructField, both are not interface-able if the current field
|
||||
// is unexported and the struct type is not explicitly permitted by
|
||||
// an Exporter to traverse unexported fields.
|
||||
// • For SliceIndex, one may be invalid if an element is missing from
|
||||
// either the x or y slice.
|
||||
// • For MapIndex, one may be invalid if an entry is missing from
|
||||
// either the x or y map.
|
||||
//
|
||||
// The provided values must not be mutated.
|
||||
Values() (vx, vy reflect.Value)
|
||||
}
|
||||
|
||||
var (
|
||||
_ PathStep = StructField{}
|
||||
_ PathStep = SliceIndex{}
|
||||
_ PathStep = MapIndex{}
|
||||
_ PathStep = Indirect{}
|
||||
_ PathStep = TypeAssertion{}
|
||||
_ PathStep = Transform{}
|
||||
)
|
||||
|
||||
func (pa *Path) push(s PathStep) {
|
||||
*pa = append(*pa, s)
|
||||
}
|
||||
|
||||
func (pa *Path) pop() {
|
||||
*pa = (*pa)[:len(*pa)-1]
|
||||
}
|
||||
|
||||
// Last returns the last PathStep in the Path.
|
||||
// If the path is empty, this returns a non-nil PathStep that reports a nil Type.
|
||||
func (pa Path) Last() PathStep {
|
||||
return pa.Index(-1)
|
||||
}
|
||||
|
||||
// Index returns the ith step in the Path and supports negative indexing.
|
||||
// A negative index starts counting from the tail of the Path such that -1
|
||||
// refers to the last step, -2 refers to the second-to-last step, and so on.
|
||||
// If index is invalid, this returns a non-nil PathStep that reports a nil Type.
|
||||
func (pa Path) Index(i int) PathStep {
|
||||
if i < 0 {
|
||||
i = len(pa) + i
|
||||
}
|
||||
if i < 0 || i >= len(pa) {
|
||||
return pathStep{}
|
||||
}
|
||||
return pa[i]
|
||||
}
|
||||
|
||||
// String returns the simplified path to a node.
|
||||
// The simplified path only contains struct field accesses.
|
||||
//
|
||||
// For example:
|
||||
// MyMap.MySlices.MyField
|
||||
func (pa Path) String() string {
|
||||
var ss []string
|
||||
for _, s := range pa {
|
||||
if _, ok := s.(StructField); ok {
|
||||
ss = append(ss, s.String())
|
||||
}
|
||||
}
|
||||
return strings.TrimPrefix(strings.Join(ss, ""), ".")
|
||||
}
|
||||
|
||||
// GoString returns the path to a specific node using Go syntax.
|
||||
//
|
||||
// For example:
|
||||
// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
|
||||
func (pa Path) GoString() string {
|
||||
var ssPre, ssPost []string
|
||||
var numIndirect int
|
||||
for i, s := range pa {
|
||||
var nextStep PathStep
|
||||
if i+1 < len(pa) {
|
||||
nextStep = pa[i+1]
|
||||
}
|
||||
switch s := s.(type) {
|
||||
case Indirect:
|
||||
numIndirect++
|
||||
pPre, pPost := "(", ")"
|
||||
switch nextStep.(type) {
|
||||
case Indirect:
|
||||
continue // Next step is indirection, so let them batch up
|
||||
case StructField:
|
||||
numIndirect-- // Automatic indirection on struct fields
|
||||
case nil:
|
||||
pPre, pPost = "", "" // Last step; no need for parenthesis
|
||||
}
|
||||
if numIndirect > 0 {
|
||||
ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect))
|
||||
ssPost = append(ssPost, pPost)
|
||||
}
|
||||
numIndirect = 0
|
||||
continue
|
||||
case Transform:
|
||||
ssPre = append(ssPre, s.trans.name+"(")
|
||||
ssPost = append(ssPost, ")")
|
||||
continue
|
||||
}
|
||||
ssPost = append(ssPost, s.String())
|
||||
}
|
||||
for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 {
|
||||
ssPre[i], ssPre[j] = ssPre[j], ssPre[i]
|
||||
}
|
||||
return strings.Join(ssPre, "") + strings.Join(ssPost, "")
|
||||
}
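Illustrative sketch (separate from the vendored source): a path filter can inspect individual PathStep values, here ignoring map entries stored under a hypothetical "secret" key.

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

func main() {
	x := map[string]int{"a": 1, "secret": 42}
	y := map[string]int{"a": 1, "secret": 7}

	// Ignore any value reached through a MapIndex step whose key is "secret".
	ignoreSecret := cmp.FilterPath(func(p cmp.Path) bool {
		for _, ps := range p {
			if mi, ok := ps.(cmp.MapIndex); ok && mi.Key().Interface() == "secret" {
				return true
			}
		}
		return false
	}, cmp.Ignore())

	fmt.Println(cmp.Equal(x, y, ignoreSecret)) // true
}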
|
||||
|
||||
type pathStep struct {
|
||||
typ reflect.Type
|
||||
vx, vy reflect.Value
|
||||
}
|
||||
|
||||
func (ps pathStep) Type() reflect.Type { return ps.typ }
|
||||
func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy }
|
||||
func (ps pathStep) String() string {
|
||||
if ps.typ == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
s := ps.typ.String()
|
||||
if s == "" || strings.ContainsAny(s, "{}\n") {
|
||||
return "root" // Type too simple or complex to print
|
||||
}
|
||||
return fmt.Sprintf("{%s}", s)
|
||||
}
|
||||
|
||||
// StructField represents a struct field access on a field called Name.
|
||||
type StructField struct{ *structField }
|
||||
type structField struct {
|
||||
pathStep
|
||||
name string
|
||||
idx int
|
||||
|
||||
// These fields are used for forcibly accessing an unexported field.
|
||||
// pvx, pvy, and field are only valid if unexported is true.
|
||||
unexported bool
|
||||
mayForce bool // Forcibly allow visibility
|
||||
paddr bool // Was parent addressable?
|
||||
pvx, pvy reflect.Value // Parent values (always addressable)
|
||||
field reflect.StructField // Field information
|
||||
}
|
||||
|
||||
func (sf StructField) Type() reflect.Type { return sf.typ }
|
||||
func (sf StructField) Values() (vx, vy reflect.Value) {
|
||||
if !sf.unexported {
|
||||
return sf.vx, sf.vy // CanInterface reports true
|
||||
}
|
||||
|
||||
// Forcibly obtain read-write access to an unexported struct field.
|
||||
if sf.mayForce {
|
||||
vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr)
|
||||
vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr)
|
||||
return vx, vy // CanInterface reports true
|
||||
}
|
||||
return sf.vx, sf.vy // CanInterface reports false
|
||||
}
|
||||
func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) }
|
||||
|
||||
// Name is the field name.
|
||||
func (sf StructField) Name() string { return sf.name }
|
||||
|
||||
// Index is the index of the field in the parent struct type.
|
||||
// See reflect.Type.Field.
|
||||
func (sf StructField) Index() int { return sf.idx }
|
||||
|
||||
// SliceIndex is an index operation on a slice or array at some index Key.
|
||||
type SliceIndex struct{ *sliceIndex }
|
||||
type sliceIndex struct {
|
||||
pathStep
|
||||
xkey, ykey int
|
||||
isSlice bool // False for reflect.Array
|
||||
}
|
||||
|
||||
func (si SliceIndex) Type() reflect.Type { return si.typ }
|
||||
func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy }
|
||||
func (si SliceIndex) String() string {
|
||||
switch {
|
||||
case si.xkey == si.ykey:
|
||||
return fmt.Sprintf("[%d]", si.xkey)
|
||||
case si.ykey == -1:
|
||||
// [5->?] means "I don't know where X[5] went"
|
||||
return fmt.Sprintf("[%d->?]", si.xkey)
|
||||
case si.xkey == -1:
|
||||
// [?->3] means "I don't know where Y[3] came from"
|
||||
return fmt.Sprintf("[?->%d]", si.ykey)
|
||||
default:
|
||||
// [5->3] means "X[5] moved to Y[3]"
|
||||
return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey)
|
||||
}
|
||||
}
|
||||
|
||||
// Key is the index key; it may return -1 if in a split state
|
||||
func (si SliceIndex) Key() int {
|
||||
if si.xkey != si.ykey {
|
||||
return -1
|
||||
}
|
||||
return si.xkey
|
||||
}
|
||||
|
||||
// SplitKeys are the indexes for indexing into slices in the
|
||||
// x and y values, respectively. These indexes may differ due to the
|
||||
// insertion or removal of an element in one of the slices, causing
|
||||
// all of the indexes to be shifted. If an index is -1, then that
|
||||
// indicates that the element does not exist in the associated slice.
|
||||
//
|
||||
// Key is guaranteed to return -1 if and only if the indexes returned
|
||||
// by SplitKeys are not the same. SplitKeys will never return -1 for
|
||||
// both indexes.
|
||||
func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey }
|
||||
|
||||
// MapIndex is an index operation on a map at some index Key.
|
||||
type MapIndex struct{ *mapIndex }
|
||||
type mapIndex struct {
|
||||
pathStep
|
||||
key reflect.Value
|
||||
}
|
||||
|
||||
func (mi MapIndex) Type() reflect.Type { return mi.typ }
|
||||
func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy }
|
||||
func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) }
|
||||
|
||||
// Key is the value of the map key.
|
||||
func (mi MapIndex) Key() reflect.Value { return mi.key }
|
||||
|
||||
// Indirect represents pointer indirection on the parent type.
|
||||
type Indirect struct{ *indirect }
|
||||
type indirect struct {
|
||||
pathStep
|
||||
}
|
||||
|
||||
func (in Indirect) Type() reflect.Type { return in.typ }
|
||||
func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy }
|
||||
func (in Indirect) String() string { return "*" }
|
||||
|
||||
// TypeAssertion represents a type assertion on an interface.
|
||||
type TypeAssertion struct{ *typeAssertion }
|
||||
type typeAssertion struct {
|
||||
pathStep
|
||||
}
|
||||
|
||||
func (ta TypeAssertion) Type() reflect.Type { return ta.typ }
|
||||
func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
|
||||
func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) }
|
||||
|
||||
// Transform is a transformation from the parent type to the current type.
|
||||
type Transform struct{ *transform }
|
||||
type transform struct {
|
||||
pathStep
|
||||
trans *transformer
|
||||
}
|
||||
|
||||
func (tf Transform) Type() reflect.Type { return tf.typ }
|
||||
func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy }
|
||||
func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) }
|
||||
|
||||
// Name is the name of the Transformer.
|
||||
func (tf Transform) Name() string { return tf.trans.name }
|
||||
|
||||
// Func is the function pointer to the transformer function.
|
||||
func (tf Transform) Func() reflect.Value { return tf.trans.fnc }
|
||||
|
||||
// Option returns the originally constructed Transformer option.
|
||||
// The == operator can be used to detect the exact option used.
|
||||
func (tf Transform) Option() Option { return tf.trans }
|
||||
|
||||
// pointerPath represents a dual-stack of pointers encountered when
|
||||
// recursively traversing the x and y values. This data structure supports
|
||||
// detection of cycles and determining whether the cycles are equal.
|
||||
// In Go, cycles can occur via pointers, slices, and maps.
|
||||
//
|
||||
// The pointerPath uses a map to represent a stack; where descension into a
|
||||
// pointer pushes the address onto the stack, and ascension from a pointer
|
||||
// pops the address from the stack. Thus, when traversing into a pointer from
|
||||
// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles
|
||||
// by checking whether the pointer has already been visited. The cycle detection
|
||||
// uses a separate stack for the x and y values.
|
||||
//
|
||||
// If a cycle is detected we need to determine whether the two pointers
|
||||
// should be considered equal. The definition of equality chosen by Equal
|
||||
// requires two graphs to have the same structure. To determine this, both the
|
||||
// x and y values must have a cycle where the previous pointers were also
|
||||
// encountered together as a pair.
|
||||
//
|
||||
// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and
|
||||
// MapIndex with pointer information for the x and y values.
|
||||
// Suppose px and py are two pointers to compare, we then search the
|
||||
// Path for whether px was ever encountered in the Path history of x, and
|
||||
// similarly so with py. If either side has a cycle, the comparison is only
|
||||
// equal if both px and py have a cycle resulting from the same PathStep.
|
||||
//
|
||||
// Using a map as a stack is more performant as we can perform cycle detection
|
||||
// in O(1) instead of O(N) where N is len(Path).
|
||||
type pointerPath struct {
|
||||
// mx is keyed by x pointers, where the value is the associated y pointer.
|
||||
mx map[value.Pointer]value.Pointer
|
||||
// my is keyed by y pointers, where the value is the associated x pointer.
|
||||
my map[value.Pointer]value.Pointer
|
||||
}
|
||||
|
||||
func (p *pointerPath) Init() {
|
||||
p.mx = make(map[value.Pointer]value.Pointer)
|
||||
p.my = make(map[value.Pointer]value.Pointer)
|
||||
}
|
||||
|
||||
// Push indicates intent to descend into pointers vx and vy where
|
||||
// visited reports whether either has been seen before. If visited before,
|
||||
// equal reports whether both pointers were encountered together.
|
||||
// Pop must be called if and only if the pointers were never visited.
|
||||
//
|
||||
// The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map
|
||||
// and be non-nil.
|
||||
func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) {
|
||||
px := value.PointerOf(vx)
|
||||
py := value.PointerOf(vy)
|
||||
_, ok1 := p.mx[px]
|
||||
_, ok2 := p.my[py]
|
||||
if ok1 || ok2 {
|
||||
equal = p.mx[px] == py && p.my[py] == px // Pointers paired together
|
||||
return equal, true
|
||||
}
|
||||
p.mx[px] = py
|
||||
p.my[py] = px
|
||||
return false, false
|
||||
}
|
||||
|
||||
// Pop ascends from pointers vx and vy.
|
||||
func (p pointerPath) Pop(vx, vy reflect.Value) {
|
||||
delete(p.mx, value.PointerOf(vx))
|
||||
delete(p.my, value.PointerOf(vy))
|
||||
}
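Illustrative sketch (separate from the vendored source): the paired-pointer stack described above, rewritten with plain int keys instead of the internal value.Pointer type (an assumption made so the sketch stands alone).

package main

import "fmt"

// pairStack mirrors pointerPath: two maps act as a stack of visited pointer pairs.
type pairStack struct {
	mx map[int]int // x pointer -> paired y pointer
	my map[int]int // y pointer -> paired x pointer
}

// push reports whether the pair was already visited and, if so, whether the
// cycle is "equal" (i.e., both pointers were previously seen together).
func (p *pairStack) push(px, py int) (equal, visited bool) {
	_, ok1 := p.mx[px]
	_, ok2 := p.my[py]
	if ok1 || ok2 {
		return p.mx[px] == py && p.my[py] == px, true
	}
	p.mx[px] = py
	p.my[py] = px
	return false, false
}

func (p *pairStack) pop(px, py int) {
	delete(p.mx, px)
	delete(p.my, py)
}

func main() {
	p := &pairStack{mx: map[int]int{}, my: map[int]int{}}
	p.push(1, 101)              // descend into the first pointer pair
	fmt.Println(p.push(1, 101)) // true true: same pair seen again -> equal cycle
	p.pop(1, 101)
}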
|
||||
|
||||
// isExported reports whether the identifier is exported.
|
||||
func isExported(id string) bool {
|
||||
r, _ := utf8.DecodeRuneInString(id)
|
||||
return unicode.IsUpper(r)
|
||||
}
|
54 vendor/github.com/google/go-cmp/cmp/report.go generated vendored Normal file
@ -0,0 +1,54 @@
|
|||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
// defaultReporter implements the reporter interface.
|
||||
//
|
||||
// As Equal serially calls the PushStep, Report, and PopStep methods, the
|
||||
// defaultReporter constructs a tree-based representation of the compared value
|
||||
// and the result of each comparison (see valueNode).
|
||||
//
|
||||
// When the String method is called, the FormatDiff method transforms the
|
||||
// valueNode tree into a textNode tree, which is a tree-based representation
|
||||
// of the textual output (see textNode).
|
||||
//
|
||||
// Lastly, the textNode.String method produces the final report as a string.
|
||||
type defaultReporter struct {
|
||||
root *valueNode
|
||||
curr *valueNode
|
||||
}
|
||||
|
||||
func (r *defaultReporter) PushStep(ps PathStep) {
|
||||
r.curr = r.curr.PushStep(ps)
|
||||
if r.root == nil {
|
||||
r.root = r.curr
|
||||
}
|
||||
}
|
||||
func (r *defaultReporter) Report(rs Result) {
|
||||
r.curr.Report(rs)
|
||||
}
|
||||
func (r *defaultReporter) PopStep() {
|
||||
r.curr = r.curr.PopStep()
|
||||
}
|
||||
|
||||
// String provides a full report of the differences detected as a structured
|
||||
// literal in pseudo-Go syntax. String may only be called after the entire tree
|
||||
// has been traversed.
|
||||
func (r *defaultReporter) String() string {
|
||||
assert(r.root != nil && r.curr == nil)
|
||||
if r.root.NumDiff == 0 {
|
||||
return ""
|
||||
}
|
||||
ptrs := new(pointerReferences)
|
||||
text := formatOptions{}.FormatDiff(r.root, ptrs)
|
||||
resolveReferences(text)
|
||||
return text.String()
|
||||
}
|
||||
|
||||
func assert(ok bool) {
|
||||
if !ok {
|
||||
panic("assertion failure")
|
||||
}
|
||||
}
|
432 vendor/github.com/google/go-cmp/cmp/report_compare.go generated vendored Normal file
@ -0,0 +1,432 @@
|
|||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
// numContextRecords is the number of surrounding equal records to print.
|
||||
const numContextRecords = 2
|
||||
|
||||
type diffMode byte
|
||||
|
||||
const (
|
||||
diffUnknown diffMode = 0
|
||||
diffIdentical diffMode = ' '
|
||||
diffRemoved diffMode = '-'
|
||||
diffInserted diffMode = '+'
|
||||
)
|
||||
|
||||
type typeMode int
|
||||
|
||||
const (
|
||||
// emitType always prints the type.
|
||||
emitType typeMode = iota
|
||||
// elideType never prints the type.
|
||||
elideType
|
||||
// autoType prints the type only for composite kinds
|
||||
// (i.e., structs, slices, arrays, and maps).
|
||||
autoType
|
||||
)
|
||||
|
||||
type formatOptions struct {
|
||||
// DiffMode controls the output mode of FormatDiff.
|
||||
//
|
||||
// If diffUnknown, then produce a diff of the x and y values.
|
||||
// If diffIdentical, then emit values as if they were equal.
|
||||
// If diffRemoved, then only emit x values (ignoring y values).
|
||||
// If diffInserted, then only emit y values (ignoring x values).
|
||||
DiffMode diffMode
|
||||
|
||||
// TypeMode controls whether to print the type for the current node.
|
||||
//
|
||||
// As a general rule of thumb, we always print the type of the next node
|
||||
// after an interface, and always elide the type of the next node after
|
||||
// a slice or map node.
|
||||
TypeMode typeMode
|
||||
|
||||
// formatValueOptions are options specific to printing reflect.Values.
|
||||
formatValueOptions
|
||||
}
|
||||
|
||||
func (opts formatOptions) WithDiffMode(d diffMode) formatOptions {
|
||||
opts.DiffMode = d
|
||||
return opts
|
||||
}
|
||||
func (opts formatOptions) WithTypeMode(t typeMode) formatOptions {
|
||||
opts.TypeMode = t
|
||||
return opts
|
||||
}
|
||||
func (opts formatOptions) WithVerbosity(level int) formatOptions {
|
||||
opts.VerbosityLevel = level
|
||||
opts.LimitVerbosity = true
|
||||
return opts
|
||||
}
|
||||
func (opts formatOptions) verbosity() uint {
|
||||
switch {
|
||||
case opts.VerbosityLevel < 0:
|
||||
return 0
|
||||
case opts.VerbosityLevel > 16:
|
||||
return 16 // some reasonable maximum to avoid shift overflow
|
||||
default:
|
||||
return uint(opts.VerbosityLevel)
|
||||
}
|
||||
}
|
||||
|
||||
const maxVerbosityPreset = 6
|
||||
|
||||
// verbosityPreset modifies the verbosity settings given an index
|
||||
// between 0 and maxVerbosityPreset, inclusive.
|
||||
func verbosityPreset(opts formatOptions, i int) formatOptions {
|
||||
opts.VerbosityLevel = int(opts.verbosity()) + 2*i
|
||||
if i > 0 {
|
||||
opts.AvoidStringer = true
|
||||
}
|
||||
if i >= maxVerbosityPreset {
|
||||
opts.PrintAddresses = true
|
||||
opts.QualifiedNames = true
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
// FormatDiff converts a valueNode tree into a textNode tree, where the latter
|
||||
// is a textual representation of the differences detected in the former.
|
||||
func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) {
|
||||
if opts.DiffMode == diffIdentical {
|
||||
opts = opts.WithVerbosity(1)
|
||||
} else if opts.verbosity() < 3 {
|
||||
opts = opts.WithVerbosity(3)
|
||||
}
|
||||
|
||||
// Check whether we have specialized formatting for this node.
|
||||
// This is not necessary, but helpful for producing more readable outputs.
|
||||
if opts.CanFormatDiffSlice(v) {
|
||||
return opts.FormatDiffSlice(v)
|
||||
}
|
||||
|
||||
var parentKind reflect.Kind
|
||||
if v.parent != nil && v.parent.TransformerName == "" {
|
||||
parentKind = v.parent.Type.Kind()
|
||||
}
|
||||
|
||||
// For leaf nodes, format the value based on the reflect.Values alone.
|
||||
if v.MaxDepth == 0 {
|
||||
switch opts.DiffMode {
|
||||
case diffUnknown, diffIdentical:
|
||||
// Format Equal.
|
||||
if v.NumDiff == 0 {
|
||||
outx := opts.FormatValue(v.ValueX, parentKind, ptrs)
|
||||
outy := opts.FormatValue(v.ValueY, parentKind, ptrs)
|
||||
if v.NumIgnored > 0 && v.NumSame == 0 {
|
||||
return textEllipsis
|
||||
} else if outx.Len() < outy.Len() {
|
||||
return outx
|
||||
} else {
|
||||
return outy
|
||||
}
|
||||
}
|
||||
|
||||
// Format unequal.
|
||||
assert(opts.DiffMode == diffUnknown)
|
||||
var list textList
|
||||
outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs)
|
||||
outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs)
|
||||
for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
|
||||
opts2 := verbosityPreset(opts, i).WithTypeMode(elideType)
|
||||
outx = opts2.FormatValue(v.ValueX, parentKind, ptrs)
|
||||
outy = opts2.FormatValue(v.ValueY, parentKind, ptrs)
|
||||
}
|
||||
if outx != nil {
|
||||
list = append(list, textRecord{Diff: '-', Value: outx})
|
||||
}
|
||||
if outy != nil {
|
||||
list = append(list, textRecord{Diff: '+', Value: outy})
|
||||
}
|
||||
return opts.WithTypeMode(emitType).FormatType(v.Type, list)
|
||||
case diffRemoved:
|
||||
return opts.FormatValue(v.ValueX, parentKind, ptrs)
|
||||
case diffInserted:
|
||||
return opts.FormatValue(v.ValueY, parentKind, ptrs)
|
||||
default:
|
||||
panic("invalid diff mode")
|
||||
}
|
||||
}
|
||||
|
||||
// Register slice element to support cycle detection.
|
||||
if parentKind == reflect.Slice {
|
||||
ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true)
|
||||
defer ptrs.Pop()
|
||||
defer func() { out = wrapTrunkReferences(ptrRefs, out) }()
|
||||
}
|
||||
|
||||
// Descend into the child value node.
|
||||
if v.TransformerName != "" {
|
||||
out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
|
||||
out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"}
|
||||
return opts.FormatType(v.Type, out)
|
||||
} else {
|
||||
switch k := v.Type.Kind(); k {
|
||||
case reflect.Struct, reflect.Array, reflect.Slice:
|
||||
out = opts.formatDiffList(v.Records, k, ptrs)
|
||||
out = opts.FormatType(v.Type, out)
|
||||
case reflect.Map:
|
||||
// Register map to support cycle detection.
|
||||
ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
|
||||
defer ptrs.Pop()
|
||||
|
||||
out = opts.formatDiffList(v.Records, k, ptrs)
|
||||
out = wrapTrunkReferences(ptrRefs, out)
|
||||
out = opts.FormatType(v.Type, out)
|
||||
case reflect.Ptr:
|
||||
// Register pointer to support cycle detection.
|
||||
ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
|
||||
defer ptrs.Pop()
|
||||
|
||||
out = opts.FormatDiff(v.Value, ptrs)
|
||||
out = wrapTrunkReferences(ptrRefs, out)
|
||||
out = &textWrap{Prefix: "&", Value: out}
|
||||
case reflect.Interface:
|
||||
out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
|
||||
default:
|
||||
panic(fmt.Sprintf("%v cannot have children", k))
|
||||
}
|
||||
return out
|
||||
}
|
||||
}
|
||||
|
||||
func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode {
|
||||
// Derive record name based on the data structure kind.
|
||||
var name string
|
||||
var formatKey func(reflect.Value) string
|
||||
switch k {
|
||||
case reflect.Struct:
|
||||
name = "field"
|
||||
opts = opts.WithTypeMode(autoType)
|
||||
formatKey = func(v reflect.Value) string { return v.String() }
|
||||
case reflect.Slice, reflect.Array:
|
||||
name = "element"
|
||||
opts = opts.WithTypeMode(elideType)
|
||||
formatKey = func(reflect.Value) string { return "" }
|
||||
case reflect.Map:
|
||||
name = "entry"
|
||||
opts = opts.WithTypeMode(elideType)
|
||||
formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) }
|
||||
}
|
||||
|
||||
maxLen := -1
|
||||
if opts.LimitVerbosity {
|
||||
if opts.DiffMode == diffIdentical {
|
||||
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
|
||||
} else {
|
||||
maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc...
|
||||
}
|
||||
opts.VerbosityLevel--
|
||||
}
|
||||
|
||||
// Handle unification.
|
||||
switch opts.DiffMode {
|
||||
case diffIdentical, diffRemoved, diffInserted:
|
||||
var list textList
|
||||
var deferredEllipsis bool // Add final "..." to indicate records were dropped
|
||||
for _, r := range recs {
|
||||
if len(list) == maxLen {
|
||||
deferredEllipsis = true
|
||||
break
|
||||
}
|
||||
|
||||
// Elide struct fields that are zero value.
|
||||
if k == reflect.Struct {
|
||||
var isZero bool
|
||||
switch opts.DiffMode {
|
||||
case diffIdentical:
|
||||
isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY)
|
||||
case diffRemoved:
|
||||
isZero = value.IsZero(r.Value.ValueX)
|
||||
case diffInserted:
|
||||
isZero = value.IsZero(r.Value.ValueY)
|
||||
}
|
||||
if isZero {
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Elide ignored nodes.
|
||||
if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 {
|
||||
deferredEllipsis = !(k == reflect.Slice || k == reflect.Array)
|
||||
if !deferredEllipsis {
|
||||
list.AppendEllipsis(diffStats{})
|
||||
}
|
||||
continue
|
||||
}
|
||||
if out := opts.FormatDiff(r.Value, ptrs); out != nil {
|
||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
||||
}
|
||||
}
|
||||
if deferredEllipsis {
|
||||
list.AppendEllipsis(diffStats{})
|
||||
}
|
||||
return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
case diffUnknown:
|
||||
default:
|
||||
panic("invalid diff mode")
|
||||
}
|
||||
|
||||
// Handle differencing.
|
||||
var numDiffs int
|
||||
var list textList
|
||||
var keys []reflect.Value // invariant: len(list) == len(keys)
|
||||
groups := coalesceAdjacentRecords(name, recs)
|
||||
maxGroup := diffStats{Name: name}
|
||||
for i, ds := range groups {
|
||||
if maxLen >= 0 && numDiffs >= maxLen {
|
||||
maxGroup = maxGroup.Append(ds)
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle equal records.
|
||||
if ds.NumDiff() == 0 {
|
||||
// Compute the number of leading and trailing records to print.
|
||||
var numLo, numHi int
|
||||
numEqual := ds.NumIgnored + ds.NumIdentical
|
||||
for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 {
|
||||
if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
|
||||
break
|
||||
}
|
||||
numLo++
|
||||
}
|
||||
for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
|
||||
if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
|
||||
break
|
||||
}
|
||||
numHi++
|
||||
}
|
||||
if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 {
|
||||
numHi++ // Avoid pointless coalescing of a single equal record
|
||||
}
|
||||
|
||||
// Format the equal values.
|
||||
for _, r := range recs[:numLo] {
|
||||
out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
|
||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
||||
keys = append(keys, r.Key)
|
||||
}
|
||||
if numEqual > numLo+numHi {
|
||||
ds.NumIdentical -= numLo + numHi
|
||||
list.AppendEllipsis(ds)
|
||||
for len(keys) < len(list) {
|
||||
keys = append(keys, reflect.Value{})
|
||||
}
|
||||
}
|
||||
for _, r := range recs[numEqual-numHi : numEqual] {
|
||||
out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
|
||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
||||
keys = append(keys, r.Key)
|
||||
}
|
||||
recs = recs[numEqual:]
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle unequal records.
|
||||
for _, r := range recs[:ds.NumDiff()] {
|
||||
switch {
|
||||
case opts.CanFormatDiffSlice(r.Value):
|
||||
out := opts.FormatDiffSlice(r.Value)
|
||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
||||
keys = append(keys, r.Key)
|
||||
case r.Value.NumChildren == r.Value.MaxDepth:
|
||||
outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
|
||||
outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
|
||||
for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
|
||||
opts2 := verbosityPreset(opts, i)
|
||||
outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
|
||||
outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
|
||||
}
|
||||
if outx != nil {
|
||||
list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx})
|
||||
keys = append(keys, r.Key)
|
||||
}
|
||||
if outy != nil {
|
||||
list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy})
|
||||
keys = append(keys, r.Key)
|
||||
}
|
||||
default:
|
||||
out := opts.FormatDiff(r.Value, ptrs)
|
||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
||||
keys = append(keys, r.Key)
|
||||
}
|
||||
}
|
||||
recs = recs[ds.NumDiff():]
|
||||
numDiffs += ds.NumDiff()
|
||||
}
|
||||
if maxGroup.IsZero() {
|
||||
assert(len(recs) == 0)
|
||||
} else {
|
||||
list.AppendEllipsis(maxGroup)
|
||||
for len(keys) < len(list) {
|
||||
keys = append(keys, reflect.Value{})
|
||||
}
|
||||
}
|
||||
assert(len(list) == len(keys))
|
||||
|
||||
// For maps, the default formatting logic uses fmt.Stringer which may
|
||||
// produce ambiguous output. Avoid calling String to disambiguate.
|
||||
if k == reflect.Map {
|
||||
var ambiguous bool
|
||||
seenKeys := map[string]reflect.Value{}
|
||||
for i, currKey := range keys {
|
||||
if currKey.IsValid() {
|
||||
strKey := list[i].Key
|
||||
prevKey, seen := seenKeys[strKey]
|
||||
if seen && prevKey.CanInterface() && currKey.CanInterface() {
|
||||
ambiguous = prevKey.Interface() != currKey.Interface()
|
||||
if ambiguous {
|
||||
break
|
||||
}
|
||||
}
|
||||
seenKeys[strKey] = currKey
|
||||
}
|
||||
}
|
||||
if ambiguous {
|
||||
for i, k := range keys {
|
||||
if k.IsValid() {
|
||||
list[i].Key = formatMapKey(k, true, ptrs)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
}
|
||||
|
||||
// coalesceAdjacentRecords coalesces the list of records into groups of
|
||||
// adjacent equal, or unequal counts.
|
||||
func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) {
|
||||
var prevCase int // Arbitrary index into which case last occurred
|
||||
lastStats := func(i int) *diffStats {
|
||||
if prevCase != i {
|
||||
groups = append(groups, diffStats{Name: name})
|
||||
prevCase = i
|
||||
}
|
||||
return &groups[len(groups)-1]
|
||||
}
|
||||
for _, r := range recs {
|
||||
switch rv := r.Value; {
|
||||
case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0:
|
||||
lastStats(1).NumIgnored++
|
||||
case rv.NumDiff == 0:
|
||||
lastStats(1).NumIdentical++
|
||||
case rv.NumDiff > 0 && !rv.ValueY.IsValid():
|
||||
lastStats(2).NumRemoved++
|
||||
case rv.NumDiff > 0 && !rv.ValueX.IsValid():
|
||||
lastStats(2).NumInserted++
|
||||
default:
|
||||
lastStats(2).NumModified++
|
||||
}
|
||||
}
|
||||
return groups
|
||||
}
|
264 vendor/github.com/google/go-cmp/cmp/report_references.go generated vendored Normal file
@ -0,0 +1,264 @@
|
|||
// Copyright 2020, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/flags"
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
const (
|
||||
pointerDelimPrefix = "⟪"
|
||||
pointerDelimSuffix = "⟫"
|
||||
)
|
||||
|
||||
// formatPointer prints the address of the pointer.
|
||||
func formatPointer(p value.Pointer, withDelims bool) string {
|
||||
v := p.Uintptr()
|
||||
if flags.Deterministic {
|
||||
v = 0xdeadf00f // Only used for stable testing purposes
|
||||
}
|
||||
if withDelims {
|
||||
return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix
|
||||
}
|
||||
return formatHex(uint64(v))
|
||||
}
|
||||
|
||||
// pointerReferences is a stack of pointers visited so far.
|
||||
type pointerReferences [][2]value.Pointer
|
||||
|
||||
func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) {
|
||||
if deref && vx.IsValid() {
|
||||
vx = vx.Addr()
|
||||
}
|
||||
if deref && vy.IsValid() {
|
||||
vy = vy.Addr()
|
||||
}
|
||||
switch d {
|
||||
case diffUnknown, diffIdentical:
|
||||
pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)}
|
||||
case diffRemoved:
|
||||
pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}}
|
||||
case diffInserted:
|
||||
pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)}
|
||||
}
|
||||
*ps = append(*ps, pp)
|
||||
return pp
|
||||
}
|
||||
|
||||
func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) {
|
||||
p = value.PointerOf(v)
|
||||
for _, pp := range *ps {
|
||||
if p == pp[0] || p == pp[1] {
|
||||
return p, true
|
||||
}
|
||||
}
|
||||
*ps = append(*ps, [2]value.Pointer{p, p})
|
||||
return p, false
|
||||
}
|
||||
|
||||
func (ps *pointerReferences) Pop() {
|
||||
*ps = (*ps)[:len(*ps)-1]
|
||||
}
|
||||
|
||||
// trunkReferences is metadata for a textNode indicating that the sub-tree
|
||||
// represents the value for either pointer in a pair of references.
|
||||
type trunkReferences struct{ pp [2]value.Pointer }
|
||||
|
||||
// trunkReference is metadata for a textNode indicating that the sub-tree
|
||||
// represents the value for the given pointer reference.
|
||||
type trunkReference struct{ p value.Pointer }
|
||||
|
||||
// leafReference is metadata for a textNode indicating that the value is
|
||||
// truncated as it refers to another part of the tree (i.e., a trunk).
|
||||
type leafReference struct{ p value.Pointer }
|
||||
|
||||
func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode {
|
||||
switch {
|
||||
case pp[0].IsNil():
|
||||
return &textWrap{Value: s, Metadata: trunkReference{pp[1]}}
|
||||
case pp[1].IsNil():
|
||||
return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
|
||||
case pp[0] == pp[1]:
|
||||
return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
|
||||
default:
|
||||
return &textWrap{Value: s, Metadata: trunkReferences{pp}}
|
||||
}
|
||||
}
|
||||
func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode {
|
||||
var prefix string
|
||||
if printAddress {
|
||||
prefix = formatPointer(p, true)
|
||||
}
|
||||
return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}}
|
||||
}
|
||||
func makeLeafReference(p value.Pointer, printAddress bool) textNode {
|
||||
out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"}
|
||||
var prefix string
|
||||
if printAddress {
|
||||
prefix = formatPointer(p, true)
|
||||
}
|
||||
return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}}
|
||||
}
|
||||
|
||||
// resolveReferences walks the textNode tree searching for any leaf reference
|
||||
// metadata and resolves each against the corresponding trunk references.
|
||||
// Since pointer addresses in memory are not particularly readable to the user,
|
||||
// it replaces each pointer value with an arbitrary and unique reference ID.
|
||||
func resolveReferences(s textNode) {
|
||||
var walkNodes func(textNode, func(textNode))
|
||||
walkNodes = func(s textNode, f func(textNode)) {
|
||||
f(s)
|
||||
switch s := s.(type) {
|
||||
case *textWrap:
|
||||
walkNodes(s.Value, f)
|
||||
case textList:
|
||||
for _, r := range s {
|
||||
walkNodes(r.Value, f)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Collect all trunks and leaves with reference metadata.
|
||||
var trunks, leaves []*textWrap
|
||||
walkNodes(s, func(s textNode) {
|
||||
if s, ok := s.(*textWrap); ok {
|
||||
switch s.Metadata.(type) {
|
||||
case leafReference:
|
||||
leaves = append(leaves, s)
|
||||
case trunkReference, trunkReferences:
|
||||
trunks = append(trunks, s)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// No leaf references to resolve.
|
||||
if len(leaves) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Collect the set of all leaf references to resolve.
|
||||
leafPtrs := make(map[value.Pointer]bool)
|
||||
for _, leaf := range leaves {
|
||||
leafPtrs[leaf.Metadata.(leafReference).p] = true
|
||||
}
|
||||
|
||||
// Collect the set of trunk pointers that are always paired together.
|
||||
// This allows us to assign a single ID to both pointers for brevity.
|
||||
// If a pointer in a pair ever occurs by itself or as a different pair,
|
||||
// then the pair is broken.
|
||||
pairedTrunkPtrs := make(map[value.Pointer]value.Pointer)
|
||||
unpair := func(p value.Pointer) {
|
||||
if !pairedTrunkPtrs[p].IsNil() {
|
||||
pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half
|
||||
}
|
||||
pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half
|
||||
}
|
||||
for _, trunk := range trunks {
|
||||
switch p := trunk.Metadata.(type) {
|
||||
case trunkReference:
|
||||
unpair(p.p) // standalone pointer cannot be part of a pair
|
||||
case trunkReferences:
|
||||
p0, ok0 := pairedTrunkPtrs[p.pp[0]]
|
||||
p1, ok1 := pairedTrunkPtrs[p.pp[1]]
|
||||
switch {
|
||||
case !ok0 && !ok1:
|
||||
// Register the newly seen pair.
|
||||
pairedTrunkPtrs[p.pp[0]] = p.pp[1]
|
||||
pairedTrunkPtrs[p.pp[1]] = p.pp[0]
|
||||
case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]:
|
||||
// Exact pair already seen; do nothing.
|
||||
default:
|
||||
// Pair conflicts with some other pair; break all pairs.
|
||||
unpair(p.pp[0])
|
||||
unpair(p.pp[1])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Correlate each pointer referenced by leaves to a unique identifier,
|
||||
// and print the IDs for each trunk that matches those pointers.
|
||||
var nextID uint
|
||||
ptrIDs := make(map[value.Pointer]uint)
|
||||
newID := func() uint {
|
||||
id := nextID
|
||||
nextID++
|
||||
return id
|
||||
}
|
||||
for _, trunk := range trunks {
|
||||
switch p := trunk.Metadata.(type) {
|
||||
case trunkReference:
|
||||
if print := leafPtrs[p.p]; print {
|
||||
id, ok := ptrIDs[p.p]
|
||||
if !ok {
|
||||
id = newID()
|
||||
ptrIDs[p.p] = id
|
||||
}
|
||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
|
||||
}
|
||||
case trunkReferences:
|
||||
print0 := leafPtrs[p.pp[0]]
|
||||
print1 := leafPtrs[p.pp[1]]
|
||||
if print0 || print1 {
|
||||
id0, ok0 := ptrIDs[p.pp[0]]
|
||||
id1, ok1 := ptrIDs[p.pp[1]]
|
||||
isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0]
|
||||
if isPair {
|
||||
var id uint
|
||||
assert(ok0 == ok1) // must be seen together or not at all
|
||||
if ok0 {
|
||||
assert(id0 == id1) // must have the same ID
|
||||
id = id0
|
||||
} else {
|
||||
id = newID()
|
||||
ptrIDs[p.pp[0]] = id
|
||||
ptrIDs[p.pp[1]] = id
|
||||
}
|
||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
|
||||
} else {
|
||||
if print0 && !ok0 {
|
||||
id0 = newID()
|
||||
ptrIDs[p.pp[0]] = id0
|
||||
}
|
||||
if print1 && !ok1 {
|
||||
id1 = newID()
|
||||
ptrIDs[p.pp[1]] = id1
|
||||
}
|
||||
switch {
|
||||
case print0 && print1:
|
||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1))
|
||||
case print0:
|
||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0))
|
||||
case print1:
|
||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Update all leaf references with the unique identifier.
|
||||
for _, leaf := range leaves {
|
||||
if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok {
|
||||
leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func formatReference(id uint) string {
|
||||
return fmt.Sprintf("ref#%d", id)
|
||||
}
|
||||
|
||||
func updateReferencePrefix(prefix, ref string) string {
|
||||
if prefix == "" {
|
||||
return pointerDelimPrefix + ref + pointerDelimSuffix
|
||||
}
|
||||
suffix := strings.TrimPrefix(prefix, pointerDelimPrefix)
|
||||
return pointerDelimPrefix + ref + ": " + suffix
|
||||
}
|
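A minimal sketch of input that exercises the reference IDs assigned above; the node type and values are hypothetical, and only cmp.Diff plus the ref#N labels produced by formatReference come from this package.
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// node is a hypothetical self-referential type used only for illustration.
type node struct {
	Value int
	Next  *node
}

func main() {
	// Two one-element cycles that differ in Value; when printed, the shared
	// pointers are expected to carry the ref#N identifiers assigned above.
	x := &node{Value: 1}
	x.Next = x
	y := &node{Value: 2}
	y.Next = y
	fmt.Println(cmp.Diff(x, y))
}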
402
vendor/github.com/google/go-cmp/cmp/report_reflect.go
generated
vendored
Normal file
|
@ -0,0 +1,402 @@
|
|||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
type formatValueOptions struct {
|
||||
// AvoidStringer controls whether to avoid calling custom stringer
|
||||
// methods like error.Error or fmt.Stringer.String.
|
||||
AvoidStringer bool
|
||||
|
||||
// PrintAddresses controls whether to print the address of all pointers,
|
||||
// slice elements, and maps.
|
||||
PrintAddresses bool
|
||||
|
||||
// QualifiedNames controls whether FormatType uses the fully qualified name
|
||||
// (including the full package path as opposed to just the package name).
|
||||
QualifiedNames bool
|
||||
|
||||
// VerbosityLevel controls the amount of output to produce.
|
||||
// A higher value produces more output. A value of zero or lower produces
|
||||
// no output (represented using an ellipsis).
|
||||
// If LimitVerbosity is false, then the level is treated as infinite.
|
||||
VerbosityLevel int
|
||||
|
||||
// LimitVerbosity specifies that formatting should respect VerbosityLevel.
|
||||
LimitVerbosity bool
|
||||
}
|
||||
|
||||
// FormatType prints the type as if it were wrapping s.
|
||||
// This may return s as-is depending on the current type and TypeMode mode.
|
||||
func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode {
|
||||
// Check whether to emit the type or not.
|
||||
switch opts.TypeMode {
|
||||
case autoType:
|
||||
switch t.Kind() {
|
||||
case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map:
|
||||
if s.Equal(textNil) {
|
||||
return s
|
||||
}
|
||||
default:
|
||||
return s
|
||||
}
|
||||
if opts.DiffMode == diffIdentical {
|
||||
return s // elide type for identical nodes
|
||||
}
|
||||
case elideType:
|
||||
return s
|
||||
}
|
||||
|
||||
// Determine the type label, applying special handling for unnamed types.
|
||||
typeName := value.TypeString(t, opts.QualifiedNames)
|
||||
if t.Name() == "" {
|
||||
// According to Go grammar, certain type literals contain symbols that
|
||||
// do not strongly bind to the next lexicographical token (e.g., *T).
|
||||
switch t.Kind() {
|
||||
case reflect.Chan, reflect.Func, reflect.Ptr:
|
||||
typeName = "(" + typeName + ")"
|
||||
}
|
||||
}
|
||||
return &textWrap{Prefix: typeName, Value: wrapParens(s)}
|
||||
}
|
||||
|
||||
// wrapParens wraps s with a set of parenthesis, but avoids it if the
|
||||
// wrapped node itself is already surrounded by a pair of parenthesis or braces.
|
||||
// It handles unwrapping one level of pointer-reference nodes.
|
||||
func wrapParens(s textNode) textNode {
|
||||
var refNode *textWrap
|
||||
if s2, ok := s.(*textWrap); ok {
|
||||
// Unwrap a single pointer reference node.
|
||||
switch s2.Metadata.(type) {
|
||||
case leafReference, trunkReference, trunkReferences:
|
||||
refNode = s2
|
||||
if s3, ok := refNode.Value.(*textWrap); ok {
|
||||
s2 = s3
|
||||
}
|
||||
}
|
||||
|
||||
// Already has delimiters that make parenthesis unnecessary.
|
||||
hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")")
|
||||
hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}")
|
||||
if hasParens || hasBraces {
|
||||
return s
|
||||
}
|
||||
}
|
||||
if refNode != nil {
|
||||
refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"}
|
||||
return s
|
||||
}
|
||||
return &textWrap{Prefix: "(", Value: s, Suffix: ")"}
|
||||
}
|
||||
|
||||
// FormatValue prints the reflect.Value, taking extra care to avoid descending
|
||||
// into pointers already in ptrs. As pointers are visited, ptrs is also updated.
|
||||
func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) {
|
||||
if !v.IsValid() {
|
||||
return nil
|
||||
}
|
||||
t := v.Type()
|
||||
|
||||
// Check slice element for cycles.
|
||||
if parentKind == reflect.Slice {
|
||||
ptrRef, visited := ptrs.Push(v.Addr())
|
||||
if visited {
|
||||
return makeLeafReference(ptrRef, false)
|
||||
}
|
||||
defer ptrs.Pop()
|
||||
defer func() { out = wrapTrunkReference(ptrRef, false, out) }()
|
||||
}
|
||||
|
||||
// Check whether there is an Error or String method to call.
|
||||
if !opts.AvoidStringer && v.CanInterface() {
|
||||
// Avoid calling Error or String methods on nil receivers since many
|
||||
// implementations crash when doing so.
|
||||
if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() {
|
||||
var prefix, strVal string
|
||||
func() {
|
||||
// Swallow and ignore any panics from String or Error.
|
||||
defer func() { recover() }()
|
||||
switch v := v.Interface().(type) {
|
||||
case error:
|
||||
strVal = v.Error()
|
||||
prefix = "e"
|
||||
case fmt.Stringer:
|
||||
strVal = v.String()
|
||||
prefix = "s"
|
||||
}
|
||||
}()
|
||||
if prefix != "" {
|
||||
return opts.formatString(prefix, strVal)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check whether to explicitly wrap the result with the type.
|
||||
var skipType bool
|
||||
defer func() {
|
||||
if !skipType {
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
}()
|
||||
|
||||
switch t.Kind() {
|
||||
case reflect.Bool:
|
||||
return textLine(fmt.Sprint(v.Bool()))
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return textLine(fmt.Sprint(v.Int()))
|
||||
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return textLine(fmt.Sprint(v.Uint()))
|
||||
case reflect.Uint8:
|
||||
if parentKind == reflect.Slice || parentKind == reflect.Array {
|
||||
return textLine(formatHex(v.Uint()))
|
||||
}
|
||||
return textLine(fmt.Sprint(v.Uint()))
|
||||
case reflect.Uintptr:
|
||||
return textLine(formatHex(v.Uint()))
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return textLine(fmt.Sprint(v.Float()))
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return textLine(fmt.Sprint(v.Complex()))
|
||||
case reflect.String:
|
||||
return opts.formatString("", v.String())
|
||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||
return textLine(formatPointer(value.PointerOf(v), true))
|
||||
case reflect.Struct:
|
||||
var list textList
|
||||
v := makeAddressable(v) // needed for retrieveUnexportedField
|
||||
maxLen := v.NumField()
|
||||
if opts.LimitVerbosity {
|
||||
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
|
||||
opts.VerbosityLevel--
|
||||
}
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
vv := v.Field(i)
|
||||
if value.IsZero(vv) {
|
||||
continue // Elide fields with zero values
|
||||
}
|
||||
if len(list) == maxLen {
|
||||
list.AppendEllipsis(diffStats{})
|
||||
break
|
||||
}
|
||||
sf := t.Field(i)
|
||||
if supportExporters && !isExported(sf.Name) {
|
||||
vv = retrieveUnexportedField(v, sf, true)
|
||||
}
|
||||
s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs)
|
||||
list = append(list, textRecord{Key: sf.Name, Value: s})
|
||||
}
|
||||
return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
case reflect.Slice:
|
||||
if v.IsNil() {
|
||||
return textNil
|
||||
}
|
||||
|
||||
// Check whether this is a []byte of text data.
|
||||
if t.Elem() == reflect.TypeOf(byte(0)) {
|
||||
b := v.Bytes()
|
||||
isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) }
|
||||
if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 {
|
||||
out = opts.formatString("", string(b))
|
||||
return opts.WithTypeMode(emitType).FormatType(t, out)
|
||||
}
|
||||
}
|
||||
|
||||
fallthrough
|
||||
case reflect.Array:
|
||||
maxLen := v.Len()
|
||||
if opts.LimitVerbosity {
|
||||
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
|
||||
opts.VerbosityLevel--
|
||||
}
|
||||
var list textList
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if len(list) == maxLen {
|
||||
list.AppendEllipsis(diffStats{})
|
||||
break
|
||||
}
|
||||
s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs)
|
||||
list = append(list, textRecord{Value: s})
|
||||
}
|
||||
|
||||
out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
if t.Kind() == reflect.Slice && opts.PrintAddresses {
|
||||
header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap())
|
||||
out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out}
|
||||
}
|
||||
return out
|
||||
case reflect.Map:
|
||||
if v.IsNil() {
|
||||
return textNil
|
||||
}
|
||||
|
||||
// Check pointer for cycles.
|
||||
ptrRef, visited := ptrs.Push(v)
|
||||
if visited {
|
||||
return makeLeafReference(ptrRef, opts.PrintAddresses)
|
||||
}
|
||||
defer ptrs.Pop()
|
||||
|
||||
maxLen := v.Len()
|
||||
if opts.LimitVerbosity {
|
||||
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
|
||||
opts.VerbosityLevel--
|
||||
}
|
||||
var list textList
|
||||
for _, k := range value.SortKeys(v.MapKeys()) {
|
||||
if len(list) == maxLen {
|
||||
list.AppendEllipsis(diffStats{})
|
||||
break
|
||||
}
|
||||
sk := formatMapKey(k, false, ptrs)
|
||||
sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs)
|
||||
list = append(list, textRecord{Key: sk, Value: sv})
|
||||
}
|
||||
|
||||
out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
|
||||
return out
|
||||
case reflect.Ptr:
|
||||
if v.IsNil() {
|
||||
return textNil
|
||||
}
|
||||
|
||||
// Check pointer for cycles.
|
||||
ptrRef, visited := ptrs.Push(v)
|
||||
if visited {
|
||||
out = makeLeafReference(ptrRef, opts.PrintAddresses)
|
||||
return &textWrap{Prefix: "&", Value: out}
|
||||
}
|
||||
defer ptrs.Pop()
|
||||
|
||||
skipType = true // Let the underlying value print the type instead
|
||||
out = opts.FormatValue(v.Elem(), t.Kind(), ptrs)
|
||||
out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
|
||||
out = &textWrap{Prefix: "&", Value: out}
|
||||
return out
|
||||
case reflect.Interface:
|
||||
if v.IsNil() {
|
||||
return textNil
|
||||
}
|
||||
// Interfaces accept different concrete types,
|
||||
// so configure the underlying value to explicitly print the type.
|
||||
skipType = true // Print the concrete type instead
|
||||
return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs)
|
||||
default:
|
||||
panic(fmt.Sprintf("%v kind not handled", v.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
func (opts formatOptions) formatString(prefix, s string) textNode {
|
||||
maxLen := len(s)
|
||||
maxLines := strings.Count(s, "\n") + 1
|
||||
if opts.LimitVerbosity {
|
||||
maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc...
|
||||
maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
|
||||
}
|
||||
|
||||
// For multiline strings, use the triple-quote syntax,
|
||||
// but only use it when printing removed or inserted nodes since
|
||||
// we only want the extra verbosity for those cases.
|
||||
lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n")
|
||||
isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+')
|
||||
for i := 0; i < len(lines) && isTripleQuoted; i++ {
|
||||
lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
|
||||
isPrintable := func(r rune) bool {
|
||||
return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
|
||||
}
|
||||
line := lines[i]
|
||||
isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen
|
||||
}
|
||||
if isTripleQuoted {
|
||||
var list textList
|
||||
list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
|
||||
for i, line := range lines {
|
||||
if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 {
|
||||
comment := commentString(fmt.Sprintf("%d elided lines", numElided))
|
||||
list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment})
|
||||
break
|
||||
}
|
||||
list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true})
|
||||
}
|
||||
list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
|
||||
return &textWrap{Prefix: "(", Value: list, Suffix: ")"}
|
||||
}
|
||||
|
||||
// Format the string as a single-line quoted string.
|
||||
if len(s) > maxLen+len(textEllipsis) {
|
||||
return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis))
|
||||
}
|
||||
return textLine(prefix + formatString(s))
|
||||
}
|
||||
|
||||
// formatMapKey formats v as if it were a map key.
|
||||
// The result is guaranteed to be a single line.
|
||||
func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string {
|
||||
var opts formatOptions
|
||||
opts.DiffMode = diffIdentical
|
||||
opts.TypeMode = elideType
|
||||
opts.PrintAddresses = disambiguate
|
||||
opts.AvoidStringer = disambiguate
|
||||
opts.QualifiedNames = disambiguate
|
||||
opts.VerbosityLevel = maxVerbosityPreset
|
||||
opts.LimitVerbosity = true
|
||||
s := opts.FormatValue(v, reflect.Map, ptrs).String()
|
||||
return strings.TrimSpace(s)
|
||||
}
|
||||
|
||||
// formatString prints s as a double-quoted or backtick-quoted string.
|
||||
func formatString(s string) string {
|
||||
// Use quoted string if it is the same length as a raw string literal.
|
||||
// Otherwise, attempt to use the raw string form.
|
||||
qs := strconv.Quote(s)
|
||||
if len(qs) == 1+len(s)+1 {
|
||||
return qs
|
||||
}
|
||||
|
||||
// Disallow newlines to ensure output is a single line.
|
||||
// Only allow printable runes for readability purposes.
|
||||
rawInvalid := func(r rune) bool {
|
||||
return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
|
||||
}
|
||||
if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 {
|
||||
return "`" + s + "`"
|
||||
}
|
||||
return qs
|
||||
}
|
||||
|
||||
// formatHex prints u as a hexadecimal integer in Go notation.
|
||||
func formatHex(u uint64) string {
|
||||
var f string
|
||||
switch {
|
||||
case u <= 0xff:
|
||||
f = "0x%02x"
|
||||
case u <= 0xffff:
|
||||
f = "0x%04x"
|
||||
case u <= 0xffffff:
|
||||
f = "0x%06x"
|
||||
case u <= 0xffffffff:
|
||||
f = "0x%08x"
|
||||
case u <= 0xffffffffff:
|
||||
f = "0x%010x"
|
||||
case u <= 0xffffffffffff:
|
||||
f = "0x%012x"
|
||||
case u <= 0xffffffffffffff:
|
||||
f = "0x%014x"
|
||||
case u <= 0xffffffffffffffff:
|
||||
f = "0x%016x"
|
||||
}
|
||||
return fmt.Sprintf(f, u)
|
||||
}
|
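A quick sketch (not part of the vendored file) of what the width ladder in formatHex yields; it assumes the surrounding cmp package context, since formatHex is unexported.
for _, u := range []uint64{0x0, 0xab, 0xabc, 0xabcdef12} {
	fmt.Println(formatHex(u)) // "0x00", "0xab", "0x0abc", "0xabcdef12"
}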
613
vendor/github.com/google/go-cmp/cmp/report_slices.go
generated
vendored
Normal file
|
@ -0,0 +1,613 @@
|
|||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/diff"
|
||||
)
|
||||
|
||||
// CanFormatDiffSlice reports whether we support custom formatting for nodes
|
||||
// that are slices of primitive kinds or strings.
|
||||
func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
|
||||
switch {
|
||||
case opts.DiffMode != diffUnknown:
|
||||
return false // Must be formatting in diff mode
|
||||
case v.NumDiff == 0:
|
||||
return false // No differences detected
|
||||
case !v.ValueX.IsValid() || !v.ValueY.IsValid():
|
||||
return false // Both values must be valid
|
||||
case v.NumIgnored > 0:
|
||||
return false // Some ignore option was used
|
||||
case v.NumTransformed > 0:
|
||||
return false // Some transform option was used
|
||||
case v.NumCompared > 1:
|
||||
return false // More than one comparison was used
|
||||
case v.NumCompared == 1 && v.Type.Name() != "":
|
||||
// The need for cmp to check applicability of options on every element
|
||||
// in a slice is a significant performance detriment for large []byte.
|
||||
// The workaround is to specify Comparer(bytes.Equal),
|
||||
// which enables cmp to compare []byte more efficiently.
|
||||
// If they differ, we still want to provide batched diffing.
|
||||
// The logic disallows named types since they tend to have their own
|
||||
// String method, with nicer formatting than what this provides.
|
||||
return false
|
||||
}
|
||||
|
||||
// Check whether this is an interface with the same concrete types.
|
||||
t := v.Type
|
||||
vx, vy := v.ValueX, v.ValueY
|
||||
if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() {
|
||||
vx, vy = vx.Elem(), vy.Elem()
|
||||
t = vx.Type()
|
||||
}
|
||||
|
||||
// Check whether we provide specialized diffing for this type.
|
||||
switch t.Kind() {
|
||||
case reflect.String:
|
||||
case reflect.Array, reflect.Slice:
|
||||
// Only slices of primitive types have specialized handling.
|
||||
switch t.Elem().Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
|
||||
reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
|
||||
default:
|
||||
return false
|
||||
}
|
||||
|
||||
// Both slice values have to be non-empty.
|
||||
if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) {
|
||||
return false
|
||||
}
|
||||
|
||||
// If a sufficient number of elements already differ,
|
||||
// use specialized formatting even if length requirement is not met.
|
||||
if v.NumDiff > v.NumSame {
|
||||
return true
|
||||
}
|
||||
default:
|
||||
return false
|
||||
}
|
||||
|
||||
// Use specialized string diffing for longer slices or strings.
|
||||
const minLength = 64
|
||||
return vx.Len() >= minLength && vy.Len() >= minLength
|
||||
}
|
||||
|
||||
// FormatDiffSlice prints a diff for the slices (or strings) represented by v.
|
||||
// This provides custom-tailored logic to make printing of differences in
|
||||
// textual strings and slices of primitive kinds more readable.
|
||||
func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||
assert(opts.DiffMode == diffUnknown)
|
||||
t, vx, vy := v.Type, v.ValueX, v.ValueY
|
||||
if t.Kind() == reflect.Interface {
|
||||
vx, vy = vx.Elem(), vy.Elem()
|
||||
t = vx.Type()
|
||||
opts = opts.WithTypeMode(emitType)
|
||||
}
|
||||
|
||||
// Auto-detect the type of the data.
|
||||
var sx, sy string
|
||||
var ssx, ssy []string
|
||||
var isString, isMostlyText, isPureLinedText, isBinary bool
|
||||
switch {
|
||||
case t.Kind() == reflect.String:
|
||||
sx, sy = vx.String(), vy.String()
|
||||
isString = true
|
||||
case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)):
|
||||
sx, sy = string(vx.Bytes()), string(vy.Bytes())
|
||||
isString = true
|
||||
case t.Kind() == reflect.Array:
|
||||
// Arrays need to be addressable for slice operations to work.
|
||||
vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem()
|
||||
vx2.Set(vx)
|
||||
vy2.Set(vy)
|
||||
vx, vy = vx2, vy2
|
||||
}
|
||||
if isString {
|
||||
var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int
|
||||
for i, r := range sx + sy {
|
||||
numTotalRunes++
|
||||
if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError {
|
||||
numValidRunes++
|
||||
}
|
||||
if r == '\n' {
|
||||
if maxLineLen < i-lastLineIdx {
|
||||
maxLineLen = i - lastLineIdx
|
||||
}
|
||||
lastLineIdx = i + 1
|
||||
numLines++
|
||||
}
|
||||
}
|
||||
isPureText := numValidRunes == numTotalRunes
|
||||
isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes))
|
||||
isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024
|
||||
isBinary = !isMostlyText
|
||||
|
||||
// Avoid diffing by lines if it produces a significantly more complex
|
||||
// edit script than diffing by bytes.
|
||||
if isPureLinedText {
|
||||
ssx = strings.Split(sx, "\n")
|
||||
ssy = strings.Split(sy, "\n")
|
||||
esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result {
|
||||
return diff.BoolResult(ssx[ix] == ssy[iy])
|
||||
})
|
||||
esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result {
|
||||
return diff.BoolResult(sx[ix] == sy[iy])
|
||||
})
|
||||
efficiencyLines := float64(esLines.Dist()) / float64(len(esLines))
|
||||
efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes))
|
||||
isPureLinedText = efficiencyLines < 4*efficiencyBytes
|
||||
}
|
||||
}
|
||||
|
||||
// Format the string into printable records.
|
||||
var list textList
|
||||
var delim string
|
||||
switch {
|
||||
// If the text appears to be multi-lined text,
|
||||
// then perform differencing across individual lines.
|
||||
case isPureLinedText:
|
||||
list = opts.formatDiffSlice(
|
||||
reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line",
|
||||
func(v reflect.Value, d diffMode) textRecord {
|
||||
s := formatString(v.Index(0).String())
|
||||
return textRecord{Diff: d, Value: textLine(s)}
|
||||
},
|
||||
)
|
||||
delim = "\n"
|
||||
|
||||
// If possible, use a custom triple-quote (""") syntax for printing
|
||||
// differences in a string literal. This format is more readable,
|
||||
// but has edge-cases where differences are visually indistinguishable.
|
||||
// This format is avoided under the following conditions:
|
||||
// • A line starts with `"""`
|
||||
// • A line starts with "..."
|
||||
// • A line contains non-printable characters
|
||||
// • Adjacent different lines differ only by whitespace
|
||||
//
|
||||
// For example:
|
||||
// """
|
||||
// ... // 3 identical lines
|
||||
// foo
|
||||
// bar
|
||||
// - baz
|
||||
// + BAZ
|
||||
// """
|
||||
isTripleQuoted := true
|
||||
prevRemoveLines := map[string]bool{}
|
||||
prevInsertLines := map[string]bool{}
|
||||
var list2 textList
|
||||
list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
|
||||
for _, r := range list {
|
||||
if !r.Value.Equal(textEllipsis) {
|
||||
line, _ := strconv.Unquote(string(r.Value.(textLine)))
|
||||
line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
|
||||
normLine := strings.Map(func(r rune) rune {
|
||||
if unicode.IsSpace(r) {
|
||||
return -1 // drop whitespace to avoid visually indistinguishable output
|
||||
}
|
||||
return r
|
||||
}, line)
|
||||
isPrintable := func(r rune) bool {
|
||||
return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
|
||||
}
|
||||
isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == ""
|
||||
switch r.Diff {
|
||||
case diffRemoved:
|
||||
isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine]
|
||||
prevRemoveLines[normLine] = true
|
||||
case diffInserted:
|
||||
isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine]
|
||||
prevInsertLines[normLine] = true
|
||||
}
|
||||
if !isTripleQuoted {
|
||||
break
|
||||
}
|
||||
r.Value = textLine(line)
|
||||
r.ElideComma = true
|
||||
}
|
||||
if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group
|
||||
prevRemoveLines = map[string]bool{}
|
||||
prevInsertLines = map[string]bool{}
|
||||
}
|
||||
list2 = append(list2, r)
|
||||
}
|
||||
if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 {
|
||||
list2 = list2[:len(list2)-1] // elide single empty line at the end
|
||||
}
|
||||
list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
|
||||
if isTripleQuoted {
|
||||
var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"}
|
||||
switch t.Kind() {
|
||||
case reflect.String:
|
||||
if t != reflect.TypeOf(string("")) {
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
case reflect.Slice:
|
||||
// Always emit type for slices since the triple-quote syntax
|
||||
// looks like a string (not a slice).
|
||||
opts = opts.WithTypeMode(emitType)
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// If the text appears to be single-lined text,
|
||||
// then perform differencing in approximately fixed-sized chunks.
|
||||
// The output is printed as quoted strings.
|
||||
case isMostlyText:
|
||||
list = opts.formatDiffSlice(
|
||||
reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte",
|
||||
func(v reflect.Value, d diffMode) textRecord {
|
||||
s := formatString(v.String())
|
||||
return textRecord{Diff: d, Value: textLine(s)}
|
||||
},
|
||||
)
|
||||
|
||||
// If the text appears to be binary data,
|
||||
// then perform differencing in approximately fixed-sized chunks.
|
||||
// The output is inspired by hexdump.
|
||||
case isBinary:
|
||||
list = opts.formatDiffSlice(
|
||||
reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte",
|
||||
func(v reflect.Value, d diffMode) textRecord {
|
||||
var ss []string
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
ss = append(ss, formatHex(v.Index(i).Uint()))
|
||||
}
|
||||
s := strings.Join(ss, ", ")
|
||||
comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String())))
|
||||
return textRecord{Diff: d, Value: textLine(s), Comment: comment}
|
||||
},
|
||||
)
|
||||
|
||||
// For all other slices of primitive types,
|
||||
// then perform differencing in approximately fixed-sized chunks.
|
||||
// The size of each chunk depends on the width of the element kind.
|
||||
default:
|
||||
var chunkSize int
|
||||
if t.Elem().Kind() == reflect.Bool {
|
||||
chunkSize = 16
|
||||
} else {
|
||||
switch t.Elem().Bits() {
|
||||
case 8:
|
||||
chunkSize = 16
|
||||
case 16:
|
||||
chunkSize = 12
|
||||
case 32:
|
||||
chunkSize = 8
|
||||
default:
|
||||
chunkSize = 8
|
||||
}
|
||||
}
|
||||
list = opts.formatDiffSlice(
|
||||
vx, vy, chunkSize, t.Elem().Kind().String(),
|
||||
func(v reflect.Value, d diffMode) textRecord {
|
||||
var ss []string
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
switch t.Elem().Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
ss = append(ss, fmt.Sprint(v.Index(i).Int()))
|
||||
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
ss = append(ss, fmt.Sprint(v.Index(i).Uint()))
|
||||
case reflect.Uint8, reflect.Uintptr:
|
||||
ss = append(ss, formatHex(v.Index(i).Uint()))
|
||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
|
||||
ss = append(ss, fmt.Sprint(v.Index(i).Interface()))
|
||||
}
|
||||
}
|
||||
s := strings.Join(ss, ", ")
|
||||
return textRecord{Diff: d, Value: textLine(s)}
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Wrap the output with appropriate type information.
|
||||
var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
if !isMostlyText {
|
||||
// The "{...}" byte-sequence literal is not valid Go syntax for strings.
|
||||
// Emit the type for extra clarity (e.g. "string{...}").
|
||||
if t.Kind() == reflect.String {
|
||||
opts = opts.WithTypeMode(emitType)
|
||||
}
|
||||
return opts.FormatType(t, out)
|
||||
}
|
||||
switch t.Kind() {
|
||||
case reflect.String:
|
||||
out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
|
||||
if t != reflect.TypeOf(string("")) {
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
case reflect.Slice:
|
||||
out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
|
||||
if t != reflect.TypeOf([]byte(nil)) {
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// formatASCII formats s as an ASCII string.
|
||||
// This is useful for printing binary strings in a semi-legible way.
|
||||
func formatASCII(s string) string {
|
||||
b := bytes.Repeat([]byte{'.'}, len(s))
|
||||
for i := 0; i < len(s); i++ {
|
||||
if ' ' <= s[i] && s[i] <= '~' {
|
||||
b[i] = s[i]
|
||||
}
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func (opts formatOptions) formatDiffSlice(
|
||||
vx, vy reflect.Value, chunkSize int, name string,
|
||||
makeRec func(reflect.Value, diffMode) textRecord,
|
||||
) (list textList) {
|
||||
eq := func(ix, iy int) bool {
|
||||
return vx.Index(ix).Interface() == vy.Index(iy).Interface()
|
||||
}
|
||||
es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result {
|
||||
return diff.BoolResult(eq(ix, iy))
|
||||
})
|
||||
|
||||
appendChunks := func(v reflect.Value, d diffMode) int {
|
||||
n0 := v.Len()
|
||||
for v.Len() > 0 {
|
||||
n := chunkSize
|
||||
if n > v.Len() {
|
||||
n = v.Len()
|
||||
}
|
||||
list = append(list, makeRec(v.Slice(0, n), d))
|
||||
v = v.Slice(n, v.Len())
|
||||
}
|
||||
return n0 - v.Len()
|
||||
}
|
||||
|
||||
var numDiffs int
|
||||
maxLen := -1
|
||||
if opts.LimitVerbosity {
|
||||
maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
|
||||
opts.VerbosityLevel--
|
||||
}
|
||||
|
||||
groups := coalesceAdjacentEdits(name, es)
|
||||
groups = coalesceInterveningIdentical(groups, chunkSize/4)
|
||||
groups = cleanupSurroundingIdentical(groups, eq)
|
||||
maxGroup := diffStats{Name: name}
|
||||
for i, ds := range groups {
|
||||
if maxLen >= 0 && numDiffs >= maxLen {
|
||||
maxGroup = maxGroup.Append(ds)
|
||||
continue
|
||||
}
|
||||
|
||||
// Print equal.
|
||||
if ds.NumDiff() == 0 {
|
||||
// Compute the number of leading and trailing equal bytes to print.
|
||||
var numLo, numHi int
|
||||
numEqual := ds.NumIgnored + ds.NumIdentical
|
||||
for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 {
|
||||
numLo++
|
||||
}
|
||||
for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
|
||||
numHi++
|
||||
}
|
||||
if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 {
|
||||
numHi = numEqual - numLo // Avoid pointless coalescing of single equal row
|
||||
}
|
||||
|
||||
// Print the equal bytes.
|
||||
appendChunks(vx.Slice(0, numLo), diffIdentical)
|
||||
if numEqual > numLo+numHi {
|
||||
ds.NumIdentical -= numLo + numHi
|
||||
list.AppendEllipsis(ds)
|
||||
}
|
||||
appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical)
|
||||
vx = vx.Slice(numEqual, vx.Len())
|
||||
vy = vy.Slice(numEqual, vy.Len())
|
||||
continue
|
||||
}
|
||||
|
||||
// Print unequal.
|
||||
len0 := len(list)
|
||||
nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved)
|
||||
vx = vx.Slice(nx, vx.Len())
|
||||
ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted)
|
||||
vy = vy.Slice(ny, vy.Len())
|
||||
numDiffs += len(list) - len0
|
||||
}
|
||||
if maxGroup.IsZero() {
|
||||
assert(vx.Len() == 0 && vy.Len() == 0)
|
||||
} else {
|
||||
list.AppendEllipsis(maxGroup)
|
||||
}
|
||||
return list
|
||||
}
|
||||
|
||||
// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent
|
||||
// equal or unequal counts.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// Input: "..XXY...Y"
|
||||
// Output: [
|
||||
// {NumIdentical: 2},
|
||||
// {NumRemoved: 2, NumInserted 1},
|
||||
// {NumIdentical: 3},
|
||||
// {NumInserted: 1},
|
||||
// ]
|
||||
//
|
||||
func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
|
||||
var prevMode byte
|
||||
lastStats := func(mode byte) *diffStats {
|
||||
if prevMode != mode {
|
||||
groups = append(groups, diffStats{Name: name})
|
||||
prevMode = mode
|
||||
}
|
||||
return &groups[len(groups)-1]
|
||||
}
|
||||
for _, e := range es {
|
||||
switch e {
|
||||
case diff.Identity:
|
||||
lastStats('=').NumIdentical++
|
||||
case diff.UniqueX:
|
||||
lastStats('!').NumRemoved++
|
||||
case diff.UniqueY:
|
||||
lastStats('!').NumInserted++
|
||||
case diff.Modified:
|
||||
lastStats('!').NumModified++
|
||||
}
|
||||
}
|
||||
return groups
|
||||
}
|
||||
|
||||
// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize)
|
||||
// equal groups into adjacent unequal groups that currently result in a
|
||||
// dual inserted/removed printout. This acts as a high-pass filter to smooth
|
||||
// out high-frequency changes within the windowSize.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// WindowSize: 16,
|
||||
// Input: [
|
||||
// {NumIdentical: 61}, // group 0
|
||||
// {NumRemoved: 3, NumInserted: 1}, // group 1
|
||||
// {NumIdentical: 6}, // ├── coalesce
|
||||
// {NumInserted: 2}, // ├── coalesce
|
||||
// {NumIdentical: 1}, // ├── coalesce
|
||||
// {NumRemoved: 9}, // └── coalesce
|
||||
// {NumIdentical: 64}, // group 2
|
||||
// {NumRemoved: 3, NumInserted: 1}, // group 3
|
||||
// {NumIdentical: 6}, // ├── coalesce
|
||||
// {NumInserted: 2}, // ├── coalesce
|
||||
// {NumIdentical: 1}, // ├── coalesce
|
||||
// {NumRemoved: 7}, // ├── coalesce
|
||||
// {NumIdentical: 1}, // ├── coalesce
|
||||
// {NumRemoved: 2}, // └── coalesce
|
||||
// {NumIdentical: 63}, // group 4
|
||||
// ]
|
||||
// Output: [
|
||||
// {NumIdentical: 61},
|
||||
// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3},
|
||||
// {NumIdentical: 64},
|
||||
// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3},
|
||||
// {NumIdentical: 63},
|
||||
// ]
|
||||
//
|
||||
func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
|
||||
groups, groupsOrig := groups[:0], groups
|
||||
for i, ds := range groupsOrig {
|
||||
if len(groups) >= 2 && ds.NumDiff() > 0 {
|
||||
prev := &groups[len(groups)-2] // Unequal group
|
||||
curr := &groups[len(groups)-1] // Equal group
|
||||
next := &groupsOrig[i] // Unequal group
|
||||
hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0
|
||||
hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0
|
||||
if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize {
|
||||
*prev = prev.Append(*curr).Append(*next)
|
||||
groups = groups[:len(groups)-1] // Truncate off equal group
|
||||
continue
|
||||
}
|
||||
}
|
||||
groups = append(groups, ds)
|
||||
}
|
||||
return groups
|
||||
}
|
||||
|
||||
// cleanupSurroundingIdentical scans through all unequal groups, and
|
||||
// moves any leading sequence of equal elements to the preceding equal group and
|
||||
// moves any trailing sequence of equal elements to the succeeding equal group.
|
||||
//
|
||||
// This is necessary since coalesceInterveningIdentical may coalesce edit groups
|
||||
// together such that leading/trailing spans of equal elements becomes possible.
|
||||
// Note that this can occur even with an optimal diffing algorithm.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// Input: [
|
||||
// {NumIdentical: 61},
|
||||
// {NumIdentical: 1 , NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements
|
||||
// {NumIdentical: 67},
|
||||
// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements
|
||||
// {NumIdentical: 54},
|
||||
// ]
|
||||
// Output: [
|
||||
// {NumIdentical: 64}, // incremented by 3
|
||||
// {NumRemoved: 9},
|
||||
// {NumIdentical: 67},
|
||||
// {NumRemoved: 9},
|
||||
// {NumIdentical: 64}, // incremented by 10
|
||||
// ]
|
||||
//
|
||||
func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats {
|
||||
var ix, iy int // indexes into sequence x and y
|
||||
for i, ds := range groups {
|
||||
// Handle equal group.
|
||||
if ds.NumDiff() == 0 {
|
||||
ix += ds.NumIdentical
|
||||
iy += ds.NumIdentical
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle unequal group.
|
||||
nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified
|
||||
ny := ds.NumIdentical + ds.NumInserted + ds.NumModified
|
||||
var numLeadingIdentical, numTrailingIdentical int
|
||||
for i := 0; i < nx && i < ny && eq(ix+i, iy+i); i++ {
|
||||
numLeadingIdentical++
|
||||
}
|
||||
for i := 0; i < nx && i < ny && eq(ix+nx-1-i, iy+ny-1-i); i++ {
|
||||
numTrailingIdentical++
|
||||
}
|
||||
if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 {
|
||||
if numLeadingIdentical > 0 {
|
||||
// Remove leading identical span from this group and
|
||||
// insert it into the preceding group.
|
||||
if i-1 >= 0 {
|
||||
groups[i-1].NumIdentical += numLeadingIdentical
|
||||
} else {
|
||||
// No preceding group exists, so prepend a new group,
|
||||
// but do so after we finish iterating over all groups.
|
||||
defer func() {
|
||||
groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...)
|
||||
}()
|
||||
}
|
||||
// Increment indexes since the preceding group would have handled this.
|
||||
ix += numLeadingIdentical
|
||||
iy += numLeadingIdentical
|
||||
}
|
||||
if numTrailingIdentical > 0 {
|
||||
// Remove trailing identical span from this group and
|
||||
// insert it into the succeeding group.
|
||||
if i+1 < len(groups) {
|
||||
groups[i+1].NumIdentical += numTrailingIdentical
|
||||
} else {
|
||||
// No succeeding group exists, so append a new group,
|
||||
// but do so after we finish iterating over all groups.
|
||||
defer func() {
|
||||
groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical})
|
||||
}()
|
||||
}
|
||||
// Do not increment indexes since the succeeding group will handle this.
|
||||
}
|
||||
|
||||
// Update this group since some identical elements were removed.
|
||||
nx -= numIdentical
|
||||
ny -= numIdentical
|
||||
groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny}
|
||||
}
|
||||
ix += nx
|
||||
iy += ny
|
||||
}
|
||||
return groups
|
||||
}
|
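A hedged example (not part of the vendored file) of input that should reach the line-oriented triple-quote path above: both strings exceed the minLength of 64 bytes and contain at least four lines. It assumes fmt, strings, and cmp are imported.
x := strings.Repeat("shared line\n", 30) + "old tail\n"
y := strings.Repeat("shared line\n", 30) + "new tail\n"
fmt.Println(cmp.Diff(x, y)) // expected to render as a line-by-line """-quoted diff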
431
vendor/github.com/google/go-cmp/cmp/report_text.go
generated
vendored
Normal file
|
@ -0,0 +1,431 @@
|
|||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/flags"
|
||||
)
|
||||
|
||||
var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
|
||||
|
||||
const maxColumnLength = 80
|
||||
|
||||
type indentMode int
|
||||
|
||||
func (n indentMode) appendIndent(b []byte, d diffMode) []byte {
|
||||
// The output of Diff is documented as being unstable to provide future
|
||||
// flexibility in changing the output for more humanly readable reports.
|
||||
// This logic intentionally introduces instability to the exact output
|
||||
// so that users can detect accidental reliance on stability early on,
|
||||
// rather than much later when an actual change to the format occurs.
|
||||
if flags.Deterministic || randBool {
|
||||
// Use regular spaces (U+0020).
|
||||
switch d {
|
||||
case diffUnknown, diffIdentical:
|
||||
b = append(b, " "...)
|
||||
case diffRemoved:
|
||||
b = append(b, "- "...)
|
||||
case diffInserted:
|
||||
b = append(b, "+ "...)
|
||||
}
|
||||
} else {
|
||||
// Use non-breaking spaces (U+00a0).
|
||||
switch d {
|
||||
case diffUnknown, diffIdentical:
|
||||
b = append(b, " "...)
|
||||
case diffRemoved:
|
||||
b = append(b, "- "...)
|
||||
case diffInserted:
|
||||
b = append(b, "+ "...)
|
||||
}
|
||||
}
|
||||
return repeatCount(n).appendChar(b, '\t')
|
||||
}
|
||||
|
||||
type repeatCount int
|
||||
|
||||
func (n repeatCount) appendChar(b []byte, c byte) []byte {
|
||||
for ; n > 0; n-- {
|
||||
b = append(b, c)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// textNode is a simplified tree-based representation of structured text.
|
||||
// Possible node types are textWrap, textList, or textLine.
|
||||
type textNode interface {
|
||||
// Len reports the length in bytes of a single-line version of the tree.
|
||||
// Nested textRecord.Diff and textRecord.Comment fields are ignored.
|
||||
Len() int
|
||||
// Equal reports whether the two trees are structurally identical.
|
||||
// Nested textRecord.Diff and textRecord.Comment fields are compared.
|
||||
Equal(textNode) bool
|
||||
// String returns the string representation of the text tree.
|
||||
// It is not guaranteed that len(x.String()) == x.Len(),
|
||||
// nor that x.String() == y.String() implies that x.Equal(y).
|
||||
String() string
|
||||
|
||||
// formatCompactTo formats the contents of the tree as a single-line string
|
||||
// to the provided buffer. Any nested textRecord.Diff and textRecord.Comment
|
||||
// fields are ignored.
|
||||
//
|
||||
// However, not all nodes in the tree should be collapsed as a single-line.
|
||||
// If a node can be collapsed as a single-line, it is replaced by a textLine
|
||||
// node. Since the top-level node cannot replace itself, this also returns
|
||||
// the current node itself.
|
||||
//
|
||||
// This does not mutate the receiver.
|
||||
formatCompactTo([]byte, diffMode) ([]byte, textNode)
|
||||
// formatExpandedTo formats the contents of the tree as a multi-line string
|
||||
// to the provided buffer. In order for column alignment to operate well,
|
||||
// formatCompactTo must be called before calling formatExpandedTo.
|
||||
formatExpandedTo([]byte, diffMode, indentMode) []byte
|
||||
}
|
||||
|
||||
// textWrap is a wrapper that concatenates a prefix and/or a suffix
|
||||
// to the underlying node.
|
||||
type textWrap struct {
|
||||
Prefix string // e.g., "bytes.Buffer{"
|
||||
Value textNode // textWrap | textList | textLine
|
||||
Suffix string // e.g., "}"
|
||||
Metadata interface{} // arbitrary metadata; has no effect on formatting
|
||||
}
|
||||
|
||||
func (s *textWrap) Len() int {
|
||||
return len(s.Prefix) + s.Value.Len() + len(s.Suffix)
|
||||
}
|
||||
func (s1 *textWrap) Equal(s2 textNode) bool {
|
||||
if s2, ok := s2.(*textWrap); ok {
|
||||
return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix
|
||||
}
|
||||
return false
|
||||
}
|
||||
func (s *textWrap) String() string {
|
||||
var d diffMode
|
||||
var n indentMode
|
||||
_, s2 := s.formatCompactTo(nil, d)
|
||||
b := n.appendIndent(nil, d) // Leading indent
|
||||
b = s2.formatExpandedTo(b, d, n) // Main body
|
||||
b = append(b, '\n') // Trailing newline
|
||||
return string(b)
|
||||
}
|
||||
func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
|
||||
n0 := len(b) // Original buffer length
|
||||
b = append(b, s.Prefix...)
|
||||
b, s.Value = s.Value.formatCompactTo(b, d)
|
||||
b = append(b, s.Suffix...)
|
||||
if _, ok := s.Value.(textLine); ok {
|
||||
return b, textLine(b[n0:])
|
||||
}
|
||||
return b, s
|
||||
}
|
||||
func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
|
||||
b = append(b, s.Prefix...)
|
||||
b = s.Value.formatExpandedTo(b, d, n)
|
||||
b = append(b, s.Suffix...)
|
||||
return b
|
||||
}
|
||||
|
||||
// textList is a comma-separated list of textWrap or textLine nodes.
|
||||
// The list may be formatted as multi-lines or single-line at the discretion
|
||||
// of the textList.formatCompactTo method.
|
||||
type textList []textRecord
|
||||
type textRecord struct {
|
||||
Diff diffMode // e.g., 0 or '-' or '+'
|
||||
Key string // e.g., "MyField"
|
||||
Value textNode // textWrap | textLine
|
||||
ElideComma bool // avoid trailing comma
|
||||
Comment fmt.Stringer // e.g., "6 identical fields"
|
||||
}
|
||||
|
||||
// AppendEllipsis appends a new ellipsis node to the list if none already
|
||||
// exists at the end. If ds is non-zero it coalesces the statistics with the
|
||||
// previous diffStats.
|
||||
func (s *textList) AppendEllipsis(ds diffStats) {
|
||||
hasStats := !ds.IsZero()
|
||||
if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) {
|
||||
if hasStats {
|
||||
*s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds})
|
||||
} else {
|
||||
*s = append(*s, textRecord{Value: textEllipsis, ElideComma: true})
|
||||
}
|
||||
return
|
||||
}
|
||||
if hasStats {
|
||||
(*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds)
|
||||
}
|
||||
}
|
||||
|
||||
func (s textList) Len() (n int) {
|
||||
for i, r := range s {
|
||||
n += len(r.Key)
|
||||
if r.Key != "" {
|
||||
n += len(": ")
|
||||
}
|
||||
n += r.Value.Len()
|
||||
if i < len(s)-1 {
|
||||
n += len(", ")
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (s1 textList) Equal(s2 textNode) bool {
|
||||
if s2, ok := s2.(textList); ok {
|
||||
if len(s1) != len(s2) {
|
||||
return false
|
||||
}
|
||||
for i := range s1 {
|
||||
r1, r2 := s1[i], s2[i]
|
||||
if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s textList) String() string {
|
||||
return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String()
|
||||
}
|
||||
|
||||
func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
|
||||
s = append(textList(nil), s...) // Avoid mutating original
|
||||
|
||||
// Determine whether we can collapse this list as a single line.
|
||||
n0 := len(b) // Original buffer length
|
||||
var multiLine bool
|
||||
for i, r := range s {
|
||||
if r.Diff == diffInserted || r.Diff == diffRemoved {
|
||||
multiLine = true
|
||||
}
|
||||
b = append(b, r.Key...)
|
||||
if r.Key != "" {
|
||||
b = append(b, ": "...)
|
||||
}
|
||||
b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff)
|
||||
if _, ok := s[i].Value.(textLine); !ok {
|
||||
multiLine = true
|
||||
}
|
||||
if r.Comment != nil {
|
||||
multiLine = true
|
||||
}
|
||||
if i < len(s)-1 {
|
||||
b = append(b, ", "...)
|
||||
}
|
||||
}
|
||||
// Force multi-lined output when printing a removed/inserted node that
|
||||
// is sufficiently long.
|
||||
if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength {
|
||||
multiLine = true
|
||||
}
|
||||
if !multiLine {
|
||||
return b, textLine(b[n0:])
|
||||
}
|
||||
return b, s
|
||||
}
|
||||
|
||||
func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
|
||||
alignKeyLens := s.alignLens(
|
||||
func(r textRecord) bool {
|
||||
_, isLine := r.Value.(textLine)
|
||||
return r.Key == "" || !isLine
|
||||
},
|
||||
func(r textRecord) int { return utf8.RuneCountInString(r.Key) },
|
||||
)
|
||||
alignValueLens := s.alignLens(
|
||||
func(r textRecord) bool {
|
||||
_, isLine := r.Value.(textLine)
|
||||
return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil
|
||||
},
|
||||
func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) },
|
||||
)
|
||||
|
||||
// Format lists of simple lists in a batched form.
|
||||
// If the list is sequence of only textLine values,
|
||||
// then batch multiple values on a single line.
|
||||
var isSimple bool
|
||||
for _, r := range s {
|
||||
_, isLine := r.Value.(textLine)
|
||||
isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil
|
||||
if !isSimple {
|
||||
break
|
||||
}
|
||||
}
|
||||
if isSimple {
|
||||
n++
|
||||
var batch []byte
|
||||
emitBatch := func() {
|
||||
if len(batch) > 0 {
|
||||
b = n.appendIndent(append(b, '\n'), d)
|
||||
b = append(b, bytes.TrimRight(batch, " ")...)
|
||||
batch = batch[:0]
|
||||
}
|
||||
}
|
||||
for _, r := range s {
|
||||
line := r.Value.(textLine)
|
||||
if len(batch)+len(line)+len(", ") > maxColumnLength {
|
||||
emitBatch()
|
||||
}
|
||||
batch = append(batch, line...)
|
||||
batch = append(batch, ", "...)
|
||||
}
|
||||
emitBatch()
|
||||
n--
|
||||
return n.appendIndent(append(b, '\n'), d)
|
||||
}
|
||||
|
||||
// Format the list as a multi-lined output.
|
||||
n++
|
||||
for i, r := range s {
|
||||
b = n.appendIndent(append(b, '\n'), d|r.Diff)
|
||||
if r.Key != "" {
|
||||
b = append(b, r.Key+": "...)
|
||||
}
|
||||
b = alignKeyLens[i].appendChar(b, ' ')
|
||||
|
||||
b = r.Value.formatExpandedTo(b, d|r.Diff, n)
|
||||
if !r.ElideComma {
|
||||
b = append(b, ',')
|
||||
}
|
||||
b = alignValueLens[i].appendChar(b, ' ')
|
||||
|
||||
if r.Comment != nil {
|
||||
b = append(b, " // "+r.Comment.String()...)
|
||||
}
|
||||
}
|
||||
n--
|
||||
|
||||
return n.appendIndent(append(b, '\n'), d)
|
||||
}
|
||||
|
||||
func (s textList) alignLens(
|
||||
skipFunc func(textRecord) bool,
|
||||
lenFunc func(textRecord) int,
|
||||
) []repeatCount {
|
||||
var startIdx, endIdx, maxLen int
|
||||
lens := make([]repeatCount, len(s))
|
||||
for i, r := range s {
|
||||
if skipFunc(r) {
|
||||
for j := startIdx; j < endIdx && j < len(s); j++ {
|
||||
lens[j] = repeatCount(maxLen - lenFunc(s[j]))
|
||||
}
|
||||
startIdx, endIdx, maxLen = i+1, i+1, 0
|
||||
} else {
|
||||
if maxLen < lenFunc(r) {
|
||||
maxLen = lenFunc(r)
|
||||
}
|
||||
endIdx = i + 1
|
||||
}
|
||||
}
|
||||
for j := startIdx; j < endIdx && j < len(s); j++ {
|
||||
lens[j] = repeatCount(maxLen - lenFunc(s[j]))
|
||||
}
|
||||
return lens
|
||||
}
|
||||
|
||||
// textLine is a single-line segment of text and is always a leaf node
|
||||
// in the textNode tree.
|
||||
type textLine []byte
|
||||
|
||||
var (
|
||||
textNil = textLine("nil")
|
||||
textEllipsis = textLine("...")
|
||||
)
|
||||
|
||||
func (s textLine) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
func (s1 textLine) Equal(s2 textNode) bool {
|
||||
if s2, ok := s2.(textLine); ok {
|
||||
return bytes.Equal([]byte(s1), []byte(s2))
|
||||
}
|
||||
return false
|
||||
}
|
||||
func (s textLine) String() string {
|
||||
return string(s)
|
||||
}
|
||||
func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
|
||||
return append(b, s...), s
|
||||
}
|
||||
func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte {
|
||||
return append(b, s...)
|
||||
}
|
||||
|
||||
type diffStats struct {
|
||||
Name string
|
||||
NumIgnored int
|
||||
NumIdentical int
|
||||
NumRemoved int
|
||||
NumInserted int
|
||||
NumModified int
|
||||
}
|
||||
|
||||
func (s diffStats) IsZero() bool {
|
||||
s.Name = ""
|
||||
return s == diffStats{}
|
||||
}
|
||||
|
||||
func (s diffStats) NumDiff() int {
|
||||
return s.NumRemoved + s.NumInserted + s.NumModified
|
||||
}
|
||||
|
||||
func (s diffStats) Append(ds diffStats) diffStats {
|
||||
assert(s.Name == ds.Name)
|
||||
s.NumIgnored += ds.NumIgnored
|
||||
s.NumIdentical += ds.NumIdentical
|
||||
s.NumRemoved += ds.NumRemoved
|
||||
s.NumInserted += ds.NumInserted
|
||||
s.NumModified += ds.NumModified
|
||||
return s
|
||||
}
|
||||
|
||||
// String prints a humanly-readable summary of coalesced records.
|
||||
//
|
||||
// Example:
|
||||
// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields"
|
||||
func (s diffStats) String() string {
|
||||
var ss []string
|
||||
var sum int
|
||||
labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"}
|
||||
counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified}
|
||||
for i, n := range counts {
|
||||
if n > 0 {
|
||||
ss = append(ss, fmt.Sprintf("%d %v", n, labels[i]))
|
||||
}
|
||||
sum += n
|
||||
}
|
||||
|
||||
// Pluralize the name (adjusting for some obscure English grammar rules).
|
||||
name := s.Name
|
||||
if sum > 1 {
|
||||
name += "s"
|
||||
if strings.HasSuffix(name, "ys") {
|
||||
name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries"
|
||||
}
|
||||
}
|
||||
|
||||
// Format the list according to English grammar (with Oxford comma).
|
||||
switch n := len(ss); n {
|
||||
case 0:
|
||||
return ""
|
||||
case 1, 2:
|
||||
return strings.Join(ss, " and ") + " " + name
|
||||
default:
|
||||
return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name
|
||||
}
|
||||
}
|
||||
|
||||
type commentString string
|
||||
|
||||
func (s commentString) String() string { return string(s) }
|
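A tiny sketch (compilable only inside package cmp, with hypothetical values) of how the textWrap, textList, and textLine nodes above compose.
n := &textWrap{
	Prefix: "MyStruct{",
	Value: textList{
		{Key: "A", Value: textLine("1")},
		{Key: "B", Value: textLine(`"two"`)},
	},
	Suffix: "}",
}
fmt.Print(n.String()) // collapses to a single line, e.g. MyStruct{A: 1, B: "two"}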
121
vendor/github.com/google/go-cmp/cmp/report_value.go
generated
vendored
Normal file
|
@ -0,0 +1,121 @@
|
|||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import "reflect"
|
||||
|
||||
// valueNode represents a single node within a report, which is a
|
||||
// structured representation of the value tree, containing information
|
||||
// regarding which nodes are equal or not.
|
||||
type valueNode struct {
|
||||
parent *valueNode
|
||||
|
||||
Type reflect.Type
|
||||
ValueX reflect.Value
|
||||
ValueY reflect.Value
|
||||
|
||||
// NumSame is the number of leaf nodes that are equal.
|
||||
// All descendants are equal only if NumDiff is 0.
|
||||
NumSame int
|
||||
// NumDiff is the number of leaf nodes that are not equal.
|
||||
NumDiff int
|
||||
// NumIgnored is the number of leaf nodes that are ignored.
|
||||
NumIgnored int
|
||||
// NumCompared is the number of leaf nodes that were compared
|
||||
// using an Equal method or Comparer function.
|
||||
NumCompared int
|
||||
// NumTransformed is the number of non-leaf nodes that were transformed.
|
||||
NumTransformed int
|
||||
// NumChildren is the number of transitive descendants of this node.
|
||||
// This counts from zero; thus, leaf nodes have no descendants.
|
||||
NumChildren int
|
||||
// MaxDepth is the maximum depth of the tree. This counts from zero;
|
||||
// thus, leaf nodes have a depth of zero.
|
||||
MaxDepth int
|
||||
|
||||
// Records is a list of struct fields, slice elements, or map entries.
|
||||
Records []reportRecord // If populated, implies Value is not populated
|
||||
|
||||
// Value is the result of a transformation, pointer indirect, or
|
||||
// type assertion.
|
||||
Value *valueNode // If populated, implies Records is not populated
|
||||
|
||||
// TransformerName is the name of the transformer.
|
||||
TransformerName string // If non-empty, implies Value is populated
|
||||
}
|
||||
type reportRecord struct {
|
||||
Key reflect.Value // Invalid for slice element
|
||||
Value *valueNode
|
||||
}
|
||||
|
||||
func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) {
|
||||
vx, vy := ps.Values()
|
||||
child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy}
|
||||
switch s := ps.(type) {
|
||||
case StructField:
|
||||
assert(parent.Value == nil)
|
||||
parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child})
|
||||
case SliceIndex:
|
||||
assert(parent.Value == nil)
|
||||
parent.Records = append(parent.Records, reportRecord{Value: child})
|
||||
case MapIndex:
|
||||
assert(parent.Value == nil)
|
||||
parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child})
|
||||
case Indirect:
|
||||
assert(parent.Value == nil && parent.Records == nil)
|
||||
parent.Value = child
|
||||
case TypeAssertion:
|
||||
assert(parent.Value == nil && parent.Records == nil)
|
||||
parent.Value = child
|
||||
case Transform:
|
||||
assert(parent.Value == nil && parent.Records == nil)
|
||||
parent.Value = child
|
||||
parent.TransformerName = s.Name()
|
||||
parent.NumTransformed++
|
||||
default:
|
||||
assert(parent == nil) // Must be the root step
|
||||
}
|
||||
return child
|
||||
}
|
||||
|
||||
func (r *valueNode) Report(rs Result) {
|
||||
assert(r.MaxDepth == 0) // May only be called on leaf nodes
|
||||
|
||||
if rs.ByIgnore() {
|
||||
r.NumIgnored++
|
||||
} else {
|
||||
if rs.Equal() {
|
||||
r.NumSame++
|
||||
} else {
|
||||
r.NumDiff++
|
||||
}
|
||||
}
|
||||
assert(r.NumSame+r.NumDiff+r.NumIgnored == 1)
|
||||
|
||||
if rs.ByMethod() {
|
||||
r.NumCompared++
|
||||
}
|
||||
if rs.ByFunc() {
|
||||
r.NumCompared++
|
||||
}
|
||||
assert(r.NumCompared <= 1)
|
||||
}
|
||||
|
||||
func (child *valueNode) PopStep() (parent *valueNode) {
|
||||
if child.parent == nil {
|
||||
return nil
|
||||
}
|
||||
parent = child.parent
|
||||
parent.NumSame += child.NumSame
|
||||
parent.NumDiff += child.NumDiff
|
||||
parent.NumIgnored += child.NumIgnored
|
||||
parent.NumCompared += child.NumCompared
|
||||
parent.NumTransformed += child.NumTransformed
|
||||
parent.NumChildren += child.NumChildren + 1
|
||||
if parent.MaxDepth < child.MaxDepth+1 {
|
||||
parent.MaxDepth = child.MaxDepth + 1
|
||||
}
|
||||
return parent
|
||||
}
|
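The report tree above is internal to go-cmp; callers only ever see the text it renders. As a minimal illustrative sketch (not part of this diff; the `point` type and values are made up), this is how the vendored package is driven from application code:

package main

import (
    "fmt"

    "github.com/google/go-cmp/cmp"
)

type point struct{ X, Y int }

func main() {
    // cmp.Diff walks both values, building a tree of valueNodes via
    // PushStep/Report/PopStep, then renders the differences as text.
    got := point{X: 1, Y: 2}
    want := point{X: 1, Y: 3}
    if diff := cmp.Diff(want, got); diff != "" {
        fmt.Printf("mismatch (-want +got):\n%s", diff)
    }
}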
vendor/go.opencensus.io/plugin/ocgrpc/client.go (new file, generated and vendored, 56 lines)
@@ -0,0 +1,56 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package ocgrpc

import (
    "context"

    "go.opencensus.io/trace"
    "google.golang.org/grpc/stats"
)

// ClientHandler implements a gRPC stats.Handler for recording OpenCensus stats and
// traces. Use with gRPC clients only.
type ClientHandler struct {
    // StartOptions allows configuring the StartOptions used to create new spans.
    //
    // StartOptions.SpanKind will always be set to trace.SpanKindClient
    // for spans started by this handler.
    StartOptions trace.StartOptions
}

// HandleConn exists to satisfy gRPC stats.Handler.
func (c *ClientHandler) HandleConn(ctx context.Context, cs stats.ConnStats) {
    // no-op
}

// TagConn exists to satisfy gRPC stats.Handler.
func (c *ClientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context {
    // no-op
    return ctx
}

// HandleRPC implements per-RPC tracing and stats instrumentation.
func (c *ClientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
    traceHandleRPC(ctx, rs)
    statsHandleRPC(ctx, rs)
}

// TagRPC implements per-RPC context management.
func (c *ClientHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
    ctx = c.traceTagRPC(ctx, rti)
    ctx = c.statsTagRPC(ctx, rti)
    return ctx
}
vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go (new file, generated and vendored, 109 lines)
@@ -0,0 +1,109 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package ocgrpc

import (
    "go.opencensus.io/stats"
    "go.opencensus.io/stats/view"
    "go.opencensus.io/tag"
)

// The following variables are measures are recorded by ClientHandler:
var (
    ClientSentMessagesPerRPC     = stats.Int64("grpc.io/client/sent_messages_per_rpc", "Number of messages sent in the RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless)
    ClientSentBytesPerRPC        = stats.Int64("grpc.io/client/sent_bytes_per_rpc", "Total bytes sent across all request messages per RPC.", stats.UnitBytes)
    ClientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless)
    ClientReceivedBytesPerRPC    = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes)
    ClientRoundtripLatency       = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds)
    ClientServerLatency          = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds)
)

// Predefined views may be registered to collect data for the above measures.
// As always, you may also define your own custom views over measures collected by this
// package. These are declared as a convenience only; none are registered by
// default.
var (
    ClientSentBytesPerRPCView = &view.View{
        Measure:     ClientSentBytesPerRPC,
        Name:        "grpc.io/client/sent_bytes_per_rpc",
        Description: "Distribution of bytes sent per RPC, by method.",
        TagKeys:     []tag.Key{KeyClientMethod},
        Aggregation: DefaultBytesDistribution,
    }

    ClientReceivedBytesPerRPCView = &view.View{
        Measure:     ClientReceivedBytesPerRPC,
        Name:        "grpc.io/client/received_bytes_per_rpc",
        Description: "Distribution of bytes received per RPC, by method.",
        TagKeys:     []tag.Key{KeyClientMethod},
        Aggregation: DefaultBytesDistribution,
    }

    ClientRoundtripLatencyView = &view.View{
        Measure:     ClientRoundtripLatency,
        Name:        "grpc.io/client/roundtrip_latency",
        Description: "Distribution of round-trip latency, by method.",
        TagKeys:     []tag.Key{KeyClientMethod},
        Aggregation: DefaultMillisecondsDistribution,
    }

    // Purposely reuses the count from `ClientRoundtripLatency`, tagging
    // with method and status to result in ClientCompletedRpcs.
    ClientCompletedRPCsView = &view.View{
        Measure:     ClientRoundtripLatency,
        Name:        "grpc.io/client/completed_rpcs",
        Description: "Count of RPCs by method and status.",
        TagKeys:     []tag.Key{KeyClientMethod, KeyClientStatus},
        Aggregation: view.Count(),
    }

    ClientSentMessagesPerRPCView = &view.View{
        Measure:     ClientSentMessagesPerRPC,
        Name:        "grpc.io/client/sent_messages_per_rpc",
        Description: "Distribution of sent messages count per RPC, by method.",
        TagKeys:     []tag.Key{KeyClientMethod},
        Aggregation: DefaultMessageCountDistribution,
    }

    ClientReceivedMessagesPerRPCView = &view.View{
        Measure:     ClientReceivedMessagesPerRPC,
        Name:        "grpc.io/client/received_messages_per_rpc",
        Description: "Distribution of received messages count per RPC, by method.",
        TagKeys:     []tag.Key{KeyClientMethod},
        Aggregation: DefaultMessageCountDistribution,
    }

    ClientServerLatencyView = &view.View{
        Measure:     ClientServerLatency,
        Name:        "grpc.io/client/server_latency",
        Description: "Distribution of server latency as viewed by client, by method.",
        TagKeys:     []tag.Key{KeyClientMethod},
        Aggregation: DefaultMillisecondsDistribution,
    }
)

// DefaultClientViews are the default client views provided by this package.
var DefaultClientViews = []*view.View{
    ClientSentBytesPerRPCView,
    ClientReceivedBytesPerRPCView,
    ClientRoundtripLatencyView,
    ClientCompletedRPCsView,
}

// TODO(jbd): Add roundtrip_latency, uncompressed_request_bytes, uncompressed_response_bytes, request_count, response_count.
// TODO(acetechnologist): This is temporary and will need to be replaced by a
// mechanism to load these defaults from a common repository/config shared by
// all supported languages. Likely a serialized protobuf of these defaults.
vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go (new file, generated and vendored, 49 lines)
@@ -0,0 +1,49 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package ocgrpc

import (
    "context"
    "time"

    "go.opencensus.io/tag"
    "google.golang.org/grpc/grpclog"
    "google.golang.org/grpc/stats"
)

// statsTagRPC gets the tag.Map populated by the application code, serializes
// its tags into the GRPC metadata in order to be sent to the server.
func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
    startTime := time.Now()
    if info == nil {
        if grpclog.V(2) {
            grpclog.Info("clientHandler.TagRPC called with nil info.")
        }
        return ctx
    }

    d := &rpcData{
        startTime: startTime,
        method:    info.FullMethodName,
    }
    ts := tag.FromContext(ctx)
    if ts != nil {
        encoded := tag.Encode(ts)
        ctx = stats.SetTags(ctx, encoded)
    }

    return context.WithValue(ctx, rpcDataKey, d)
}
vendor/go.opencensus.io/plugin/ocgrpc/doc.go (new file, generated and vendored, 19 lines)
@@ -0,0 +1,19 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package ocgrpc contains OpenCensus stats and trace
// integrations for gRPC.
//
// Use ServerHandler for servers and ClientHandler for clients.
package ocgrpc // import "go.opencensus.io/plugin/ocgrpc"
vendor/go.opencensus.io/plugin/ocgrpc/server.go (new file, generated and vendored, 81 lines)
@@ -0,0 +1,81 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package ocgrpc

import (
    "context"

    "google.golang.org/grpc/stats"

    "go.opencensus.io/trace"
)

// ServerHandler implements gRPC stats.Handler recording OpenCensus stats and
// traces. Use with gRPC servers.
//
// When installed (see Example), tracing metadata is read from inbound RPCs
// by default. If no tracing metadata is present, or if the tracing metadata is
// present but the SpanContext isn't sampled, then a new trace may be started
// (as determined by Sampler).
type ServerHandler struct {
    // IsPublicEndpoint may be set to true to always start a new trace around
    // each RPC. Any SpanContext in the RPC metadata will be added as a linked
    // span instead of making it the parent of the span created around the
    // server RPC.
    //
    // Be aware that if you leave this false (the default) on a public-facing
    // server, callers will be able to send tracing metadata in gRPC headers
    // and trigger traces in your backend.
    IsPublicEndpoint bool

    // StartOptions to use for to spans started around RPCs handled by this server.
    //
    // These will apply even if there is tracing metadata already
    // present on the inbound RPC but the SpanContext is not sampled. This
    // ensures that each service has some opportunity to be traced. If you would
    // like to not add any additional traces for this gRPC service, set:
    //
    //   StartOptions.Sampler = trace.ProbabilitySampler(0.0)
    //
    // StartOptions.SpanKind will always be set to trace.SpanKindServer
    // for spans started by this handler.
    StartOptions trace.StartOptions
}

var _ stats.Handler = (*ServerHandler)(nil)

// HandleConn exists to satisfy gRPC stats.Handler.
func (s *ServerHandler) HandleConn(ctx context.Context, cs stats.ConnStats) {
    // no-op
}

// TagConn exists to satisfy gRPC stats.Handler.
func (s *ServerHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context {
    // no-op
    return ctx
}

// HandleRPC implements per-RPC tracing and stats instrumentation.
func (s *ServerHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
    traceHandleRPC(ctx, rs)
    statsHandleRPC(ctx, rs)
}

// TagRPC implements per-RPC context management.
func (s *ServerHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
    ctx = s.traceTagRPC(ctx, rti)
    ctx = s.statsTagRPC(ctx, rti)
    return ctx
}
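Both handlers plug into gRPC's stats.Handler hooks. A minimal wiring sketch, purely illustrative and not part of this diff (the addresses are placeholders and transport security is omitted for brevity):

package main

import (
    "log"
    "net"

    "go.opencensus.io/plugin/ocgrpc"
    "google.golang.org/grpc"
)

func main() {
    // Client side: record stats and traces for outgoing RPCs.
    conn, err := grpc.Dial("localhost:8080",
        grpc.WithInsecure(), // placeholder; real clients would use TLS credentials
        grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    // Server side: record stats and traces for incoming RPCs.
    lis, err := net.Listen("tcp", ":8080")
    if err != nil {
        log.Fatal(err)
    }
    srv := grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{}))
    _ = srv.Serve(lis)
}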
vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go (new file, generated and vendored, 99 lines)
@@ -0,0 +1,99 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package ocgrpc

import (
    "go.opencensus.io/stats"
    "go.opencensus.io/stats/view"
    "go.opencensus.io/tag"
)

// The following variables are measures are recorded by ServerHandler:
var (
    ServerReceivedMessagesPerRPC = stats.Int64("grpc.io/server/received_messages_per_rpc", "Number of messages received in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless)
    ServerReceivedBytesPerRPC    = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes)
    ServerSentMessagesPerRPC     = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless)
    ServerSentBytesPerRPC        = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent in across all response messages per RPC.", stats.UnitBytes)
    ServerLatency                = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds)
)

// TODO(acetechnologist): This is temporary and will need to be replaced by a
// mechanism to load these defaults from a common repository/config shared by
// all supported languages. Likely a serialized protobuf of these defaults.

// Predefined views may be registered to collect data for the above measures.
// As always, you may also define your own custom views over measures collected by this
// package. These are declared as a convenience only; none are registered by
// default.
var (
    ServerReceivedBytesPerRPCView = &view.View{
        Name:        "grpc.io/server/received_bytes_per_rpc",
        Description: "Distribution of received bytes per RPC, by method.",
        Measure:     ServerReceivedBytesPerRPC,
        TagKeys:     []tag.Key{KeyServerMethod},
        Aggregation: DefaultBytesDistribution,
    }

    ServerSentBytesPerRPCView = &view.View{
        Name:        "grpc.io/server/sent_bytes_per_rpc",
        Description: "Distribution of total sent bytes per RPC, by method.",
        Measure:     ServerSentBytesPerRPC,
        TagKeys:     []tag.Key{KeyServerMethod},
        Aggregation: DefaultBytesDistribution,
    }

    ServerLatencyView = &view.View{
        Name:        "grpc.io/server/server_latency",
        Description: "Distribution of server latency in milliseconds, by method.",
        TagKeys:     []tag.Key{KeyServerMethod},
        Measure:     ServerLatency,
        Aggregation: DefaultMillisecondsDistribution,
    }

    // Purposely reuses the count from `ServerLatency`, tagging
    // with method and status to result in ServerCompletedRpcs.
    ServerCompletedRPCsView = &view.View{
        Name:        "grpc.io/server/completed_rpcs",
        Description: "Count of RPCs by method and status.",
        TagKeys:     []tag.Key{KeyServerMethod, KeyServerStatus},
        Measure:     ServerLatency,
        Aggregation: view.Count(),
    }

    ServerReceivedMessagesPerRPCView = &view.View{
        Name:        "grpc.io/server/received_messages_per_rpc",
        Description: "Distribution of messages received count per RPC, by method.",
        TagKeys:     []tag.Key{KeyServerMethod},
        Measure:     ServerReceivedMessagesPerRPC,
        Aggregation: DefaultMessageCountDistribution,
    }

    ServerSentMessagesPerRPCView = &view.View{
        Name:        "grpc.io/server/sent_messages_per_rpc",
        Description: "Distribution of messages sent count per RPC, by method.",
        TagKeys:     []tag.Key{KeyServerMethod},
        Measure:     ServerSentMessagesPerRPC,
        Aggregation: DefaultMessageCountDistribution,
    }
)

// DefaultServerViews are the default server views provided by this package.
var DefaultServerViews = []*view.View{
    ServerReceivedBytesPerRPCView,
    ServerSentBytesPerRPCView,
    ServerLatencyView,
    ServerCompletedRPCsView,
}
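None of the views above are registered automatically; an application (or a wrapping library) has to opt in before the recorded measures are aggregated. A short sketch of registering the defaults, shown here only as an illustration of the vendored API:

package main

import (
    "log"

    "go.opencensus.io/plugin/ocgrpc"
    "go.opencensus.io/stats/view"
)

func main() {
    // After registration, data recorded by ClientHandler/ServerHandler is
    // aggregated into these views and becomes available to exporters.
    if err := view.Register(ocgrpc.DefaultClientViews...); err != nil {
        log.Fatal(err)
    }
    if err := view.Register(ocgrpc.DefaultServerViews...); err != nil {
        log.Fatal(err)
    }
}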
vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go (new file, generated and vendored, 63 lines)
@@ -0,0 +1,63 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package ocgrpc

import (
    "time"

    "context"

    "go.opencensus.io/tag"
    "google.golang.org/grpc/grpclog"
    "google.golang.org/grpc/stats"
)

// statsTagRPC gets the metadata from gRPC context, extracts the encoded tags from
// it and creates a new tag.Map and puts them into the returned context.
func (h *ServerHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
    startTime := time.Now()
    if info == nil {
        if grpclog.V(2) {
            grpclog.Infof("opencensus: TagRPC called with nil info.")
        }
        return ctx
    }
    d := &rpcData{
        startTime: startTime,
        method:    info.FullMethodName,
    }
    propagated := h.extractPropagatedTags(ctx)
    ctx = tag.NewContext(ctx, propagated)
    ctx, _ = tag.New(ctx, tag.Upsert(KeyServerMethod, methodName(info.FullMethodName)))
    return context.WithValue(ctx, rpcDataKey, d)
}

// extractPropagatedTags creates a new tag map containing the tags extracted from the
// gRPC metadata.
func (h *ServerHandler) extractPropagatedTags(ctx context.Context) *tag.Map {
    buf := stats.Tags(ctx)
    if buf == nil {
        return nil
    }
    propagated, err := tag.Decode(buf)
    if err != nil {
        if grpclog.V(2) {
            grpclog.Warningf("opencensus: Failed to decode tags from gRPC metadata failed to decode: %v", err)
        }
        return nil
    }
    return propagated
}
vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go (new file, generated and vendored, 227 lines)
@@ -0,0 +1,227 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package ocgrpc

import (
    "context"
    "strconv"
    "strings"
    "sync/atomic"
    "time"

    "go.opencensus.io/metric/metricdata"
    ocstats "go.opencensus.io/stats"
    "go.opencensus.io/stats/view"
    "go.opencensus.io/tag"
    "go.opencensus.io/trace"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/grpclog"
    "google.golang.org/grpc/stats"
    "google.golang.org/grpc/status"
)

type grpcInstrumentationKey string

// rpcData holds the instrumentation RPC data that is needed between the start
// and end of an call. It holds the info that this package needs to keep track
// of between the various GRPC events.
type rpcData struct {
    // reqCount and respCount has to be the first words
    // in order to be 64-aligned on 32-bit architectures.
    sentCount, sentBytes, recvCount, recvBytes int64 // access atomically

    // startTime represents the time at which TagRPC was invoked at the
    // beginning of an RPC. It is an appoximation of the time when the
    // application code invoked GRPC code.
    startTime time.Time
    method    string
}

// The following variables define the default hard-coded auxiliary data used by
// both the default GRPC client and GRPC server metrics.
var (
    DefaultBytesDistribution        = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
    DefaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
    DefaultMessageCountDistribution = view.Distribution(1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536)
)

// Server tags are applied to the context used to process each RPC, as well as
// the measures at the end of each RPC.
var (
    KeyServerMethod = tag.MustNewKey("grpc_server_method")
    KeyServerStatus = tag.MustNewKey("grpc_server_status")
)

// Client tags are applied to measures at the end of each RPC.
var (
    KeyClientMethod = tag.MustNewKey("grpc_client_method")
    KeyClientStatus = tag.MustNewKey("grpc_client_status")
)

var (
    rpcDataKey = grpcInstrumentationKey("opencensus-rpcData")
)

func methodName(fullname string) string {
    return strings.TrimLeft(fullname, "/")
}

// statsHandleRPC processes the RPC events.
func statsHandleRPC(ctx context.Context, s stats.RPCStats) {
    switch st := s.(type) {
    case *stats.Begin, *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer:
        // do nothing for client
    case *stats.OutPayload:
        handleRPCOutPayload(ctx, st)
    case *stats.InPayload:
        handleRPCInPayload(ctx, st)
    case *stats.End:
        handleRPCEnd(ctx, st)
    default:
        grpclog.Infof("unexpected stats: %T", st)
    }
}

func handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) {
    d, ok := ctx.Value(rpcDataKey).(*rpcData)
    if !ok {
        if grpclog.V(2) {
            grpclog.Infoln("Failed to retrieve *rpcData from context.")
        }
        return
    }

    atomic.AddInt64(&d.sentBytes, int64(s.Length))
    atomic.AddInt64(&d.sentCount, 1)
}

func handleRPCInPayload(ctx context.Context, s *stats.InPayload) {
    d, ok := ctx.Value(rpcDataKey).(*rpcData)
    if !ok {
        if grpclog.V(2) {
            grpclog.Infoln("Failed to retrieve *rpcData from context.")
        }
        return
    }

    atomic.AddInt64(&d.recvBytes, int64(s.Length))
    atomic.AddInt64(&d.recvCount, 1)
}

func handleRPCEnd(ctx context.Context, s *stats.End) {
    d, ok := ctx.Value(rpcDataKey).(*rpcData)
    if !ok {
        if grpclog.V(2) {
            grpclog.Infoln("Failed to retrieve *rpcData from context.")
        }
        return
    }

    elapsedTime := time.Since(d.startTime)

    var st string
    if s.Error != nil {
        s, ok := status.FromError(s.Error)
        if ok {
            st = statusCodeToString(s)
        }
    } else {
        st = "OK"
    }

    latencyMillis := float64(elapsedTime) / float64(time.Millisecond)
    attachments := getSpanCtxAttachment(ctx)
    if s.Client {
        ocstats.RecordWithOptions(ctx,
            ocstats.WithTags(
                tag.Upsert(KeyClientMethod, methodName(d.method)),
                tag.Upsert(KeyClientStatus, st)),
            ocstats.WithAttachments(attachments),
            ocstats.WithMeasurements(
                ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)),
                ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)),
                ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)),
                ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)),
                ClientRoundtripLatency.M(latencyMillis)))
    } else {
        ocstats.RecordWithOptions(ctx,
            ocstats.WithTags(
                tag.Upsert(KeyServerStatus, st),
            ),
            ocstats.WithAttachments(attachments),
            ocstats.WithMeasurements(
                ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)),
                ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)),
                ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)),
                ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)),
                ServerLatency.M(latencyMillis)))
    }
}

func statusCodeToString(s *status.Status) string {
    // see https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
    switch c := s.Code(); c {
    case codes.OK:
        return "OK"
    case codes.Canceled:
        return "CANCELLED"
    case codes.Unknown:
        return "UNKNOWN"
    case codes.InvalidArgument:
        return "INVALID_ARGUMENT"
    case codes.DeadlineExceeded:
        return "DEADLINE_EXCEEDED"
    case codes.NotFound:
        return "NOT_FOUND"
    case codes.AlreadyExists:
        return "ALREADY_EXISTS"
    case codes.PermissionDenied:
        return "PERMISSION_DENIED"
    case codes.ResourceExhausted:
        return "RESOURCE_EXHAUSTED"
    case codes.FailedPrecondition:
        return "FAILED_PRECONDITION"
    case codes.Aborted:
        return "ABORTED"
    case codes.OutOfRange:
        return "OUT_OF_RANGE"
    case codes.Unimplemented:
        return "UNIMPLEMENTED"
    case codes.Internal:
        return "INTERNAL"
    case codes.Unavailable:
        return "UNAVAILABLE"
    case codes.DataLoss:
        return "DATA_LOSS"
    case codes.Unauthenticated:
        return "UNAUTHENTICATED"
    default:
        return "CODE_" + strconv.FormatInt(int64(c), 10)
    }
}

func getSpanCtxAttachment(ctx context.Context) metricdata.Attachments {
    attachments := map[string]interface{}{}
    span := trace.FromContext(ctx)
    if span == nil {
        return attachments
    }
    spanCtx := span.SpanContext()
    if spanCtx.IsSampled() {
        attachments[metricdata.AttachmentKeySpanContext] = spanCtx
    }
    return attachments
}
vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go (new file, generated and vendored, 107 lines)
@@ -0,0 +1,107 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package ocgrpc

import (
    "context"
    "strings"

    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/metadata"
    "google.golang.org/grpc/stats"
    "google.golang.org/grpc/status"

    "go.opencensus.io/trace"
    "go.opencensus.io/trace/propagation"
)

const traceContextKey = "grpc-trace-bin"

// TagRPC creates a new trace span for the client side of the RPC.
//
// It returns ctx with the new trace span added and a serialization of the
// SpanContext added to the outgoing gRPC metadata.
func (c *ClientHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
    name := strings.TrimPrefix(rti.FullMethodName, "/")
    name = strings.Replace(name, "/", ".", -1)
    ctx, span := trace.StartSpan(ctx, name,
        trace.WithSampler(c.StartOptions.Sampler),
        trace.WithSpanKind(trace.SpanKindClient)) // span is ended by traceHandleRPC
    traceContextBinary := propagation.Binary(span.SpanContext())
    return metadata.AppendToOutgoingContext(ctx, traceContextKey, string(traceContextBinary))
}

// TagRPC creates a new trace span for the server side of the RPC.
//
// It checks the incoming gRPC metadata in ctx for a SpanContext, and if
// it finds one, uses that SpanContext as the parent context of the new span.
//
// It returns ctx, with the new trace span added.
func (s *ServerHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
    md, _ := metadata.FromIncomingContext(ctx)
    name := strings.TrimPrefix(rti.FullMethodName, "/")
    name = strings.Replace(name, "/", ".", -1)
    traceContext := md[traceContextKey]
    var (
        parent     trace.SpanContext
        haveParent bool
    )
    if len(traceContext) > 0 {
        // Metadata with keys ending in -bin are actually binary. They are base64
        // encoded before being put on the wire, see:
        // https://github.com/grpc/grpc-go/blob/08d6261/Documentation/grpc-metadata.md#storing-binary-data-in-metadata
        traceContextBinary := []byte(traceContext[0])
        parent, haveParent = propagation.FromBinary(traceContextBinary)
        if haveParent && !s.IsPublicEndpoint {
            ctx, _ := trace.StartSpanWithRemoteParent(ctx, name, parent,
                trace.WithSpanKind(trace.SpanKindServer),
                trace.WithSampler(s.StartOptions.Sampler),
            )
            return ctx
        }
    }
    ctx, span := trace.StartSpan(ctx, name,
        trace.WithSpanKind(trace.SpanKindServer),
        trace.WithSampler(s.StartOptions.Sampler))
    if haveParent {
        span.AddLink(trace.Link{TraceID: parent.TraceID, SpanID: parent.SpanID, Type: trace.LinkTypeChild})
    }
    return ctx
}

func traceHandleRPC(ctx context.Context, rs stats.RPCStats) {
    span := trace.FromContext(ctx)
    // TODO: compressed and uncompressed sizes are not populated in every message.
    switch rs := rs.(type) {
    case *stats.Begin:
        span.AddAttributes(
            trace.BoolAttribute("Client", rs.Client),
            trace.BoolAttribute("FailFast", rs.FailFast))
    case *stats.InPayload:
        span.AddMessageReceiveEvent(0 /* TODO: messageID */, int64(rs.Length), int64(rs.WireLength))
    case *stats.OutPayload:
        span.AddMessageSendEvent(0, int64(rs.Length), int64(rs.WireLength))
    case *stats.End:
        if rs.Error != nil {
            s, ok := status.FromError(rs.Error)
            if ok {
                span.SetStatus(trace.Status{Code: int32(s.Code()), Message: s.Message()})
            } else {
                span.SetStatus(trace.Status{Code: int32(codes.Internal), Message: rs.Error.Error()})
            }
        }
        span.End()
    }
}
vendor/google.golang.org/api/transport/grpc/dial.go (new file, generated and vendored, 298 lines)
@@ -0,0 +1,298 @@
// Copyright 2015 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package grpc supports network connections to GRPC servers.
// This package is not intended for use by end developers. Use the
// google.golang.org/api/option package to configure API clients.
package grpc

import (
    "context"
    "crypto/tls"
    "errors"
    "log"
    "strings"

    "cloud.google.com/go/compute/metadata"
    "go.opencensus.io/plugin/ocgrpc"
    "golang.org/x/oauth2"
    "google.golang.org/api/internal"
    "google.golang.org/api/option"
    "google.golang.org/api/transport/internal/dca"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials"
    grpcgoogle "google.golang.org/grpc/credentials/google"
    "google.golang.org/grpc/credentials/oauth"

    // Install grpclb, which is required for direct path.
    _ "google.golang.org/grpc/balancer/grpclb"
)

// Set at init time by dial_appengine.go. If nil, we're not on App Engine.
var appengineDialerHook func(context.Context) grpc.DialOption

// Set at init time by dial_socketopt.go. If nil, socketopt is not supported.
var timeoutDialerOption grpc.DialOption

// Dial returns a GRPC connection for use communicating with a Google cloud
// service, configured with the given ClientOptions.
func Dial(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) {
    o, err := processAndValidateOpts(opts)
    if err != nil {
        return nil, err
    }
    if o.GRPCConnPool != nil {
        return o.GRPCConnPool.Conn(), nil
    }
    // NOTE(cbro): We removed support for option.WithGRPCConnPool (GRPCConnPoolSize)
    // on 2020-02-12 because RoundRobin and WithBalancer are deprecated and we need to remove usages of it.
    //
    // Connection pooling is only done via DialPool.
    return dial(ctx, false, o)
}

// DialInsecure returns an insecure GRPC connection for use communicating
// with fake or mock Google cloud service implementations, such as emulators.
// The connection is configured with the given ClientOptions.
func DialInsecure(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) {
    o, err := processAndValidateOpts(opts)
    if err != nil {
        return nil, err
    }
    return dial(ctx, true, o)
}

// DialPool returns a pool of GRPC connections for the given service.
// This differs from the connection pooling implementation used by Dial, which uses a custom GRPC load balancer.
// DialPool should be used instead of Dial when a pool is used by default or a different custom GRPC load balancer is needed.
// The context and options are shared between each Conn in the pool.
// The pool size is configured using the WithGRPCConnectionPool option.
//
// This API is subject to change as we further refine requirements. It will go away if gRPC stubs accept an interface instead of the concrete ClientConn type. See https://github.com/grpc/grpc-go/issues/1287.
func DialPool(ctx context.Context, opts ...option.ClientOption) (ConnPool, error) {
    o, err := processAndValidateOpts(opts)
    if err != nil {
        return nil, err
    }
    if o.GRPCConnPool != nil {
        return o.GRPCConnPool, nil
    }
    poolSize := o.GRPCConnPoolSize
    if o.GRPCConn != nil {
        // WithGRPCConn is technically incompatible with WithGRPCConnectionPool.
        // Always assume pool size is 1 when a grpc.ClientConn is explicitly used.
        poolSize = 1
    }
    o.GRPCConnPoolSize = 0 // we don't *need* to set this to zero, but it's safe to.

    if poolSize == 0 || poolSize == 1 {
        // Fast path for common case for a connection pool with a single connection.
        conn, err := dial(ctx, false, o)
        if err != nil {
            return nil, err
        }
        return &singleConnPool{conn}, nil
    }

    pool := &roundRobinConnPool{}
    for i := 0; i < poolSize; i++ {
        conn, err := dial(ctx, false, o)
        if err != nil {
            defer pool.Close() // NOTE: error from Close is ignored.
            return nil, err
        }
        pool.conns = append(pool.conns, conn)
    }
    return pool, nil
}

func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.ClientConn, error) {
    if o.HTTPClient != nil {
        return nil, errors.New("unsupported HTTP client specified")
    }
    if o.GRPCConn != nil {
        return o.GRPCConn, nil
    }
    clientCertSource, endpoint, err := dca.GetClientCertificateSourceAndEndpoint(o)
    if err != nil {
        return nil, err
    }
    var grpcOpts []grpc.DialOption
    if insecure {
        grpcOpts = []grpc.DialOption{grpc.WithInsecure()}
    } else if !o.NoAuth {
        if o.APIKey != "" {
            log.Print("API keys are not supported for gRPC APIs. Remove the WithAPIKey option from your client-creating call.")
        }
        creds, err := internal.Creds(ctx, o)
        if err != nil {
            return nil, err
        }

        if o.QuotaProject == "" {
            o.QuotaProject = internal.QuotaProjectFromCreds(creds)
        }

        // Attempt Direct Path only if:
        // * The endpoint is a host:port (or dns:///host:port).
        // * Credentials are obtained via GCE metadata server, using the default
        //   service account.
        if o.EnableDirectPath && checkDirectPathEndPoint(endpoint) && isTokenSourceDirectPathCompatible(creds.TokenSource, o) && metadata.OnGCE() {
            if !strings.HasPrefix(endpoint, "dns:///") {
                endpoint = "dns:///" + endpoint
            }
            grpcOpts = []grpc.DialOption{
                grpc.WithCredentialsBundle(
                    grpcgoogle.NewComputeEngineCredentials(),
                ),
                // For now all DirectPath go clients will be using the following lb config, but in future
                // when different services need different configs, then we should change this to a
                // per-service config.
                grpc.WithDisableServiceConfig(),
                grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`),
            }
            // TODO(cbro): add support for system parameters (quota project, request reason) via chained interceptor.
        } else {
            tlsConfig := &tls.Config{
                GetClientCertificate: clientCertSource,
            }
            grpcOpts = []grpc.DialOption{
                grpc.WithPerRPCCredentials(grpcTokenSource{
                    TokenSource:   oauth.TokenSource{creds.TokenSource},
                    quotaProject:  o.QuotaProject,
                    requestReason: o.RequestReason,
                }),
                grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)),
            }
        }
    }

    if appengineDialerHook != nil {
        // Use the Socket API on App Engine.
        // appengine dialer will override socketopt dialer
        grpcOpts = append(grpcOpts, appengineDialerHook(ctx))
    }

    // Add tracing, but before the other options, so that clients can override the
    // gRPC stats handler.
    // This assumes that gRPC options are processed in order, left to right.
    grpcOpts = addOCStatsHandler(grpcOpts, o)
    grpcOpts = append(grpcOpts, o.GRPCDialOpts...)
    if o.UserAgent != "" {
        grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent))
    }

    // TODO(weiranf): This socketopt dialer will be used by default at some
    // point when isDirectPathEnabled will default to true, we guard it by
    // the Directpath env var for now once we can introspect user defined
    // dialer (https://github.com/grpc/grpc-go/issues/2795).
    if timeoutDialerOption != nil && o.EnableDirectPath && checkDirectPathEndPoint(endpoint) && metadata.OnGCE() {
        grpcOpts = append(grpcOpts, timeoutDialerOption)
    }

    return grpc.DialContext(ctx, endpoint, grpcOpts...)
}

func addOCStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption {
    if settings.TelemetryDisabled {
        return opts
    }
    return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
}

// grpcTokenSource supplies PerRPCCredentials from an oauth.TokenSource.
type grpcTokenSource struct {
    oauth.TokenSource

    // Additional metadata attached as headers.
    quotaProject  string
    requestReason string
}

// GetRequestMetadata gets the request metadata as a map from a grpcTokenSource.
func (ts grpcTokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (
    map[string]string, error) {
    metadata, err := ts.TokenSource.GetRequestMetadata(ctx, uri...)
    if err != nil {
        return nil, err
    }

    // Attach system parameter
    if ts.quotaProject != "" {
        metadata["X-goog-user-project"] = ts.quotaProject
    }
    if ts.requestReason != "" {
        metadata["X-goog-request-reason"] = ts.requestReason
    }
    return metadata, nil
}

func isTokenSourceDirectPathCompatible(ts oauth2.TokenSource, o *internal.DialSettings) bool {
    if ts == nil {
        return false
    }
    tok, err := ts.Token()
    if err != nil {
        return false
    }
    if tok == nil {
        return false
    }
    if o.AllowNonDefaultServiceAccount {
        return true
    }
    if source, _ := tok.Extra("oauth2.google.tokenSource").(string); source != "compute-metadata" {
        return false
    }
    if acct, _ := tok.Extra("oauth2.google.serviceAccount").(string); acct != "default" {
        return false
    }
    return true
}

func checkDirectPathEndPoint(endpoint string) bool {
    // Only [dns:///]host[:port] is supported, not other schemes (e.g., "tcp://" or "unix://").
    // Also don't try direct path if the user has chosen an alternate name resolver
    // (i.e., via ":///" prefix).
    //
    // TODO(cbro): once gRPC has introspectible options, check the user hasn't
    // provided a custom dialer in gRPC options.
    if strings.Contains(endpoint, "://") && !strings.HasPrefix(endpoint, "dns:///") {
        return false
    }

    if endpoint == "" {
        return false
    }

    return true
}

func processAndValidateOpts(opts []option.ClientOption) (*internal.DialSettings, error) {
    var o internal.DialSettings
    for _, opt := range opts {
        opt.Apply(&o)
    }
    if err := o.Validate(); err != nil {
        return nil, err
    }

    return &o, nil
}

type connPoolOption struct{ ConnPool }

// WithConnPool returns a ClientOption that specifies the ConnPool
// connection to use as the basis of communications.
//
// This is only to be used by Google client libraries internally, for example
// when creating a longrunning API client that shares the same connection pool
// as a service client.
func WithConnPool(p ConnPool) option.ClientOption {
    return connPoolOption{p}
}

func (o connPoolOption) Apply(s *internal.DialSettings) {
    s.GRPCConnPool = o.ConnPool
}
vendor/google.golang.org/api/transport/grpc/dial_appengine.go (new file, generated and vendored, 32 lines)
@@ -0,0 +1,32 @@
// Copyright 2016 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build appengine
// +build appengine

package grpc

import (
    "context"
    "net"
    "time"

    "google.golang.org/appengine"
    "google.golang.org/appengine/socket"
    "google.golang.org/grpc"
)

func init() {
    // NOTE: dev_appserver doesn't currently support SSL.
    // When it does, this code can be removed.
    if appengine.IsDevAppServer() {
        return
    }

    appengineDialerHook = func(ctx context.Context) grpc.DialOption {
        return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
            return socket.DialTimeout(ctx, "tcp", addr, timeout)
        })
    }
}
vendor/google.golang.org/api/transport/grpc/dial_socketopt.go (new file, generated and vendored, 50 lines)
@@ -0,0 +1,50 @@
// Copyright 2019 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.11 && linux
// +build go1.11,linux

package grpc

import (
    "context"
    "net"
    "syscall"

    "golang.org/x/sys/unix"
    "google.golang.org/grpc"
)

const (
    // defaultTCPUserTimeout is the default TCP_USER_TIMEOUT socket option. By
    // default is 20 seconds.
    tcpUserTimeoutMilliseconds = 20000
)

func init() {
    // timeoutDialerOption is a grpc.DialOption that contains dialer with
    // socket option TCP_USER_TIMEOUT. This dialer requires go versions 1.11+.
    timeoutDialerOption = grpc.WithContextDialer(dialTCPUserTimeout)
}

func dialTCPUserTimeout(ctx context.Context, addr string) (net.Conn, error) {
    control := func(network, address string, c syscall.RawConn) error {
        var syscallErr error
        controlErr := c.Control(func(fd uintptr) {
            syscallErr = syscall.SetsockoptInt(
                int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, tcpUserTimeoutMilliseconds)
        })
        if syscallErr != nil {
            return syscallErr
        }
        if controlErr != nil {
            return controlErr
        }
        return nil
    }
    d := &net.Dialer{
        Control: control,
    }
    return d.DialContext(ctx, "tcp", addr)
}
vendor/google.golang.org/api/transport/grpc/pool.go (new file, generated and vendored, 92 lines)
@@ -0,0 +1,92 @@
// Copyright 2020 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package grpc

import (
    "context"
    "fmt"
    "sync/atomic"

    "google.golang.org/api/internal"
    "google.golang.org/grpc"
)

// ConnPool is a pool of grpc.ClientConns.
type ConnPool = internal.ConnPool // NOTE(cbro): type alias to export the type. It must live in internal to avoid a circular dependency.

var _ ConnPool = &roundRobinConnPool{}
var _ ConnPool = &singleConnPool{}

// singleConnPool is a special case for a single connection.
type singleConnPool struct {
    *grpc.ClientConn
}

func (p *singleConnPool) Conn() *grpc.ClientConn { return p.ClientConn }
func (p *singleConnPool) Num() int               { return 1 }

type roundRobinConnPool struct {
    conns []*grpc.ClientConn

    idx uint32 // access via sync/atomic
}

func (p *roundRobinConnPool) Num() int {
    return len(p.conns)
}

func (p *roundRobinConnPool) Conn() *grpc.ClientConn {
    i := atomic.AddUint32(&p.idx, 1)
    return p.conns[i%uint32(len(p.conns))]
}

func (p *roundRobinConnPool) Close() error {
    var errs multiError
    for _, conn := range p.conns {
        if err := conn.Close(); err != nil {
            errs = append(errs, err)
        }
    }
    if len(errs) == 0 {
        return nil
    }
    return errs
}

func (p *roundRobinConnPool) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...grpc.CallOption) error {
    return p.Conn().Invoke(ctx, method, args, reply, opts...)
}

func (p *roundRobinConnPool) NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
    return p.Conn().NewStream(ctx, desc, method, opts...)
}

// multiError represents errors from multiple conns in the group.
//
// TODO: figure out how and whether this is useful to export. End users should
// not be depending on the transport/grpc package directly, so there might need
// to be some service-specific multi-error type.
type multiError []error

func (m multiError) Error() string {
    s, n := "", 0
    for _, e := range m {
        if e != nil {
            if n == 0 {
                s = e.Error()
            }
            n++
        }
    }
    switch n {
    case 0:
        return "(0 errors)"
    case 1:
        return s
    case 2:
        return s + " (and 1 other error)"
    }
    return fmt.Sprintf("%s (and %d other errors)", s, n-1)
}
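DialPool and the round-robin pool above are normally reached through the option package rather than called directly. A rough usage sketch under that assumption (illustrative only; the endpoint, scope, and pool size are placeholder values, not taken from this change):

package main

import (
    "context"
    "log"

    "google.golang.org/api/option"
    grpctransport "google.golang.org/api/transport/grpc"
)

func main() {
    ctx := context.Background()

    // Ask for four underlying grpc.ClientConns; Conn() hands them out
    // round-robin, so concurrent RPCs are spread across the connections.
    pool, err := grpctransport.DialPool(ctx,
        option.WithEndpoint("storage.googleapis.com:443"),
        option.WithScopes("https://www.googleapis.com/auth/devstorage.read_only"),
        option.WithGRPCConnectionPool(4),
    )
    if err != nil {
        log.Fatal(err)
    }
    defer pool.Close()

    log.Printf("pool holds %d connections", pool.Num())
}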
vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go (new file, generated and vendored, 2822 lines)
File diff suppressed because it is too large.
vendor/google.golang.org/appengine/internal/socket/socket_service.proto (new file, generated and vendored, 460 lines; truncated below)
@@ -0,0 +1,460 @@
syntax = "proto2";
option go_package = "socket";

package appengine;

message RemoteSocketServiceError {
  enum ErrorCode {
    SYSTEM_ERROR = 1;
    GAI_ERROR = 2;
    FAILURE = 4;
    PERMISSION_DENIED = 5;
    INVALID_REQUEST = 6;
    SOCKET_CLOSED = 7;
  }

  enum SystemError {
    option allow_alias = true;

    SYS_SUCCESS = 0;
    SYS_EPERM = 1;
    SYS_ENOENT = 2;
    SYS_ESRCH = 3;
    SYS_EINTR = 4;
    SYS_EIO = 5;
    SYS_ENXIO = 6;
    SYS_E2BIG = 7;
    SYS_ENOEXEC = 8;
    SYS_EBADF = 9;
    SYS_ECHILD = 10;
    SYS_EAGAIN = 11;
    SYS_EWOULDBLOCK = 11;
    SYS_ENOMEM = 12;
    SYS_EACCES = 13;
    SYS_EFAULT = 14;
    SYS_ENOTBLK = 15;
    SYS_EBUSY = 16;
    SYS_EEXIST = 17;
    SYS_EXDEV = 18;
    SYS_ENODEV = 19;
    SYS_ENOTDIR = 20;
    SYS_EISDIR = 21;
    SYS_EINVAL = 22;
    SYS_ENFILE = 23;
    SYS_EMFILE = 24;
    SYS_ENOTTY = 25;
    SYS_ETXTBSY = 26;
    SYS_EFBIG = 27;
    SYS_ENOSPC = 28;
    SYS_ESPIPE = 29;
    SYS_EROFS = 30;
    SYS_EMLINK = 31;
    SYS_EPIPE = 32;
    SYS_EDOM = 33;
    SYS_ERANGE = 34;
    SYS_EDEADLK = 35;
    SYS_EDEADLOCK = 35;
    SYS_ENAMETOOLONG = 36;
    SYS_ENOLCK = 37;
    SYS_ENOSYS = 38;
    SYS_ENOTEMPTY = 39;
    SYS_ELOOP = 40;
    SYS_ENOMSG = 42;
    SYS_EIDRM = 43;
    SYS_ECHRNG = 44;
    SYS_EL2NSYNC = 45;
    SYS_EL3HLT = 46;
    SYS_EL3RST = 47;
    SYS_ELNRNG = 48;
    SYS_EUNATCH = 49;
    SYS_ENOCSI = 50;
    SYS_EL2HLT = 51;
    SYS_EBADE = 52;
    SYS_EBADR = 53;
    SYS_EXFULL = 54;
    SYS_ENOANO = 55;
    SYS_EBADRQC = 56;
    SYS_EBADSLT = 57;
    SYS_EBFONT = 59;
    SYS_ENOSTR = 60;
    SYS_ENODATA = 61;
    SYS_ETIME = 62;
    SYS_ENOSR = 63;
    SYS_ENONET = 64;
    SYS_ENOPKG = 65;
    SYS_EREMOTE = 66;
    SYS_ENOLINK = 67;
    SYS_EADV = 68;
    SYS_ESRMNT = 69;
    SYS_ECOMM = 70;
    SYS_EPROTO = 71;
    SYS_EMULTIHOP = 72;
    SYS_EDOTDOT = 73;
    SYS_EBADMSG = 74;
    SYS_EOVERFLOW = 75;
    SYS_ENOTUNIQ = 76;
    SYS_EBADFD = 77;
    SYS_EREMCHG = 78;
    SYS_ELIBACC = 79;
    SYS_ELIBBAD = 80;
    SYS_ELIBSCN = 81;
    SYS_ELIBMAX = 82;
    SYS_ELIBEXEC = 83;
    SYS_EILSEQ = 84;
    SYS_ERESTART = 85;
    SYS_ESTRPIPE = 86;
    SYS_EUSERS = 87;
    SYS_ENOTSOCK = 88;
    SYS_EDESTADDRREQ = 89;
    SYS_EMSGSIZE = 90;
    SYS_EPROTOTYPE = 91;
    SYS_ENOPROTOOPT = 92;
    SYS_EPROTONOSUPPORT = 93;
    SYS_ESOCKTNOSUPPORT = 94;
    SYS_EOPNOTSUPP = 95;
    SYS_ENOTSUP = 95;
    SYS_EPFNOSUPPORT = 96;
    SYS_EAFNOSUPPORT = 97;
    SYS_EADDRINUSE = 98;
    SYS_EADDRNOTAVAIL = 99;
    SYS_ENETDOWN = 100;
    SYS_ENETUNREACH = 101;
    SYS_ENETRESET = 102;
    SYS_ECONNABORTED = 103;
    SYS_ECONNRESET = 104;
    SYS_ENOBUFS = 105;
    SYS_EISCONN = 106;
    SYS_ENOTCONN = 107;
    SYS_ESHUTDOWN = 108;
    SYS_ETOOMANYREFS = 109;
    SYS_ETIMEDOUT = 110;
    SYS_ECONNREFUSED = 111;
    SYS_EHOSTDOWN = 112;
    SYS_EHOSTUNREACH = 113;
    SYS_EALREADY = 114;
    SYS_EINPROGRESS = 115;
    SYS_ESTALE = 116;
    SYS_EUCLEAN = 117;
    SYS_ENOTNAM = 118;
    SYS_ENAVAIL = 119;
    SYS_EISNAM = 120;
    SYS_EREMOTEIO = 121;
    SYS_EDQUOT = 122;
    SYS_ENOMEDIUM = 123;
    SYS_EMEDIUMTYPE = 124;
    SYS_ECANCELED = 125;
    SYS_ENOKEY = 126;
    SYS_EKEYEXPIRED = 127;
    SYS_EKEYREVOKED = 128;
    SYS_EKEYREJECTED = 129;
    SYS_EOWNERDEAD = 130;
    SYS_ENOTRECOVERABLE = 131;
    SYS_ERFKILL = 132;
  }

  optional int32 system_error = 1 [default=0];
  optional string error_detail = 2;
}

message AddressPort {
  required int32 port = 1;
  optional bytes packed_address = 2;

  optional string hostname_hint = 3;
}


message CreateSocketRequest {
  enum SocketFamily {
    IPv4 = 1;
    IPv6 = 2;
  }

  enum SocketProtocol {
    TCP = 1;
    UDP = 2;
  }

  required SocketFamily family = 1;
  required SocketProtocol protocol = 2;

  repeated SocketOption socket_options = 3;

  optional AddressPort proxy_external_ip = 4;

  optional int32 listen_backlog = 5 [default=0];

  optional AddressPort remote_ip = 6;

  optional string app_id = 9;

  optional int64 project_id = 10;
}

message CreateSocketReply {
  optional string socket_descriptor = 1;

  optional AddressPort server_address = 3;

  optional AddressPort proxy_external_ip = 4;

  extensions 1000 to max;
}


message BindRequest {
  required string socket_descriptor = 1;
  required AddressPort proxy_external_ip = 2;
}

message BindReply {
  optional AddressPort proxy_external_ip = 1;
}


message GetSocketNameRequest {
  required string socket_descriptor = 1;
}

message GetSocketNameReply {
  optional AddressPort proxy_external_ip = 2;
}


message GetPeerNameRequest {
  required string socket_descriptor = 1;
}

message GetPeerNameReply {
  optional AddressPort peer_ip = 2;
}


message SocketOption {

  enum SocketOptionLevel {
    SOCKET_SOL_IP = 0;
    SOCKET_SOL_SOCKET = 1;
    SOCKET_SOL_TCP = 6;
    SOCKET_SOL_UDP = 17;
  }

  enum SocketOptionName {
    option allow_alias = true;

    SOCKET_SO_DEBUG = 1;
    SOCKET_SO_REUSEADDR = 2;
    SOCKET_SO_TYPE = 3;
    SOCKET_SO_ERROR = 4;
    SOCKET_SO_DONTROUTE = 5;
    SOCKET_SO_BROADCAST = 6;
    SOCKET_SO_SNDBUF = 7;
    SOCKET_SO_RCVBUF = 8;
    SOCKET_SO_KEEPALIVE = 9;
|
||||
SOCKET_SO_OOBINLINE = 10;
|
||||
SOCKET_SO_LINGER = 13;
|
||||
SOCKET_SO_RCVTIMEO = 20;
|
||||
SOCKET_SO_SNDTIMEO = 21;
|
||||
|
||||
SOCKET_IP_TOS = 1;
|
||||
SOCKET_IP_TTL = 2;
|
||||
SOCKET_IP_HDRINCL = 3;
|
||||
SOCKET_IP_OPTIONS = 4;
|
||||
|
||||
SOCKET_TCP_NODELAY = 1;
|
||||
SOCKET_TCP_MAXSEG = 2;
|
||||
SOCKET_TCP_CORK = 3;
|
||||
SOCKET_TCP_KEEPIDLE = 4;
|
||||
SOCKET_TCP_KEEPINTVL = 5;
|
||||
SOCKET_TCP_KEEPCNT = 6;
|
||||
SOCKET_TCP_SYNCNT = 7;
|
||||
SOCKET_TCP_LINGER2 = 8;
|
||||
SOCKET_TCP_DEFER_ACCEPT = 9;
|
||||
SOCKET_TCP_WINDOW_CLAMP = 10;
|
||||
SOCKET_TCP_INFO = 11;
|
||||
SOCKET_TCP_QUICKACK = 12;
|
||||
}
|
||||
|
||||
required SocketOptionLevel level = 1;
|
||||
required SocketOptionName option = 2;
|
||||
required bytes value = 3;
|
||||
}
|
||||
|
||||
|
||||
message SetSocketOptionsRequest {
|
||||
required string socket_descriptor = 1;
|
||||
repeated SocketOption options = 2;
|
||||
}
|
||||
|
||||
message SetSocketOptionsReply {
|
||||
}
|
||||
|
||||
message GetSocketOptionsRequest {
|
||||
required string socket_descriptor = 1;
|
||||
repeated SocketOption options = 2;
|
||||
}
|
||||
|
||||
message GetSocketOptionsReply {
|
||||
repeated SocketOption options = 2;
|
||||
}
|
||||
|
||||
|
||||
message ConnectRequest {
|
||||
required string socket_descriptor = 1;
|
||||
required AddressPort remote_ip = 2;
|
||||
optional double timeout_seconds = 3 [default=-1];
|
||||
}
|
||||
|
||||
message ConnectReply {
|
||||
optional AddressPort proxy_external_ip = 1;
|
||||
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
|
||||
message ListenRequest {
|
||||
required string socket_descriptor = 1;
|
||||
required int32 backlog = 2;
|
||||
}
|
||||
|
||||
message ListenReply {
|
||||
}
|
||||
|
||||
|
||||
message AcceptRequest {
|
||||
required string socket_descriptor = 1;
|
||||
optional double timeout_seconds = 2 [default=-1];
|
||||
}
|
||||
|
||||
message AcceptReply {
|
||||
optional bytes new_socket_descriptor = 2;
|
||||
optional AddressPort remote_address = 3;
|
||||
}
|
||||
|
||||
|
||||
|
||||
message ShutDownRequest {
|
||||
enum How {
|
||||
SOCKET_SHUT_RD = 1;
|
||||
SOCKET_SHUT_WR = 2;
|
||||
SOCKET_SHUT_RDWR = 3;
|
||||
}
|
||||
required string socket_descriptor = 1;
|
||||
required How how = 2;
|
||||
required int64 send_offset = 3;
|
||||
}
|
||||
|
||||
message ShutDownReply {
|
||||
}
|
||||
|
||||
|
||||
|
||||
message CloseRequest {
|
||||
required string socket_descriptor = 1;
|
||||
optional int64 send_offset = 2 [default=-1];
|
||||
}
|
||||
|
||||
message CloseReply {
|
||||
}
|
||||
|
||||
|
||||
|
||||
message SendRequest {
|
||||
required string socket_descriptor = 1;
|
||||
required bytes data = 2 [ctype=CORD];
|
||||
required int64 stream_offset = 3;
|
||||
optional int32 flags = 4 [default=0];
|
||||
optional AddressPort send_to = 5;
|
||||
optional double timeout_seconds = 6 [default=-1];
|
||||
}
|
||||
|
||||
message SendReply {
|
||||
optional int32 data_sent = 1;
|
||||
}
|
||||
|
||||
|
||||
message ReceiveRequest {
|
||||
enum Flags {
|
||||
MSG_OOB = 1;
|
||||
MSG_PEEK = 2;
|
||||
}
|
||||
required string socket_descriptor = 1;
|
||||
required int32 data_size = 2;
|
||||
optional int32 flags = 3 [default=0];
|
||||
optional double timeout_seconds = 5 [default=-1];
|
||||
}
|
||||
|
||||
message ReceiveReply {
|
||||
optional int64 stream_offset = 2;
|
||||
optional bytes data = 3 [ctype=CORD];
|
||||
optional AddressPort received_from = 4;
|
||||
optional int32 buffer_size = 5;
|
||||
}
|
||||
|
||||
|
||||
|
||||
message PollEvent {
|
||||
|
||||
enum PollEventFlag {
|
||||
SOCKET_POLLNONE = 0;
|
||||
SOCKET_POLLIN = 1;
|
||||
SOCKET_POLLPRI = 2;
|
||||
SOCKET_POLLOUT = 4;
|
||||
SOCKET_POLLERR = 8;
|
||||
SOCKET_POLLHUP = 16;
|
||||
SOCKET_POLLNVAL = 32;
|
||||
SOCKET_POLLRDNORM = 64;
|
||||
SOCKET_POLLRDBAND = 128;
|
||||
SOCKET_POLLWRNORM = 256;
|
||||
SOCKET_POLLWRBAND = 512;
|
||||
SOCKET_POLLMSG = 1024;
|
||||
SOCKET_POLLREMOVE = 4096;
|
||||
SOCKET_POLLRDHUP = 8192;
|
||||
};
|
||||
|
||||
required string socket_descriptor = 1;
|
||||
required int32 requested_events = 2;
|
||||
required int32 observed_events = 3;
|
||||
}
|
||||
|
||||
message PollRequest {
|
||||
repeated PollEvent events = 1;
|
||||
optional double timeout_seconds = 2 [default=-1];
|
||||
}
|
||||
|
||||
message PollReply {
|
||||
repeated PollEvent events = 2;
|
||||
}
|
||||
|
||||
message ResolveRequest {
|
||||
required string name = 1;
|
||||
repeated CreateSocketRequest.SocketFamily address_families = 2;
|
||||
}
|
||||
|
||||
message ResolveReply {
|
||||
enum ErrorCode {
|
||||
SOCKET_EAI_ADDRFAMILY = 1;
|
||||
SOCKET_EAI_AGAIN = 2;
|
||||
SOCKET_EAI_BADFLAGS = 3;
|
||||
SOCKET_EAI_FAIL = 4;
|
||||
SOCKET_EAI_FAMILY = 5;
|
||||
SOCKET_EAI_MEMORY = 6;
|
||||
SOCKET_EAI_NODATA = 7;
|
||||
SOCKET_EAI_NONAME = 8;
|
||||
SOCKET_EAI_SERVICE = 9;
|
||||
SOCKET_EAI_SOCKTYPE = 10;
|
||||
SOCKET_EAI_SYSTEM = 11;
|
||||
SOCKET_EAI_BADHINTS = 12;
|
||||
SOCKET_EAI_PROTOCOL = 13;
|
||||
SOCKET_EAI_OVERFLOW = 14;
|
||||
SOCKET_EAI_MAX = 15;
|
||||
};
|
||||
|
||||
repeated bytes packed_address = 2;
|
||||
optional string canonical_name = 3;
|
||||
repeated string aliases = 4;
|
||||
}
|
vendor/google.golang.org/appengine/socket/doc.go (new file, generated, vendored, 10 lines)
@@ -0,0 +1,10 @@
// Copyright 2012 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

// Package socket provides outbound network sockets.
//
// This package is only required in the classic App Engine environment.
// Applications running only in App Engine "flexible environment" should
// use the standard library's net package.
package socket
vendor/google.golang.org/appengine/socket/socket_classic.go (new file, generated, vendored, 290 lines)
@@ -0,0 +1,290 @@
// Copyright 2012 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

// +build appengine

package socket

import (
    "fmt"
    "io"
    "net"
    "strconv"
    "time"

    "github.com/golang/protobuf/proto"
    "golang.org/x/net/context"
    "google.golang.org/appengine/internal"

    pb "google.golang.org/appengine/internal/socket"
)

// Dial connects to the address addr on the network protocol.
// The address format is host:port, where host may be a hostname or an IP address.
// Known protocols are "tcp" and "udp".
// The returned connection satisfies net.Conn, and is valid while ctx is valid;
// if the connection is to be used after ctx becomes invalid, invoke SetContext
// with the new context.
func Dial(ctx context.Context, protocol, addr string) (*Conn, error) {
    return DialTimeout(ctx, protocol, addr, 0)
}

var ipFamilies = []pb.CreateSocketRequest_SocketFamily{
    pb.CreateSocketRequest_IPv4,
    pb.CreateSocketRequest_IPv6,
}

// DialTimeout is like Dial but takes a timeout.
// The timeout includes name resolution, if required.
func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) {
    dialCtx := ctx // Used for dialing and name resolution, but not stored in the *Conn.
    if timeout > 0 {
        var cancel context.CancelFunc
        dialCtx, cancel = context.WithTimeout(ctx, timeout)
        defer cancel()
    }

    host, portStr, err := net.SplitHostPort(addr)
    if err != nil {
        return nil, err
    }
    port, err := strconv.Atoi(portStr)
    if err != nil {
        return nil, fmt.Errorf("socket: bad port %q: %v", portStr, err)
    }

    var prot pb.CreateSocketRequest_SocketProtocol
    switch protocol {
    case "tcp":
        prot = pb.CreateSocketRequest_TCP
    case "udp":
        prot = pb.CreateSocketRequest_UDP
    default:
        return nil, fmt.Errorf("socket: unknown protocol %q", protocol)
    }

    packedAddrs, resolved, err := resolve(dialCtx, ipFamilies, host)
    if err != nil {
        return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err)
    }
    if len(packedAddrs) == 0 {
        return nil, fmt.Errorf("no addresses for %q", host)
    }

    packedAddr := packedAddrs[0] // use first address
    fam := pb.CreateSocketRequest_IPv4
    if len(packedAddr) == net.IPv6len {
        fam = pb.CreateSocketRequest_IPv6
    }

    req := &pb.CreateSocketRequest{
        Family:   fam.Enum(),
        Protocol: prot.Enum(),
        RemoteIp: &pb.AddressPort{
            Port:          proto.Int32(int32(port)),
            PackedAddress: packedAddr,
        },
    }
    if resolved {
        req.RemoteIp.HostnameHint = &host
    }
    res := &pb.CreateSocketReply{}
    if err := internal.Call(dialCtx, "remote_socket", "CreateSocket", req, res); err != nil {
        return nil, err
    }

    return &Conn{
        ctx:    ctx,
        desc:   res.GetSocketDescriptor(),
        prot:   prot,
        local:  res.ProxyExternalIp,
        remote: req.RemoteIp,
    }, nil
}

// LookupIP returns the given host's IP addresses.
func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) {
    packedAddrs, _, err := resolve(ctx, ipFamilies, host)
    if err != nil {
        return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err)
    }
    addrs = make([]net.IP, len(packedAddrs))
    for i, pa := range packedAddrs {
        addrs[i] = net.IP(pa)
    }
    return addrs, nil
}

func resolve(ctx context.Context, fams []pb.CreateSocketRequest_SocketFamily, host string) ([][]byte, bool, error) {
    // Check if it's an IP address.
    if ip := net.ParseIP(host); ip != nil {
        if ip := ip.To4(); ip != nil {
            return [][]byte{ip}, false, nil
        }
        return [][]byte{ip}, false, nil
    }

    req := &pb.ResolveRequest{
        Name:            &host,
        AddressFamilies: fams,
    }
    res := &pb.ResolveReply{}
    if err := internal.Call(ctx, "remote_socket", "Resolve", req, res); err != nil {
        // XXX: need to map to pb.ResolveReply_ErrorCode?
        return nil, false, err
    }
    return res.PackedAddress, true, nil
}

// withDeadline is like context.WithDeadline, except it ignores the zero deadline.
func withDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) {
    if deadline.IsZero() {
        return parent, func() {}
    }
    return context.WithDeadline(parent, deadline)
}

// Conn represents a socket connection.
// It implements net.Conn.
type Conn struct {
    ctx    context.Context
    desc   string
    offset int64

    prot          pb.CreateSocketRequest_SocketProtocol
    local, remote *pb.AddressPort

    readDeadline, writeDeadline time.Time // optional
}

// SetContext sets the context that is used by this Conn.
// It is usually used only when using a Conn that was created in a different context,
// such as when a connection is created during a warmup request but used while
// servicing a user request.
func (cn *Conn) SetContext(ctx context.Context) {
    cn.ctx = ctx
}

func (cn *Conn) Read(b []byte) (n int, err error) {
    const maxRead = 1 << 20
    if len(b) > maxRead {
        b = b[:maxRead]
    }

    req := &pb.ReceiveRequest{
        SocketDescriptor: &cn.desc,
        DataSize:         proto.Int32(int32(len(b))),
    }
    res := &pb.ReceiveReply{}
    if !cn.readDeadline.IsZero() {
        req.TimeoutSeconds = proto.Float64(cn.readDeadline.Sub(time.Now()).Seconds())
    }
    ctx, cancel := withDeadline(cn.ctx, cn.readDeadline)
    defer cancel()
    if err := internal.Call(ctx, "remote_socket", "Receive", req, res); err != nil {
        return 0, err
    }
    if len(res.Data) == 0 {
        return 0, io.EOF
    }
    if len(res.Data) > len(b) {
        return 0, fmt.Errorf("socket: internal error: read too much data: %d > %d", len(res.Data), len(b))
    }
    return copy(b, res.Data), nil
}

func (cn *Conn) Write(b []byte) (n int, err error) {
    const lim = 1 << 20 // max per chunk

    for n < len(b) {
        chunk := b[n:]
        if len(chunk) > lim {
            chunk = chunk[:lim]
        }

        req := &pb.SendRequest{
            SocketDescriptor: &cn.desc,
            Data:             chunk,
            StreamOffset:     &cn.offset,
        }
        res := &pb.SendReply{}
        if !cn.writeDeadline.IsZero() {
            req.TimeoutSeconds = proto.Float64(cn.writeDeadline.Sub(time.Now()).Seconds())
        }
        ctx, cancel := withDeadline(cn.ctx, cn.writeDeadline)
        defer cancel()
        if err = internal.Call(ctx, "remote_socket", "Send", req, res); err != nil {
            // assume zero bytes were sent in this RPC
            break
        }
        n += int(res.GetDataSent())
        cn.offset += int64(res.GetDataSent())
    }

    return
}

func (cn *Conn) Close() error {
    req := &pb.CloseRequest{
        SocketDescriptor: &cn.desc,
    }
    res := &pb.CloseReply{}
    if err := internal.Call(cn.ctx, "remote_socket", "Close", req, res); err != nil {
        return err
    }
    cn.desc = "CLOSED"
    return nil
}

func addr(prot pb.CreateSocketRequest_SocketProtocol, ap *pb.AddressPort) net.Addr {
    if ap == nil {
        return nil
    }
    switch prot {
    case pb.CreateSocketRequest_TCP:
        return &net.TCPAddr{
            IP:   net.IP(ap.PackedAddress),
            Port: int(*ap.Port),
        }
    case pb.CreateSocketRequest_UDP:
        return &net.UDPAddr{
            IP:   net.IP(ap.PackedAddress),
            Port: int(*ap.Port),
        }
    }
    panic("unknown protocol " + prot.String())
}

func (cn *Conn) LocalAddr() net.Addr  { return addr(cn.prot, cn.local) }
func (cn *Conn) RemoteAddr() net.Addr { return addr(cn.prot, cn.remote) }

func (cn *Conn) SetDeadline(t time.Time) error {
    cn.readDeadline = t
    cn.writeDeadline = t
    return nil
}

func (cn *Conn) SetReadDeadline(t time.Time) error {
    cn.readDeadline = t
    return nil
}

func (cn *Conn) SetWriteDeadline(t time.Time) error {
    cn.writeDeadline = t
    return nil
}

// KeepAlive signals that the connection is still in use.
// It may be called to prevent the socket being closed due to inactivity.
func (cn *Conn) KeepAlive() error {
    req := &pb.GetSocketNameRequest{
        SocketDescriptor: &cn.desc,
    }
    res := &pb.GetSocketNameReply{}
    return internal.Call(cn.ctx, "remote_socket", "GetSocketName", req, res)
}

func init() {
    internal.RegisterErrorCodeMap("remote_socket", pb.RemoteSocketServiceError_ErrorCode_name)
}
vendor/google.golang.org/appengine/socket/socket_vm.go (new file, generated, vendored, 64 lines)
@@ -0,0 +1,64 @@
// Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

// +build !appengine

package socket

import (
    "net"
    "time"

    "golang.org/x/net/context"
)

// Dial connects to the address addr on the network protocol.
// The address format is host:port, where host may be a hostname or an IP address.
// Known protocols are "tcp" and "udp".
// The returned connection satisfies net.Conn, and is valid while ctx is valid;
// if the connection is to be used after ctx becomes invalid, invoke SetContext
// with the new context.
func Dial(ctx context.Context, protocol, addr string) (*Conn, error) {
    conn, err := net.Dial(protocol, addr)
    if err != nil {
        return nil, err
    }
    return &Conn{conn}, nil
}

// DialTimeout is like Dial but takes a timeout.
// The timeout includes name resolution, if required.
func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) {
    conn, err := net.DialTimeout(protocol, addr, timeout)
    if err != nil {
        return nil, err
    }
    return &Conn{conn}, nil
}

// LookupIP returns the given host's IP addresses.
func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) {
    return net.LookupIP(host)
}

// Conn represents a socket connection.
// It implements net.Conn.
type Conn struct {
    net.Conn
}

// SetContext sets the context that is used by this Conn.
// It is usually used only when using a Conn that was created in a different context,
// such as when a connection is created during a warmup request but used while
// servicing a user request.
func (cn *Conn) SetContext(ctx context.Context) {
    // This function is not required in App Engine "flexible environment".
}

// KeepAlive signals that the connection is still in use.
// It may be called to prevent the socket being closed due to inactivity.
func (cn *Conn) KeepAlive() error {
    // This function is not required in App Engine "flexible environment".
    return nil
}
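Both build variants above expose the same Dial/DialTimeout/LookupIP/Conn surface. Below is a minimal usage sketch, not part of the diff, assuming a classic App Engine HTTP handler and a hypothetical target host example.com:80; the request-scoped context comes from appengine.NewContext, matching the Dial documentation above.

package main

import (
    "fmt"
    "net/http"

    "google.golang.org/appengine"
    "google.golang.org/appengine/socket"
)

// handler dials an outbound TCP connection from a classic App Engine app.
// The returned *Conn is only valid while the request context is valid.
func handler(w http.ResponseWriter, r *http.Request) {
    ctx := appengine.NewContext(r)
    conn, err := socket.Dial(ctx, "tcp", "example.com:80")
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    defer conn.Close()
    // Conn implements net.Conn, so it can be written to directly.
    fmt.Fprint(conn, "HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")
    fmt.Fprintln(w, "dialed", conn.RemoteAddr())
}

func main() {
    http.HandleFunc("/", handler)
    appengine.Main()
}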
vendor/google.golang.org/genproto/googleapis/storage/v2/storage.pb.go (new file, generated, vendored, 5175 lines; diff suppressed because it is too large)
vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go (new file, generated, vendored, 200 lines)
@@ -0,0 +1,200 @@
// Copyright 2021 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.26.0
|
||||
// protoc v3.12.2
|
||||
// source: google/type/date.proto
|
||||
|
||||
package date
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// Represents a whole or partial calendar date, such as a birthday. The time of
|
||||
// day and time zone are either specified elsewhere or are insignificant. The
|
||||
// date is relative to the Gregorian Calendar. This can represent one of the
|
||||
// following:
|
||||
//
|
||||
// * A full date, with non-zero year, month, and day values
|
||||
// * A month and day value, with a zero year, such as an anniversary
|
||||
// * A year on its own, with zero month and day values
|
||||
// * A year and month value, with a zero day, such as a credit card expiration
|
||||
// date
|
||||
//
|
||||
// Related types are [google.type.TimeOfDay][google.type.TimeOfDay] and
|
||||
// `google.protobuf.Timestamp`.
|
||||
type Date struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Year of the date. Must be from 1 to 9999, or 0 to specify a date without
|
||||
// a year.
|
||||
Year int32 `protobuf:"varint,1,opt,name=year,proto3" json:"year,omitempty"`
|
||||
// Month of a year. Must be from 1 to 12, or 0 to specify a year without a
|
||||
// month and day.
|
||||
Month int32 `protobuf:"varint,2,opt,name=month,proto3" json:"month,omitempty"`
|
||||
// Day of a month. Must be from 1 to 31 and valid for the year and month, or 0
|
||||
// to specify a year by itself or a year and month where the day isn't
|
||||
// significant.
|
||||
Day int32 `protobuf:"varint,3,opt,name=day,proto3" json:"day,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Date) Reset() {
|
||||
*x = Date{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_google_type_date_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Date) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Date) ProtoMessage() {}
|
||||
|
||||
func (x *Date) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_type_date_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Date.ProtoReflect.Descriptor instead.
|
||||
func (*Date) Descriptor() ([]byte, []int) {
|
||||
return file_google_type_date_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *Date) GetYear() int32 {
|
||||
if x != nil {
|
||||
return x.Year
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Date) GetMonth() int32 {
|
||||
if x != nil {
|
||||
return x.Month
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Date) GetDay() int32 {
|
||||
if x != nil {
|
||||
return x.Day
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
var File_google_type_date_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_google_type_date_proto_rawDesc = []byte{
|
||||
0x0a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x61,
|
||||
0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2e, 0x74, 0x79, 0x70, 0x65, 0x22, 0x42, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a,
|
||||
0x04, 0x79, 0x65, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x79, 0x65, 0x61,
|
||||
0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x6f, 0x6e, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05,
|
||||
0x52, 0x05, 0x6d, 0x6f, 0x6e, 0x74, 0x68, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x61, 0x79, 0x18, 0x03,
|
||||
0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x64, 0x61, 0x79, 0x42, 0x5d, 0x0a, 0x0f, 0x63, 0x6f, 0x6d,
|
||||
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x42, 0x09, 0x44, 0x61,
|
||||
0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
|
||||
0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x61, 0x74, 0x65, 0x3b, 0x64, 0x61, 0x74, 0x65, 0xf8,
|
||||
0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x54, 0x50, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_google_type_date_proto_rawDescOnce sync.Once
|
||||
file_google_type_date_proto_rawDescData = file_google_type_date_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_google_type_date_proto_rawDescGZIP() []byte {
|
||||
file_google_type_date_proto_rawDescOnce.Do(func() {
|
||||
file_google_type_date_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_type_date_proto_rawDescData)
|
||||
})
|
||||
return file_google_type_date_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_google_type_date_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
|
||||
var file_google_type_date_proto_goTypes = []interface{}{
|
||||
(*Date)(nil), // 0: google.type.Date
|
||||
}
|
||||
var file_google_type_date_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_google_type_date_proto_init() }
|
||||
func file_google_type_date_proto_init() {
|
||||
if File_google_type_date_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_google_type_date_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Date); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_google_type_date_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 1,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_google_type_date_proto_goTypes,
|
||||
DependencyIndexes: file_google_type_date_proto_depIdxs,
|
||||
MessageInfos: file_google_type_date_proto_msgTypes,
|
||||
}.Build()
|
||||
File_google_type_date_proto = out.File
|
||||
file_google_type_date_proto_rawDesc = nil
|
||||
file_google_type_date_proto_goTypes = nil
|
||||
file_google_type_date_proto_depIdxs = nil
|
||||
}
|
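The Date message documented above allows partial dates (a zero year, month, or day). A small sketch, not part of the diff, assuming the vendored import path from this change, constructing a full date and a year-plus-month expiration value:

package main

import (
    "fmt"

    datepb "google.golang.org/genproto/googleapis/type/date"
)

func main() {
    // A full calendar date.
    d := &datepb.Date{Year: 2021, Month: 8, Day: 18}
    // A credit-card style expiration: the day is left at zero, which the
    // message comments above define as "day not significant".
    exp := &datepb.Date{Year: 2024, Month: 11}
    fmt.Println(d.GetYear(), d.GetMonth(), d.GetDay())       // 2021 8 18
    fmt.Println(exp.GetYear(), exp.GetMonth(), exp.GetDay()) // 2024 11 0
}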
vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go (new file, generated, vendored, 960 lines)
@@ -0,0 +1,960 @@
// Copyright 2015 The gRPC Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// This file defines the GRPCLB LoadBalancing protocol.
|
||||
//
|
||||
// The canonical version of this proto can be found at
|
||||
// https://github.com/grpc/grpc-proto/blob/master/grpc/lb/v1/load_balancer.proto
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.25.0
|
||||
// protoc v3.14.0
|
||||
// source: grpc/lb/v1/load_balancer.proto
|
||||
|
||||
package grpc_lb_v1
|
||||
|
||||
import (
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
durationpb "google.golang.org/protobuf/types/known/durationpb"
|
||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// This is a compile-time assertion that a sufficiently up-to-date version
|
||||
// of the legacy proto package is being used.
|
||||
const _ = proto.ProtoPackageIsVersion4
|
||||
|
||||
type LoadBalanceRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Types that are assignable to LoadBalanceRequestType:
|
||||
// *LoadBalanceRequest_InitialRequest
|
||||
// *LoadBalanceRequest_ClientStats
|
||||
LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"`
|
||||
}
|
||||
|
||||
func (x *LoadBalanceRequest) Reset() {
|
||||
*x = LoadBalanceRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *LoadBalanceRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*LoadBalanceRequest) ProtoMessage() {}
|
||||
|
||||
func (x *LoadBalanceRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use LoadBalanceRequest.ProtoReflect.Descriptor instead.
|
||||
func (*LoadBalanceRequest) Descriptor() ([]byte, []int) {
|
||||
return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType {
|
||||
if m != nil {
|
||||
return m.LoadBalanceRequestType
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest {
|
||||
if x, ok := x.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok {
|
||||
return x.InitialRequest
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *LoadBalanceRequest) GetClientStats() *ClientStats {
|
||||
if x, ok := x.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok {
|
||||
return x.ClientStats
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type isLoadBalanceRequest_LoadBalanceRequestType interface {
|
||||
isLoadBalanceRequest_LoadBalanceRequestType()
|
||||
}
|
||||
|
||||
type LoadBalanceRequest_InitialRequest struct {
|
||||
// This message should be sent on the first request to the load balancer.
|
||||
InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,proto3,oneof"`
|
||||
}
|
||||
|
||||
type LoadBalanceRequest_ClientStats struct {
|
||||
// The client stats should be periodically reported to the load balancer
|
||||
// based on the duration defined in the InitialLoadBalanceResponse.
|
||||
ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {}
|
||||
|
||||
func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType() {}
|
||||
|
||||
type InitialLoadBalanceRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The name of the load balanced service (e.g., service.googleapis.com). Its
|
||||
// length should be less than 256 bytes.
|
||||
// The name might include a port number. How to handle the port number is up
|
||||
// to the balancer.
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
}
|
||||
|
||||
func (x *InitialLoadBalanceRequest) Reset() {
|
||||
*x = InitialLoadBalanceRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *InitialLoadBalanceRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*InitialLoadBalanceRequest) ProtoMessage() {}
|
||||
|
||||
func (x *InitialLoadBalanceRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use InitialLoadBalanceRequest.ProtoReflect.Descriptor instead.
|
||||
func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) {
|
||||
return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *InitialLoadBalanceRequest) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Contains the number of calls finished for a particular load balance token.
|
||||
type ClientStatsPerToken struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// See Server.load_balance_token.
|
||||
LoadBalanceToken string `protobuf:"bytes,1,opt,name=load_balance_token,json=loadBalanceToken,proto3" json:"load_balance_token,omitempty"`
|
||||
// The total number of RPCs that finished associated with the token.
|
||||
NumCalls int64 `protobuf:"varint,2,opt,name=num_calls,json=numCalls,proto3" json:"num_calls,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ClientStatsPerToken) Reset() {
|
||||
*x = ClientStatsPerToken{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ClientStatsPerToken) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ClientStatsPerToken) ProtoMessage() {}
|
||||
|
||||
func (x *ClientStatsPerToken) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ClientStatsPerToken.ProtoReflect.Descriptor instead.
|
||||
func (*ClientStatsPerToken) Descriptor() ([]byte, []int) {
|
||||
return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *ClientStatsPerToken) GetLoadBalanceToken() string {
|
||||
if x != nil {
|
||||
return x.LoadBalanceToken
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ClientStatsPerToken) GetNumCalls() int64 {
|
||||
if x != nil {
|
||||
return x.NumCalls
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Contains client level statistics that are useful to load balancing. Each
|
||||
// count except the timestamp should be reset to zero after reporting the stats.
|
||||
type ClientStats struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The timestamp of generating the report.
|
||||
Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
|
||||
// The total number of RPCs that started.
|
||||
NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted,proto3" json:"num_calls_started,omitempty"`
|
||||
// The total number of RPCs that finished.
|
||||
NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished,proto3" json:"num_calls_finished,omitempty"`
|
||||
// The total number of RPCs that failed to reach a server except dropped RPCs.
|
||||
NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend,proto3" json:"num_calls_finished_with_client_failed_to_send,omitempty"`
|
||||
// The total number of RPCs that finished and are known to have been received
|
||||
// by a server.
|
||||
NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived,proto3" json:"num_calls_finished_known_received,omitempty"`
|
||||
// The list of dropped calls.
|
||||
CallsFinishedWithDrop []*ClientStatsPerToken `protobuf:"bytes,8,rep,name=calls_finished_with_drop,json=callsFinishedWithDrop,proto3" json:"calls_finished_with_drop,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ClientStats) Reset() {
|
||||
*x = ClientStats{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ClientStats) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ClientStats) ProtoMessage() {}
|
||||
|
||||
func (x *ClientStats) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ClientStats.ProtoReflect.Descriptor instead.
|
||||
func (*ClientStats) Descriptor() ([]byte, []int) {
|
||||
return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *ClientStats) GetTimestamp() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.Timestamp
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ClientStats) GetNumCallsStarted() int64 {
|
||||
if x != nil {
|
||||
return x.NumCallsStarted
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *ClientStats) GetNumCallsFinished() int64 {
|
||||
if x != nil {
|
||||
return x.NumCallsFinished
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 {
|
||||
if x != nil {
|
||||
return x.NumCallsFinishedWithClientFailedToSend
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *ClientStats) GetNumCallsFinishedKnownReceived() int64 {
|
||||
if x != nil {
|
||||
return x.NumCallsFinishedKnownReceived
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *ClientStats) GetCallsFinishedWithDrop() []*ClientStatsPerToken {
|
||||
if x != nil {
|
||||
return x.CallsFinishedWithDrop
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type LoadBalanceResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Types that are assignable to LoadBalanceResponseType:
|
||||
// *LoadBalanceResponse_InitialResponse
|
||||
// *LoadBalanceResponse_ServerList
|
||||
// *LoadBalanceResponse_FallbackResponse
|
||||
LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"`
|
||||
}
|
||||
|
||||
func (x *LoadBalanceResponse) Reset() {
|
||||
*x = LoadBalanceResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *LoadBalanceResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*LoadBalanceResponse) ProtoMessage() {}
|
||||
|
||||
func (x *LoadBalanceResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use LoadBalanceResponse.ProtoReflect.Descriptor instead.
|
||||
func (*LoadBalanceResponse) Descriptor() ([]byte, []int) {
|
||||
return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType {
|
||||
if m != nil {
|
||||
return m.LoadBalanceResponseType
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse {
|
||||
if x, ok := x.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok {
|
||||
return x.InitialResponse
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *LoadBalanceResponse) GetServerList() *ServerList {
|
||||
if x, ok := x.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok {
|
||||
return x.ServerList
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *LoadBalanceResponse) GetFallbackResponse() *FallbackResponse {
|
||||
if x, ok := x.GetLoadBalanceResponseType().(*LoadBalanceResponse_FallbackResponse); ok {
|
||||
return x.FallbackResponse
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type isLoadBalanceResponse_LoadBalanceResponseType interface {
|
||||
isLoadBalanceResponse_LoadBalanceResponseType()
|
||||
}
|
||||
|
||||
type LoadBalanceResponse_InitialResponse struct {
|
||||
// This message should be sent on the first response to the client.
|
||||
InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,proto3,oneof"`
|
||||
}
|
||||
|
||||
type LoadBalanceResponse_ServerList struct {
|
||||
// Contains the list of servers selected by the load balancer. The client
|
||||
// should send requests to these servers in the specified order.
|
||||
ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,proto3,oneof"`
|
||||
}
|
||||
|
||||
type LoadBalanceResponse_FallbackResponse struct {
|
||||
// If this field is set, then the client should eagerly enter fallback
|
||||
// mode (even if there are existing, healthy connections to backends).
|
||||
FallbackResponse *FallbackResponse `protobuf:"bytes,3,opt,name=fallback_response,json=fallbackResponse,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {}
|
||||
|
||||
func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponseType() {}
|
||||
|
||||
func (*LoadBalanceResponse_FallbackResponse) isLoadBalanceResponse_LoadBalanceResponseType() {}
|
||||
|
||||
type FallbackResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *FallbackResponse) Reset() {
|
||||
*x = FallbackResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *FallbackResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*FallbackResponse) ProtoMessage() {}
|
||||
|
||||
func (x *FallbackResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use FallbackResponse.ProtoReflect.Descriptor instead.
|
||||
func (*FallbackResponse) Descriptor() ([]byte, []int) {
|
||||
return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
type InitialLoadBalanceResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// This interval defines how often the client should send the client stats
|
||||
// to the load balancer. Stats should only be reported when the duration is
|
||||
// positive.
|
||||
ClientStatsReportInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval,proto3" json:"client_stats_report_interval,omitempty"`
|
||||
}
|
||||
|
||||
func (x *InitialLoadBalanceResponse) Reset() {
|
||||
*x = InitialLoadBalanceResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *InitialLoadBalanceResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*InitialLoadBalanceResponse) ProtoMessage() {}
|
||||
|
||||
func (x *InitialLoadBalanceResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use InitialLoadBalanceResponse.ProtoReflect.Descriptor instead.
|
||||
func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) {
|
||||
return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{6}
|
||||
}
|
||||
|
||||
func (x *InitialLoadBalanceResponse) GetClientStatsReportInterval() *durationpb.Duration {
|
||||
if x != nil {
|
||||
return x.ClientStatsReportInterval
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ServerList struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Contains a list of servers selected by the load balancer. The list will
|
||||
// be updated when server resolutions change or as needed to balance load
|
||||
// across more servers. The client should consume the server list in order
|
||||
// unless instructed otherwise via the client_config.
|
||||
Servers []*Server `protobuf:"bytes,1,rep,name=servers,proto3" json:"servers,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ServerList) Reset() {
|
||||
*x = ServerList{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ServerList) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ServerList) ProtoMessage() {}
|
||||
|
||||
func (x *ServerList) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ServerList.ProtoReflect.Descriptor instead.
|
||||
func (*ServerList) Descriptor() ([]byte, []int) {
|
||||
return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{7}
|
||||
}
|
||||
|
||||
func (x *ServerList) GetServers() []*Server {
|
||||
if x != nil {
|
||||
return x.Servers
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Contains server information. When the drop field is not true, use the other
|
||||
// fields.
|
||||
type Server struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// A resolved address for the server, serialized in network-byte-order. It may
|
||||
// either be an IPv4 or IPv6 address.
|
||||
IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"`
|
||||
// A resolved port number for the server.
|
||||
Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"`
|
||||
// An opaque but printable token for load reporting. The client must include
|
||||
// the token of the picked server into the initial metadata when it starts a
|
||||
// call to that server. The token is used by the server to verify the request
|
||||
// and to allow the server to report load to the gRPC LB system. The token is
|
||||
// also used in client stats for reporting dropped calls.
|
||||
//
|
||||
// Its length can be variable but must be less than 50 bytes.
|
||||
LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken,proto3" json:"load_balance_token,omitempty"`
|
||||
// Indicates whether this particular request should be dropped by the client.
|
||||
// If the request is dropped, there will be a corresponding entry in
|
||||
// ClientStats.calls_finished_with_drop.
|
||||
Drop bool `protobuf:"varint,4,opt,name=drop,proto3" json:"drop,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Server) Reset() {
|
||||
*x = Server{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Server) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Server) ProtoMessage() {}
|
||||
|
||||
func (x *Server) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Server.ProtoReflect.Descriptor instead.
|
||||
func (*Server) Descriptor() ([]byte, []int) {
|
||||
return file_grpc_lb_v1_load_balancer_proto_rawDescGZIP(), []int{8}
|
||||
}
|
||||
|
||||
func (x *Server) GetIpAddress() []byte {
|
||||
if x != nil {
|
||||
return x.IpAddress
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Server) GetPort() int32 {
|
||||
if x != nil {
|
||||
return x.Port
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Server) GetLoadBalanceToken() string {
|
||||
if x != nil {
|
||||
return x.LoadBalanceToken
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Server) GetDrop() bool {
|
||||
if x != nil {
|
||||
return x.Drop
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var File_grpc_lb_v1_load_balancer_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_grpc_lb_v1_load_balancer_proto_rawDesc = []byte{
|
||||
0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x62, 0x2f, 0x76, 0x31, 0x2f, 0x6c, 0x6f, 0x61,
|
||||
0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x12, 0x0a, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75,
|
||||
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69,
|
||||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc1, 0x01,
|
||||
0x0a, 0x12, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f,
|
||||
0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e,
|
||||
0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69,
|
||||
0x61, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0e, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x52,
|
||||
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
|
||||
0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67,
|
||||
0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
|
||||
0x53, 0x74, 0x61, 0x74, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53,
|
||||
0x74, 0x61, 0x74, 0x73, 0x42, 0x1b, 0x0a, 0x19, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c,
|
||||
0x61, 0x6e, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70,
|
||||
0x65, 0x22, 0x2f, 0x0a, 0x19, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4c, 0x6f, 0x61, 0x64,
|
||||
0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12,
|
||||
0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
|
||||
0x6d, 0x65, 0x22, 0x60, 0x0a, 0x13, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74,
|
||||
0x73, 0x50, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x6f, 0x61,
|
||||
0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e,
|
||||
0x63, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x75, 0x6d, 0x5f, 0x63,
|
||||
0x61, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6e, 0x75, 0x6d, 0x43,
|
||||
0x61, 0x6c, 0x6c, 0x73, 0x22, 0xb0, 0x03, 0x0a, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53,
|
||||
0x74, 0x61, 0x74, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
|
||||
0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
|
||||
0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2a,
|
||||
0x0a, 0x11, 0x6e, 0x75, 0x6d, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x72,
|
||||
0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x6e, 0x75, 0x6d, 0x43, 0x61,
|
||||
0x6c, 0x6c, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x75,
|
||||
0x6d, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64,
|
||||
0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x43, 0x61, 0x6c, 0x6c, 0x73,
|
||||
0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x12, 0x5d, 0x0a, 0x2d, 0x6e, 0x75, 0x6d, 0x5f,
|
||||
0x63, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x77,
|
||||
0x69, 0x74, 0x68, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65,
|
||||
0x64, 0x5f, 0x74, 0x6f, 0x5f, 0x73, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52,
|
||||
0x26, 0x6e, 0x75, 0x6d, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65,
|
||||
0x64, 0x57, 0x69, 0x74, 0x68, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65,
|
||||
0x64, 0x54, 0x6f, 0x53, 0x65, 0x6e, 0x64, 0x12, 0x48, 0x0a, 0x21, 0x6e, 0x75, 0x6d, 0x5f, 0x63,
|
||||
0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x6b, 0x6e,
|
||||
0x6f, 0x77, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x18, 0x07, 0x20, 0x01,
|
||||
0x28, 0x03, 0x52, 0x1d, 0x6e, 0x75, 0x6d, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x46, 0x69, 0x6e, 0x69,
|
||||
0x73, 0x68, 0x65, 0x64, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65,
|
||||
0x64, 0x12, 0x58, 0x0a, 0x18, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73,
|
||||
0x68, 0x65, 0x64, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x18, 0x08, 0x20,
|
||||
0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x65, 0x72, 0x54,
|
||||
0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x15, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x46, 0x69, 0x6e, 0x69, 0x73,
|
||||
0x68, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x44, 0x72, 0x6f, 0x70, 0x4a, 0x04, 0x08, 0x04, 0x10,
|
||||
0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x90, 0x02, 0x0a, 0x13, 0x4c, 0x6f, 0x61, 0x64,
|
||||
0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
|
||||
0x53, 0x0a, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x72, 0x70, 0x63,
|
||||
0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4c, 0x6f,
|
||||
0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x48, 0x00, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6c,
|
||||
0x69, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x72, 0x70, 0x63,
|
||||
0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x69, 0x73,
|
||||
0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12,
|
||||
0x4b, 0x0a, 0x11, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x72, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x72, 0x70,
|
||||
0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b,
|
||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x10, 0x66, 0x61, 0x6c, 0x6c,
|
||||
0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x1c, 0x0a, 0x1a,
|
||||
0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0x12, 0x0a, 0x10, 0x46, 0x61,
|
||||
0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7e,
|
||||
0x0a, 0x1a, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c,
|
||||
0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x1c,
|
||||
0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x72, 0x65, 0x70,
|
||||
0x6f, 0x72, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01,
|
||||
0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x19, 0x63,
|
||||
0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74,
|
||||
0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x40,
|
||||
0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x07,
|
||||
0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e,
|
||||
0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65,
|
||||
0x72, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04,
|
||||
0x22, 0x83, 0x01, 0x0a, 0x06, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x69,
|
||||
0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
|
||||
0x09, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f,
|
||||
0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x2c,
|
||||
0x0a, 0x12, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x74,
|
||||
0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x6f, 0x61, 0x64,
|
||||
0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x12, 0x0a, 0x04,
|
||||
0x64, 0x72, 0x6f, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x72, 0x6f, 0x70,
|
||||
0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x32, 0x62, 0x0a, 0x0c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61,
|
||||
0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0b, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63,
|
||||
0x65, 0x4c, 0x6f, 0x61, 0x64, 0x12, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x57, 0x0a, 0x0d, 0x69, 0x6f,
|
||||
0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x62, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x4c, 0x6f, 0x61,
|
||||
0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
|
||||
0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
|
||||
0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65,
|
||||
0x72, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x62,
|
||||
0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_grpc_lb_v1_load_balancer_proto_rawDescOnce sync.Once
|
||||
file_grpc_lb_v1_load_balancer_proto_rawDescData = file_grpc_lb_v1_load_balancer_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_grpc_lb_v1_load_balancer_proto_rawDescGZIP() []byte {
|
||||
file_grpc_lb_v1_load_balancer_proto_rawDescOnce.Do(func() {
|
||||
file_grpc_lb_v1_load_balancer_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_lb_v1_load_balancer_proto_rawDescData)
|
||||
})
|
||||
return file_grpc_lb_v1_load_balancer_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_grpc_lb_v1_load_balancer_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
|
||||
var file_grpc_lb_v1_load_balancer_proto_goTypes = []interface{}{
|
||||
(*LoadBalanceRequest)(nil), // 0: grpc.lb.v1.LoadBalanceRequest
|
||||
(*InitialLoadBalanceRequest)(nil), // 1: grpc.lb.v1.InitialLoadBalanceRequest
|
||||
(*ClientStatsPerToken)(nil), // 2: grpc.lb.v1.ClientStatsPerToken
|
||||
(*ClientStats)(nil), // 3: grpc.lb.v1.ClientStats
|
||||
(*LoadBalanceResponse)(nil), // 4: grpc.lb.v1.LoadBalanceResponse
|
||||
(*FallbackResponse)(nil), // 5: grpc.lb.v1.FallbackResponse
|
||||
(*InitialLoadBalanceResponse)(nil), // 6: grpc.lb.v1.InitialLoadBalanceResponse
|
||||
(*ServerList)(nil), // 7: grpc.lb.v1.ServerList
|
||||
(*Server)(nil), // 8: grpc.lb.v1.Server
|
||||
(*timestamppb.Timestamp)(nil), // 9: google.protobuf.Timestamp
|
||||
(*durationpb.Duration)(nil), // 10: google.protobuf.Duration
|
||||
}
|
||||
var file_grpc_lb_v1_load_balancer_proto_depIdxs = []int32{
|
||||
1, // 0: grpc.lb.v1.LoadBalanceRequest.initial_request:type_name -> grpc.lb.v1.InitialLoadBalanceRequest
|
||||
3, // 1: grpc.lb.v1.LoadBalanceRequest.client_stats:type_name -> grpc.lb.v1.ClientStats
|
||||
9, // 2: grpc.lb.v1.ClientStats.timestamp:type_name -> google.protobuf.Timestamp
|
||||
2, // 3: grpc.lb.v1.ClientStats.calls_finished_with_drop:type_name -> grpc.lb.v1.ClientStatsPerToken
|
||||
6, // 4: grpc.lb.v1.LoadBalanceResponse.initial_response:type_name -> grpc.lb.v1.InitialLoadBalanceResponse
|
||||
7, // 5: grpc.lb.v1.LoadBalanceResponse.server_list:type_name -> grpc.lb.v1.ServerList
|
||||
5, // 6: grpc.lb.v1.LoadBalanceResponse.fallback_response:type_name -> grpc.lb.v1.FallbackResponse
|
||||
10, // 7: grpc.lb.v1.InitialLoadBalanceResponse.client_stats_report_interval:type_name -> google.protobuf.Duration
|
||||
8, // 8: grpc.lb.v1.ServerList.servers:type_name -> grpc.lb.v1.Server
|
||||
0, // 9: grpc.lb.v1.LoadBalancer.BalanceLoad:input_type -> grpc.lb.v1.LoadBalanceRequest
|
||||
4, // 10: grpc.lb.v1.LoadBalancer.BalanceLoad:output_type -> grpc.lb.v1.LoadBalanceResponse
|
||||
10, // [10:11] is the sub-list for method output_type
|
||||
9, // [9:10] is the sub-list for method input_type
|
||||
9, // [9:9] is the sub-list for extension type_name
|
||||
9, // [9:9] is the sub-list for extension extendee
|
||||
0, // [0:9] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_grpc_lb_v1_load_balancer_proto_init() }
|
||||
func file_grpc_lb_v1_load_balancer_proto_init() {
|
||||
if File_grpc_lb_v1_load_balancer_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_grpc_lb_v1_load_balancer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*LoadBalanceRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_grpc_lb_v1_load_balancer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*InitialLoadBalanceRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_grpc_lb_v1_load_balancer_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ClientStatsPerToken); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_grpc_lb_v1_load_balancer_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ClientStats); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_grpc_lb_v1_load_balancer_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*LoadBalanceResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_grpc_lb_v1_load_balancer_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*FallbackResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_grpc_lb_v1_load_balancer_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*InitialLoadBalanceResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_grpc_lb_v1_load_balancer_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ServerList); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_grpc_lb_v1_load_balancer_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Server); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
file_grpc_lb_v1_load_balancer_proto_msgTypes[0].OneofWrappers = []interface{}{
|
||||
(*LoadBalanceRequest_InitialRequest)(nil),
|
||||
(*LoadBalanceRequest_ClientStats)(nil),
|
||||
}
|
||||
file_grpc_lb_v1_load_balancer_proto_msgTypes[4].OneofWrappers = []interface{}{
|
||||
(*LoadBalanceResponse_InitialResponse)(nil),
|
||||
(*LoadBalanceResponse_ServerList)(nil),
|
||||
(*LoadBalanceResponse_FallbackResponse)(nil),
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_grpc_lb_v1_load_balancer_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 9,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
GoTypes: file_grpc_lb_v1_load_balancer_proto_goTypes,
|
||||
DependencyIndexes: file_grpc_lb_v1_load_balancer_proto_depIdxs,
|
||||
MessageInfos: file_grpc_lb_v1_load_balancer_proto_msgTypes,
|
||||
}.Build()
|
||||
File_grpc_lb_v1_load_balancer_proto = out.File
|
||||
file_grpc_lb_v1_load_balancer_proto_rawDesc = nil
|
||||
file_grpc_lb_v1_load_balancer_proto_goTypes = nil
|
||||
file_grpc_lb_v1_load_balancer_proto_depIdxs = nil
|
||||
}
|
vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go (generated, vendored, new file, 137 lines)
@@ -0,0 +1,137 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
// versions:
|
||||
// - protoc-gen-go-grpc v1.1.0
|
||||
// - protoc v3.14.0
|
||||
// source: grpc/lb/v1/load_balancer.proto
|
||||
|
||||
package grpc_lb_v1
|
||||
|
||||
import (
|
||||
context "context"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
// Requires gRPC-Go v1.32.0 or later.
|
||||
const _ = grpc.SupportPackageIsVersion7
|
||||
|
||||
// LoadBalancerClient is the client API for LoadBalancer service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||
type LoadBalancerClient interface {
|
||||
// Bidirectional rpc to get a list of servers.
|
||||
BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error)
|
||||
}
|
||||
|
||||
type loadBalancerClient struct {
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewLoadBalancerClient(cc grpc.ClientConnInterface) LoadBalancerClient {
|
||||
return &loadBalancerClient{cc}
|
||||
}
|
||||
|
||||
func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &loadBalancerBalanceLoadClient{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type LoadBalancer_BalanceLoadClient interface {
|
||||
Send(*LoadBalanceRequest) error
|
||||
Recv() (*LoadBalanceResponse, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type loadBalancerBalanceLoadClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *loadBalancerBalanceLoadClient) Send(m *LoadBalanceRequest) error {
|
||||
return x.ClientStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *loadBalancerBalanceLoadClient) Recv() (*LoadBalanceResponse, error) {
|
||||
m := new(LoadBalanceResponse)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
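// Illustrative sketch, not part of the vendored file: opening the BalanceLoad
// stream from an existing ClientConn, sending the initial request and reading
// responses. The service name is hypothetical; the context, io and log imports
// are assumed, and the oneof wrapper and field names are the standard
// protoc-gen-go names, assumed here.
func exampleBalanceLoad(cc grpc.ClientConnInterface) {
	client := NewLoadBalancerClient(cc)
	stream, err := client.BalanceLoad(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// The first message on the stream names the target service.
	initial := &LoadBalanceRequest{
		LoadBalanceRequestType: &LoadBalanceRequest_InitialRequest{
			InitialRequest: &InitialLoadBalanceRequest{Name: "my-service"},
		},
	}
	if err := stream.Send(initial); err != nil {
		log.Fatal(err)
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return
		}
		if err != nil {
			log.Fatal(err)
		}
		_ = resp // carries either an initial response or a server list
	}
}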
|
||||
|
||||
// LoadBalancerServer is the server API for LoadBalancer service.
|
||||
// All implementations should embed UnimplementedLoadBalancerServer
|
||||
// for forward compatibility
|
||||
type LoadBalancerServer interface {
|
||||
// Bidirectional rpc to get a list of servers.
|
||||
BalanceLoad(LoadBalancer_BalanceLoadServer) error
|
||||
}
|
||||
|
||||
// UnimplementedLoadBalancerServer should be embedded to have forward compatible implementations.
|
||||
type UnimplementedLoadBalancerServer struct {
|
||||
}
|
||||
|
||||
func (UnimplementedLoadBalancerServer) BalanceLoad(LoadBalancer_BalanceLoadServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method BalanceLoad not implemented")
|
||||
}
|
||||
|
||||
// UnsafeLoadBalancerServer may be embedded to opt out of forward compatibility for this service.
|
||||
// Use of this interface is not recommended, as added methods to LoadBalancerServer will
|
||||
// result in compilation errors.
|
||||
type UnsafeLoadBalancerServer interface {
|
||||
mustEmbedUnimplementedLoadBalancerServer()
|
||||
}
|
||||
|
||||
func RegisterLoadBalancerServer(s grpc.ServiceRegistrar, srv LoadBalancerServer) {
|
||||
s.RegisterService(&LoadBalancer_ServiceDesc, srv)
|
||||
}
|
||||
|
||||
func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{stream})
|
||||
}
|
||||
|
||||
type LoadBalancer_BalanceLoadServer interface {
|
||||
Send(*LoadBalanceResponse) error
|
||||
Recv() (*LoadBalanceRequest, error)
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type loadBalancerBalanceLoadServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *loadBalancerBalanceLoadServer) Send(m *LoadBalanceResponse) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *loadBalancerBalanceLoadServer) Recv() (*LoadBalanceRequest, error) {
|
||||
m := new(LoadBalanceRequest)
|
||||
if err := x.ServerStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// LoadBalancer_ServiceDesc is the grpc.ServiceDesc for LoadBalancer service.
|
||||
// It's only intended for direct use with grpc.RegisterService,
|
||||
// and not to be introspected or modified (even as a copy)
|
||||
var LoadBalancer_ServiceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "grpc.lb.v1.LoadBalancer",
|
||||
HandlerType: (*LoadBalancerServer)(nil),
|
||||
Methods: []grpc.MethodDesc{},
|
||||
Streams: []grpc.StreamDesc{
|
||||
{
|
||||
StreamName: "BalanceLoad",
|
||||
Handler: _LoadBalancer_BalanceLoad_Handler,
|
||||
ServerStreams: true,
|
||||
ClientStreams: true,
|
||||
},
|
||||
},
|
||||
Metadata: "grpc/lb/v1/load_balancer.proto",
|
||||
}
|
vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go (generated, vendored, new file, 490 lines)
@@ -0,0 +1,490 @@
/*
|
||||
*
|
||||
* Copyright 2016 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package grpclb defines a grpclb balancer.
|
||||
//
|
||||
// To install grpclb balancer, import this package as:
|
||||
// import _ "google.golang.org/grpc/balancer/grpclb"
|
||||
package grpclb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/balancer"
|
||||
grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal"
|
||||
"google.golang.org/grpc/internal/backoff"
|
||||
"google.golang.org/grpc/internal/resolver/dns"
|
||||
"google.golang.org/grpc/resolver"
|
||||
|
||||
durationpb "github.com/golang/protobuf/ptypes/duration"
|
||||
lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1"
|
||||
)
|
||||
|
||||
const (
|
||||
lbTokenKey = "lb-token"
|
||||
defaultFallbackTimeout = 10 * time.Second
|
||||
grpclbName = "grpclb"
|
||||
)
|
||||
|
||||
var errServerTerminatedConnection = errors.New("grpclb: failed to recv server list: server terminated connection")
|
||||
var logger = grpclog.Component("grpclb")
|
||||
|
||||
func convertDuration(d *durationpb.Duration) time.Duration {
|
||||
if d == nil {
|
||||
return 0
|
||||
}
|
||||
return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
|
||||
}
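// Worked example, not part of the vendored file: convertDuration maps a
// protobuf Duration to a Go time.Duration, so a hypothetical
// &durationpb.Duration{Seconds: 3, Nanos: 500000000} yields 3.5s, and a nil
// Duration yields 0.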
|
||||
|
||||
// Client API for LoadBalancer service.
|
||||
// Mostly copied from the generated pb.go file to avoid a circular dependency.
|
||||
type loadBalancerClient struct {
|
||||
cc *grpc.ClientConn
|
||||
}
|
||||
|
||||
func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (*balanceLoadClientStream, error) {
|
||||
desc := &grpc.StreamDesc{
|
||||
StreamName: "BalanceLoad",
|
||||
ServerStreams: true,
|
||||
ClientStreams: true,
|
||||
}
|
||||
stream, err := c.cc.NewStream(ctx, desc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &balanceLoadClientStream{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type balanceLoadClientStream struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error {
|
||||
return x.ClientStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) {
|
||||
m := new(lbpb.LoadBalanceResponse)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
balancer.Register(newLBBuilder())
|
||||
dns.EnableSRVLookups = true
|
||||
}
|
||||
|
||||
// newLBBuilder creates a builder for grpclb.
|
||||
func newLBBuilder() balancer.Builder {
|
||||
return newLBBuilderWithFallbackTimeout(defaultFallbackTimeout)
|
||||
}
|
||||
|
||||
// newLBBuilderWithFallbackTimeout creates a grpclb builder with the given
|
||||
// fallbackTimeout. If no response is received from the remote balancer within
|
||||
// fallbackTimeout, the backend addresses from the resolved address list will be
|
||||
// used.
|
||||
//
|
||||
// Only call this function when a non-default fallback timeout is needed.
|
||||
func newLBBuilderWithFallbackTimeout(fallbackTimeout time.Duration) balancer.Builder {
|
||||
return &lbBuilder{
|
||||
fallbackTimeout: fallbackTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
type lbBuilder struct {
|
||||
fallbackTimeout time.Duration
|
||||
}
|
||||
|
||||
func (b *lbBuilder) Name() string {
|
||||
return grpclbName
|
||||
}
|
||||
|
||||
func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
|
||||
// This generates a manual resolver builder with a fixed scheme. This
|
||||
// scheme will be used to dial to remote LB, so we can send filtered
|
||||
// address updates to remote LB ClientConn using this manual resolver.
|
||||
r := &lbManualResolver{scheme: "grpclb-internal", ccb: cc}
|
||||
|
||||
lb := &lbBalancer{
|
||||
cc: newLBCacheClientConn(cc),
|
||||
target: opt.Target.Endpoint,
|
||||
opt: opt,
|
||||
fallbackTimeout: b.fallbackTimeout,
|
||||
doneCh: make(chan struct{}),
|
||||
|
||||
manualResolver: r,
|
||||
subConns: make(map[resolver.Address]balancer.SubConn),
|
||||
scStates: make(map[balancer.SubConn]connectivity.State),
|
||||
picker: &errPicker{err: balancer.ErrNoSubConnAvailable},
|
||||
clientStats: newRPCStats(),
|
||||
backoff: backoff.DefaultExponential, // TODO: make backoff configurable.
|
||||
}
|
||||
|
||||
var err error
|
||||
if opt.CredsBundle != nil {
|
||||
lb.grpclbClientConnCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBalancer)
|
||||
if err != nil {
|
||||
logger.Warningf("lbBalancer: client connection creds NewWithMode failed: %v", err)
|
||||
}
|
||||
lb.grpclbBackendCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBackendFromBalancer)
|
||||
if err != nil {
|
||||
logger.Warningf("lbBalancer: backend creds NewWithMode failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return lb
|
||||
}
|
||||
|
||||
type lbBalancer struct {
|
||||
cc *lbCacheClientConn
|
||||
target string
|
||||
opt balancer.BuildOptions
|
||||
|
||||
usePickFirst bool
|
||||
|
||||
// grpclbClientConnCreds is the creds bundle to be used to connect to grpclb
|
||||
// servers. If it's nil, use the TransportCredentials from BuildOptions
|
||||
// instead.
|
||||
grpclbClientConnCreds credentials.Bundle
|
||||
// grpclbBackendCreds is the creds bundle to be used for addresses that are
|
||||
// returned by grpclb server. If it's nil, don't set anything when creating
|
||||
// SubConns.
|
||||
grpclbBackendCreds credentials.Bundle
|
||||
|
||||
fallbackTimeout time.Duration
|
||||
doneCh chan struct{}
|
||||
|
||||
// manualResolver is used in the remote LB ClientConn inside grpclb. When
|
||||
// resolved address updates are received by grpclb, filtered updates will be
|
||||
// sent to remote LB ClientConn through this resolver.
|
||||
manualResolver *lbManualResolver
|
||||
// The ClientConn to talk to the remote balancer.
|
||||
ccRemoteLB *remoteBalancerCCWrapper
|
||||
// backoff for calling remote balancer.
|
||||
backoff backoff.Strategy
|
||||
|
||||
// Support client side load reporting. Each picker gets a reference to this,
|
||||
// and will update its content.
|
||||
clientStats *rpcStats
|
||||
|
||||
mu sync.Mutex // guards everything following.
|
||||
// The full server list including drops, used to check if the newly received
|
||||
// serverList contains anything new. Each generated picker will also have a
// reference to this list to do the first layer pick.
|
||||
fullServerList []*lbpb.Server
|
||||
// Backend addresses. It's kept so the addresses are available when
|
||||
// switching between round_robin and pickfirst.
|
||||
backendAddrs []resolver.Address
|
||||
// All backend addresses, with metadata set to nil. This list contains all
// backend addresses in the same order and with the same duplicates as in the
// serverlist. When generating a picker, a SubConn slice with the same order
// but with only READY SCs will be generated.
|
||||
backendAddrsWithoutMetadata []resolver.Address
|
||||
// Roundrobin functionalities.
|
||||
state connectivity.State
|
||||
subConns map[resolver.Address]balancer.SubConn // Used to new/remove SubConn.
|
||||
scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns.
|
||||
picker balancer.Picker
|
||||
// Support fallback to resolved backend addresses if there's no response
|
||||
// from remote balancer within fallbackTimeout.
|
||||
remoteBalancerConnected bool
|
||||
serverListReceived bool
|
||||
inFallback bool
|
||||
// resolvedBackendAddrs is resolvedAddrs minus remote balancers. It's set
|
||||
// when resolved address updates are received, and read in the goroutine
|
||||
// handling fallback.
|
||||
resolvedBackendAddrs []resolver.Address
|
||||
connErr error // the last connection error
|
||||
}
|
||||
|
||||
// regeneratePicker takes a snapshot of the balancer, and generates a picker from
|
||||
// it. The picker
|
||||
// - always returns ErrTransientFailure if the balancer is in TransientFailure,
|
||||
// - does two layer roundrobin pick otherwise.
|
||||
// Caller must hold lb.mu.
|
||||
func (lb *lbBalancer) regeneratePicker(resetDrop bool) {
|
||||
if lb.state == connectivity.TransientFailure {
|
||||
lb.picker = &errPicker{err: fmt.Errorf("all SubConns are in TransientFailure, last connection error: %v", lb.connErr)}
|
||||
return
|
||||
}
|
||||
|
||||
if lb.state == connectivity.Connecting {
|
||||
lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable}
|
||||
return
|
||||
}
|
||||
|
||||
var readySCs []balancer.SubConn
|
||||
if lb.usePickFirst {
|
||||
for _, sc := range lb.subConns {
|
||||
readySCs = append(readySCs, sc)
|
||||
break
|
||||
}
|
||||
} else {
|
||||
for _, a := range lb.backendAddrsWithoutMetadata {
|
||||
if sc, ok := lb.subConns[a]; ok {
|
||||
if st, ok := lb.scStates[sc]; ok && st == connectivity.Ready {
|
||||
readySCs = append(readySCs, sc)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(readySCs) <= 0 {
|
||||
// If there are no ready SubConns, always re-pick. This is to avoid drops
// unless at least one SubConn is ready. Otherwise we may drop more
// often than we want because of drops + re-picks (which become re-drops).
|
||||
//
|
||||
// This doesn't seem to be necessary after the connecting check above.
|
||||
// Kept for safety.
|
||||
lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable}
|
||||
return
|
||||
}
|
||||
if lb.inFallback {
|
||||
lb.picker = newRRPicker(readySCs)
|
||||
return
|
||||
}
|
||||
if resetDrop {
|
||||
lb.picker = newLBPicker(lb.fullServerList, readySCs, lb.clientStats)
|
||||
return
|
||||
}
|
||||
prevLBPicker, ok := lb.picker.(*lbPicker)
|
||||
if !ok {
|
||||
lb.picker = newLBPicker(lb.fullServerList, readySCs, lb.clientStats)
|
||||
return
|
||||
}
|
||||
prevLBPicker.updateReadySCs(readySCs)
|
||||
}
|
||||
|
||||
// aggregateSubConnStates calculates the aggregated state of SubConns in
// lb.subConns. These SubConns are the subconns in use (when switching between
// fallback and grpclb). lb.scStates contains states for all SubConns, including
// those in cache (SubConns are cached for 10 seconds after removal).
|
||||
//
|
||||
// The aggregated state is:
|
||||
// - If at least one SubConn in Ready, the aggregated state is Ready;
|
||||
// - Else if at least one SubConn in Connecting or IDLE, the aggregated state is Connecting;
|
||||
// - It's OK to consider IDLE as Connecting. SubConns never stay in IDLE,
|
||||
// they start to connect immediately. But there's a race between when the overall
// state is reported and when the new SubConn state arrives. And SubConns
|
||||
// never go back to IDLE.
|
||||
// - Else the aggregated state is TransientFailure.
|
||||
func (lb *lbBalancer) aggregateSubConnStates() connectivity.State {
|
||||
var numConnecting uint64
|
||||
|
||||
for _, sc := range lb.subConns {
|
||||
if state, ok := lb.scStates[sc]; ok {
|
||||
switch state {
|
||||
case connectivity.Ready:
|
||||
return connectivity.Ready
|
||||
case connectivity.Connecting, connectivity.Idle:
|
||||
numConnecting++
|
||||
}
|
||||
}
|
||||
}
|
||||
if numConnecting > 0 {
|
||||
return connectivity.Connecting
|
||||
}
|
||||
return connectivity.TransientFailure
|
||||
}
|
||||
|
||||
func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) {
|
||||
s := scs.ConnectivityState
|
||||
if logger.V(2) {
|
||||
logger.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s)
|
||||
}
|
||||
lb.mu.Lock()
|
||||
defer lb.mu.Unlock()
|
||||
|
||||
oldS, ok := lb.scStates[sc]
|
||||
if !ok {
|
||||
if logger.V(2) {
|
||||
logger.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
|
||||
}
|
||||
return
|
||||
}
|
||||
lb.scStates[sc] = s
|
||||
switch s {
|
||||
case connectivity.Idle:
|
||||
sc.Connect()
|
||||
case connectivity.Shutdown:
|
||||
// When an address was removed by the resolver, the balancer called RemoveSubConn
// but kept the sc's state in scStates. Remove the state for this sc here.
|
||||
delete(lb.scStates, sc)
|
||||
case connectivity.TransientFailure:
|
||||
lb.connErr = scs.ConnectionError
|
||||
}
|
||||
// Force regenerate picker if
|
||||
// - this sc became ready from not-ready
|
||||
// - this sc became not-ready from ready
|
||||
lb.updateStateAndPicker((oldS == connectivity.Ready) != (s == connectivity.Ready), false)
|
||||
|
||||
// Enter fallback when the aggregated state is not Ready and the connection
|
||||
// to remote balancer is lost.
|
||||
if lb.state != connectivity.Ready {
|
||||
if !lb.inFallback && !lb.remoteBalancerConnected {
|
||||
// Enter fallback.
|
||||
lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// updateStateAndPicker re-calculates the aggregated state, and regenerates the
// picker if the overall state has changed.
|
||||
//
|
||||
// If forceRegeneratePicker is true, picker will be regenerated.
|
||||
func (lb *lbBalancer) updateStateAndPicker(forceRegeneratePicker bool, resetDrop bool) {
|
||||
oldAggrState := lb.state
|
||||
lb.state = lb.aggregateSubConnStates()
|
||||
// Regenerate picker when one of the following happens:
|
||||
// - caller wants to regenerate
|
||||
// - the aggregated state changed
|
||||
if forceRegeneratePicker || (lb.state != oldAggrState) {
|
||||
lb.regeneratePicker(resetDrop)
|
||||
}
|
||||
|
||||
lb.cc.UpdateState(balancer.State{ConnectivityState: lb.state, Picker: lb.picker})
|
||||
}
|
||||
|
||||
// fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use
|
||||
// resolved backends (backends received from resolver, not from remote balancer)
|
||||
// if no connection to remote balancers was successful.
|
||||
func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) {
|
||||
timer := time.NewTimer(fallbackTimeout)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case <-timer.C:
|
||||
case <-lb.doneCh:
|
||||
return
|
||||
}
|
||||
lb.mu.Lock()
|
||||
if lb.inFallback || lb.serverListReceived {
|
||||
lb.mu.Unlock()
|
||||
return
|
||||
}
|
||||
// Enter fallback.
|
||||
lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst)
|
||||
lb.mu.Unlock()
|
||||
}
|
||||
|
||||
func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) {
|
||||
lb.mu.Lock()
|
||||
defer lb.mu.Unlock()
|
||||
|
||||
newUsePickFirst := childIsPickFirst(gc)
|
||||
if lb.usePickFirst == newUsePickFirst {
|
||||
return
|
||||
}
|
||||
if logger.V(2) {
|
||||
logger.Infof("lbBalancer: switching mode, new usePickFirst: %+v", newUsePickFirst)
|
||||
}
|
||||
lb.refreshSubConns(lb.backendAddrs, lb.inFallback, newUsePickFirst)
|
||||
}
|
||||
|
||||
func (lb *lbBalancer) ResolverError(error) {
|
||||
// Ignore resolver errors. GRPCLB is not selected unless the resolver
|
||||
// works at least once.
|
||||
}
|
||||
|
||||
func (lb *lbBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error {
|
||||
if logger.V(2) {
|
||||
logger.Infof("lbBalancer: UpdateClientConnState: %+v", ccs)
|
||||
}
|
||||
gc, _ := ccs.BalancerConfig.(*grpclbServiceConfig)
|
||||
lb.handleServiceConfig(gc)
|
||||
|
||||
addrs := ccs.ResolverState.Addresses
|
||||
|
||||
var remoteBalancerAddrs, backendAddrs []resolver.Address
|
||||
for _, a := range addrs {
|
||||
if a.Type == resolver.GRPCLB {
|
||||
a.Type = resolver.Backend
|
||||
remoteBalancerAddrs = append(remoteBalancerAddrs, a)
|
||||
} else {
|
||||
backendAddrs = append(backendAddrs, a)
|
||||
}
|
||||
}
|
||||
if sd := grpclbstate.Get(ccs.ResolverState); sd != nil {
|
||||
// Override any balancer addresses provided via
|
||||
// ccs.ResolverState.Addresses.
|
||||
remoteBalancerAddrs = sd.BalancerAddresses
|
||||
}
|
||||
|
||||
if len(backendAddrs)+len(remoteBalancerAddrs) == 0 {
|
||||
// There should be at least one address, either grpclb server or
|
||||
// fallback. Empty address is not valid.
|
||||
return balancer.ErrBadResolverState
|
||||
}
|
||||
|
||||
if len(remoteBalancerAddrs) == 0 {
|
||||
if lb.ccRemoteLB != nil {
|
||||
lb.ccRemoteLB.close()
|
||||
lb.ccRemoteLB = nil
|
||||
}
|
||||
} else if lb.ccRemoteLB == nil {
|
||||
// First time receiving resolved addresses, create a cc to remote
|
||||
// balancers.
|
||||
lb.newRemoteBalancerCCWrapper()
|
||||
// Start the fallback goroutine.
|
||||
go lb.fallbackToBackendsAfter(lb.fallbackTimeout)
|
||||
}
|
||||
|
||||
if lb.ccRemoteLB != nil {
|
||||
// cc to remote balancers uses lb.manualResolver. Send the updated remote
|
||||
// balancer addresses to it through manualResolver.
|
||||
lb.manualResolver.UpdateState(resolver.State{Addresses: remoteBalancerAddrs})
|
||||
}
|
||||
|
||||
lb.mu.Lock()
|
||||
lb.resolvedBackendAddrs = backendAddrs
|
||||
if len(remoteBalancerAddrs) == 0 || lb.inFallback {
|
||||
// If there's no remote balancer address in ClientConn update, grpclb
|
||||
// enters fallback mode immediately.
|
||||
//
|
||||
// If a new update is received while grpclb is in fallback, update the
|
||||
// list of backends being used to the new fallback backends.
|
||||
lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst)
|
||||
}
|
||||
lb.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (lb *lbBalancer) Close() {
|
||||
select {
|
||||
case <-lb.doneCh:
|
||||
return
|
||||
default:
|
||||
}
|
||||
close(lb.doneCh)
|
||||
if lb.ccRemoteLB != nil {
|
||||
lb.ccRemoteLB.close()
|
||||
}
|
||||
lb.cc.close()
|
||||
}
|
vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go (generated, vendored, new file, 66 lines)
@@ -0,0 +1,66 @@
/*
|
||||
*
|
||||
* Copyright 2019 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpclb
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/balancer/roundrobin"
|
||||
"google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
|
||||
const (
|
||||
roundRobinName = roundrobin.Name
|
||||
pickFirstName = grpc.PickFirstBalancerName
|
||||
)
|
||||
|
||||
type grpclbServiceConfig struct {
|
||||
serviceconfig.LoadBalancingConfig
|
||||
ChildPolicy *[]map[string]json.RawMessage
|
||||
}
|
||||
|
||||
func (b *lbBuilder) ParseConfig(lbConfig json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
|
||||
ret := &grpclbServiceConfig{}
|
||||
if err := json.Unmarshal(lbConfig, ret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func childIsPickFirst(sc *grpclbServiceConfig) bool {
|
||||
if sc == nil {
|
||||
return false
|
||||
}
|
||||
childConfigs := sc.ChildPolicy
|
||||
if childConfigs == nil {
|
||||
return false
|
||||
}
|
||||
for _, childC := range *childConfigs {
|
||||
// If round_robin exists before pick_first, return false
|
||||
if _, ok := childC[roundRobinName]; ok {
|
||||
return false
|
||||
}
|
||||
// If pick_first is before round_robin, return true
|
||||
if _, ok := childC[pickFirstName]; ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
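// Illustrative sketch, not part of the vendored file: the kind of service
// config JSON that ParseConfig above accepts, and how childIsPickFirst reads
// it. The JSON literal is a hypothetical example, and the snippet assumes it
// runs in this package with encoding/json imported.
func exampleChildPolicy() {
	raw := json.RawMessage(`{"childPolicy": [{"pick_first": {}}, {"round_robin": {}}]}`)
	cfg, err := (&lbBuilder{}).ParseConfig(raw)
	if err != nil {
		panic(err)
	}
	// pick_first appears before round_robin, so childIsPickFirst returns true
	// and grpclb uses pick_first for its child policy.
	_ = childIsPickFirst(cfg.(*grpclbServiceConfig))
}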
|
vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go (generated, vendored, new file, 202 lines)
@@ -0,0 +1,202 @@
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpclb
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/internal/grpcrand"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// rpcStats is the same as lbpb.ClientStats, except that numCallsDropped is a map
|
||||
// instead of a slice.
|
||||
type rpcStats struct {
|
||||
// Only access the following fields atomically.
|
||||
numCallsStarted int64
|
||||
numCallsFinished int64
|
||||
numCallsFinishedWithClientFailedToSend int64
|
||||
numCallsFinishedKnownReceived int64
|
||||
|
||||
mu sync.Mutex
|
||||
// map load_balance_token -> num_calls_dropped
|
||||
numCallsDropped map[string]int64
|
||||
}
|
||||
|
||||
func newRPCStats() *rpcStats {
|
||||
return &rpcStats{
|
||||
numCallsDropped: make(map[string]int64),
|
||||
}
|
||||
}
|
||||
|
||||
func isZeroStats(stats *lbpb.ClientStats) bool {
|
||||
return len(stats.CallsFinishedWithDrop) == 0 &&
|
||||
stats.NumCallsStarted == 0 &&
|
||||
stats.NumCallsFinished == 0 &&
|
||||
stats.NumCallsFinishedWithClientFailedToSend == 0 &&
|
||||
stats.NumCallsFinishedKnownReceived == 0
|
||||
}
|
||||
|
||||
// toClientStats converts rpcStats to lbpb.ClientStats, and clears rpcStats.
|
||||
func (s *rpcStats) toClientStats() *lbpb.ClientStats {
|
||||
stats := &lbpb.ClientStats{
|
||||
NumCallsStarted: atomic.SwapInt64(&s.numCallsStarted, 0),
|
||||
NumCallsFinished: atomic.SwapInt64(&s.numCallsFinished, 0),
|
||||
NumCallsFinishedWithClientFailedToSend: atomic.SwapInt64(&s.numCallsFinishedWithClientFailedToSend, 0),
|
||||
NumCallsFinishedKnownReceived: atomic.SwapInt64(&s.numCallsFinishedKnownReceived, 0),
|
||||
}
|
||||
s.mu.Lock()
|
||||
dropped := s.numCallsDropped
|
||||
s.numCallsDropped = make(map[string]int64)
|
||||
s.mu.Unlock()
|
||||
for token, count := range dropped {
|
||||
stats.CallsFinishedWithDrop = append(stats.CallsFinishedWithDrop, &lbpb.ClientStatsPerToken{
|
||||
LoadBalanceToken: token,
|
||||
NumCalls: count,
|
||||
})
|
||||
}
|
||||
return stats
|
||||
}
|
||||
|
||||
func (s *rpcStats) drop(token string) {
|
||||
atomic.AddInt64(&s.numCallsStarted, 1)
|
||||
s.mu.Lock()
|
||||
s.numCallsDropped[token]++
|
||||
s.mu.Unlock()
|
||||
atomic.AddInt64(&s.numCallsFinished, 1)
|
||||
}
|
||||
|
||||
func (s *rpcStats) failedToSend() {
|
||||
atomic.AddInt64(&s.numCallsStarted, 1)
|
||||
atomic.AddInt64(&s.numCallsFinishedWithClientFailedToSend, 1)
|
||||
atomic.AddInt64(&s.numCallsFinished, 1)
|
||||
}
|
||||
|
||||
func (s *rpcStats) knownReceived() {
|
||||
atomic.AddInt64(&s.numCallsStarted, 1)
|
||||
atomic.AddInt64(&s.numCallsFinishedKnownReceived, 1)
|
||||
atomic.AddInt64(&s.numCallsFinished, 1)
|
||||
}
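// Illustrative sketch, not part of the vendored file: how the rpcStats helpers
// above accumulate into a ClientStats report. The token value is hypothetical.
func exampleRPCStats() {
	s := newRPCStats()
	s.knownReceived()   // one finished call, response received
	s.drop("token-abc") // one call dropped with this token
	report := s.toClientStats()
	// report.NumCallsStarted == 2, report.NumCallsFinished == 2,
	// report.NumCallsFinishedKnownReceived == 1, and CallsFinishedWithDrop
	// holds one entry {LoadBalanceToken: "token-abc", NumCalls: 1}.
	// toClientStats also resets the counters for the next reporting interval.
	_ = report
}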
|
||||
|
||||
type errPicker struct {
|
||||
// Pick always returns this err.
|
||||
err error
|
||||
}
|
||||
|
||||
func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
|
||||
return balancer.PickResult{}, p.err
|
||||
}
|
||||
|
||||
// rrPicker does roundrobin on subConns. It's typically used when there's no
|
||||
// response from remote balancer, and grpclb falls back to the resolved
|
||||
// backends.
|
||||
//
|
||||
// It's guaranteed that len(subConns) > 0.
|
||||
type rrPicker struct {
|
||||
mu sync.Mutex
|
||||
subConns []balancer.SubConn // The subConns that were READY when taking the snapshot.
|
||||
subConnsNext int
|
||||
}
|
||||
|
||||
func newRRPicker(readySCs []balancer.SubConn) *rrPicker {
|
||||
return &rrPicker{
|
||||
subConns: readySCs,
|
||||
subConnsNext: grpcrand.Intn(len(readySCs)),
|
||||
}
|
||||
}
|
||||
|
||||
func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
sc := p.subConns[p.subConnsNext]
|
||||
p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns)
|
||||
return balancer.PickResult{SubConn: sc}, nil
|
||||
}
|
||||
|
||||
// lbPicker does two layers of picks:
|
||||
//
|
||||
// First layer: roundrobin on all servers in serverList, including drops and backends.
|
||||
// - If it picks a drop, the RPC will fail as being dropped.
|
||||
// - If it picks a backend, do a second layer pick to pick the real backend.
|
||||
//
|
||||
// Second layer: roundrobin on all READY backends.
|
||||
//
|
||||
// It's guaranteed that len(serverList) > 0.
|
||||
type lbPicker struct {
|
||||
mu sync.Mutex
|
||||
serverList []*lbpb.Server
|
||||
serverListNext int
|
||||
subConns []balancer.SubConn // The subConns that were READY when taking the snapshot.
|
||||
subConnsNext int
|
||||
|
||||
stats *rpcStats
|
||||
}
|
||||
|
||||
func newLBPicker(serverList []*lbpb.Server, readySCs []balancer.SubConn, stats *rpcStats) *lbPicker {
|
||||
return &lbPicker{
|
||||
serverList: serverList,
|
||||
subConns: readySCs,
|
||||
subConnsNext: grpcrand.Intn(len(readySCs)),
|
||||
stats: stats,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *lbPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
|
||||
// Layer one roundrobin on serverList.
|
||||
s := p.serverList[p.serverListNext]
|
||||
p.serverListNext = (p.serverListNext + 1) % len(p.serverList)
|
||||
|
||||
// If it's a drop, return an error and fail the RPC.
|
||||
if s.Drop {
|
||||
p.stats.drop(s.LoadBalanceToken)
|
||||
return balancer.PickResult{}, status.Errorf(codes.Unavailable, "request dropped by grpclb")
|
||||
}
|
||||
|
||||
// If it's not a drop but there are no ready subConns.
|
||||
if len(p.subConns) <= 0 {
|
||||
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
|
||||
}
|
||||
|
||||
// Return the next ready subConn in the list, also collect rpc stats.
|
||||
sc := p.subConns[p.subConnsNext]
|
||||
p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns)
|
||||
done := func(info balancer.DoneInfo) {
|
||||
if !info.BytesSent {
|
||||
p.stats.failedToSend()
|
||||
} else if info.BytesReceived {
|
||||
p.stats.knownReceived()
|
||||
}
|
||||
}
|
||||
return balancer.PickResult{SubConn: sc, Done: done}, nil
|
||||
}
|
||||
|
||||
func (p *lbPicker) updateReadySCs(readySCs []balancer.SubConn) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
|
||||
p.subConns = readySCs
|
||||
p.subConnsNext = p.subConnsNext % len(readySCs)
|
||||
}
|
vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go (generated, vendored, new file, 410 lines)
@@ -0,0 +1,410 @@
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpclb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
timestamppb "github.com/golang/protobuf/ptypes/timestamp"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/balancer"
|
||||
lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/internal/backoff"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
imetadata "google.golang.org/grpc/internal/metadata"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
// processServerList updates the balancer's internal state, creates/removes
// SubConns and regenerates the picker using the received serverList.
|
||||
func (lb *lbBalancer) processServerList(l *lbpb.ServerList) {
|
||||
if logger.V(2) {
|
||||
logger.Infof("lbBalancer: processing server list: %+v", l)
|
||||
}
|
||||
lb.mu.Lock()
|
||||
defer lb.mu.Unlock()
|
||||
|
||||
// Set serverListReceived to true so fallback will not take effect if it has
|
||||
// not hit timeout.
|
||||
lb.serverListReceived = true
|
||||
|
||||
// If the new server list == old server list, do nothing.
|
||||
if cmp.Equal(lb.fullServerList, l.Servers, cmp.Comparer(proto.Equal)) {
|
||||
if logger.V(2) {
|
||||
logger.Infof("lbBalancer: new serverlist same as the previous one, ignoring")
|
||||
}
|
||||
return
|
||||
}
|
||||
lb.fullServerList = l.Servers
|
||||
|
||||
var backendAddrs []resolver.Address
|
||||
for i, s := range l.Servers {
|
||||
if s.Drop {
|
||||
continue
|
||||
}
|
||||
|
||||
md := metadata.Pairs(lbTokenKey, s.LoadBalanceToken)
|
||||
ip := net.IP(s.IpAddress)
|
||||
ipStr := ip.String()
|
||||
if ip.To4() == nil {
|
||||
// Add square brackets to ipv6 addresses, otherwise net.Dial() and
|
||||
// net.SplitHostPort() will return a "too many colons" error.
|
||||
ipStr = fmt.Sprintf("[%s]", ipStr)
|
||||
}
|
||||
addr := imetadata.Set(resolver.Address{Addr: fmt.Sprintf("%s:%d", ipStr, s.Port)}, md)
|
||||
if logger.V(2) {
|
||||
logger.Infof("lbBalancer: server list entry[%d]: ipStr:|%s|, port:|%d|, load balancer token:|%v|",
|
||||
i, ipStr, s.Port, s.LoadBalanceToken)
|
||||
}
|
||||
backendAddrs = append(backendAddrs, addr)
|
||||
}
|
||||
|
||||
// Call refreshSubConns to create/remove SubConns. If we are in fallback,
|
||||
// this is also exiting fallback.
|
||||
lb.refreshSubConns(backendAddrs, false, lb.usePickFirst)
|
||||
}
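// Worked example, not part of the vendored file: how a Server entry is turned
// into the backend address string in the loop above. The IPs are hypothetical.
//
//	Server{IpAddress: []byte{10, 0, 0, 1}, Port: 80}        -> "10.0.0.1:80"
//	Server{IpAddress: net.ParseIP("2001:db8::1"), Port: 80} -> "[2001:db8::1]:80"
//
// The brackets are added for the second entry because ip.To4() == nil for an
// IPv6 address.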
|
||||
|
||||
// refreshSubConns creates/removes SubConns with backendAddrs, and refreshes
|
||||
// balancer state and picker.
|
||||
//
|
||||
// Caller must hold lb.mu.
|
||||
func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback bool, pickFirst bool) {
|
||||
opts := balancer.NewSubConnOptions{}
|
||||
if !fallback {
|
||||
opts.CredsBundle = lb.grpclbBackendCreds
|
||||
}
|
||||
|
||||
lb.backendAddrs = backendAddrs
|
||||
lb.backendAddrsWithoutMetadata = nil
|
||||
|
||||
fallbackModeChanged := lb.inFallback != fallback
|
||||
lb.inFallback = fallback
|
||||
if fallbackModeChanged && lb.inFallback {
|
||||
// Clear previous received list when entering fallback, so if the server
|
||||
// comes back and sends the same list again, the new addresses will be
|
||||
// used.
|
||||
lb.fullServerList = nil
|
||||
}
|
||||
|
||||
balancingPolicyChanged := lb.usePickFirst != pickFirst
|
||||
oldUsePickFirst := lb.usePickFirst
|
||||
lb.usePickFirst = pickFirst
|
||||
|
||||
if fallbackModeChanged || balancingPolicyChanged {
|
||||
// Remove all SubConns when switching balancing policy or switching
|
||||
// fallback mode.
|
||||
//
|
||||
// For fallback mode switching with pickfirst, we want to recreate the
|
||||
// SubConn because the creds could be different.
|
||||
for a, sc := range lb.subConns {
|
||||
if oldUsePickFirst {
|
||||
// If old SubConns were created for pickfirst, bypass the cache and
|
||||
// remove directly.
|
||||
lb.cc.cc.RemoveSubConn(sc)
|
||||
} else {
|
||||
lb.cc.RemoveSubConn(sc)
|
||||
}
|
||||
delete(lb.subConns, a)
|
||||
}
|
||||
}
|
||||
|
||||
if lb.usePickFirst {
|
||||
var sc balancer.SubConn
|
||||
for _, sc = range lb.subConns {
|
||||
break
|
||||
}
|
||||
if sc != nil {
|
||||
lb.cc.cc.UpdateAddresses(sc, backendAddrs)
|
||||
sc.Connect()
|
||||
return
|
||||
}
|
||||
// This bypasses the cc wrapper with SubConn cache.
|
||||
sc, err := lb.cc.cc.NewSubConn(backendAddrs, opts)
|
||||
if err != nil {
|
||||
logger.Warningf("grpclb: failed to create new SubConn: %v", err)
|
||||
return
|
||||
}
|
||||
sc.Connect()
|
||||
lb.subConns[backendAddrs[0]] = sc
|
||||
lb.scStates[sc] = connectivity.Idle
|
||||
return
|
||||
}
|
||||
|
||||
// addrsSet is the set converted from backendAddrsWithoutMetadata; it's used for
// quick lookup of an address.
|
||||
addrsSet := make(map[resolver.Address]struct{})
|
||||
// Create new SubConns.
|
||||
for _, addr := range backendAddrs {
|
||||
addrWithoutAttrs := addr
|
||||
addrWithoutAttrs.Attributes = nil
|
||||
addrsSet[addrWithoutAttrs] = struct{}{}
|
||||
lb.backendAddrsWithoutMetadata = append(lb.backendAddrsWithoutMetadata, addrWithoutAttrs)
|
||||
|
||||
if _, ok := lb.subConns[addrWithoutAttrs]; !ok {
|
||||
// Use addrWithMD to create the SubConn.
|
||||
sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, opts)
|
||||
if err != nil {
|
||||
logger.Warningf("grpclb: failed to create new SubConn: %v", err)
|
||||
continue
|
||||
}
|
||||
lb.subConns[addrWithoutAttrs] = sc // Use the addr without MD as key for the map.
|
||||
if _, ok := lb.scStates[sc]; !ok {
|
||||
// Only set state of new sc to IDLE. The state could already be
|
||||
// READY for cached SubConns.
|
||||
lb.scStates[sc] = connectivity.Idle
|
||||
}
|
||||
sc.Connect()
|
||||
}
|
||||
}
|
||||
|
||||
for a, sc := range lb.subConns {
|
||||
// a was removed by resolver.
|
||||
if _, ok := addrsSet[a]; !ok {
|
||||
lb.cc.RemoveSubConn(sc)
|
||||
delete(lb.subConns, a)
|
||||
// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
|
||||
// The entry will be deleted in UpdateSubConnState.
|
||||
}
|
||||
}
|
||||
|
||||
// Regenerate and update picker after refreshing subconns because with
|
||||
// cache, even if SubConn was newed/removed, there might be no state
|
||||
// changes (the subconn will be kept in cache, not actually
|
||||
// newed/removed).
|
||||
lb.updateStateAndPicker(true, true)
|
||||
}
|
||||
|
||||
type remoteBalancerCCWrapper struct {
|
||||
cc *grpc.ClientConn
|
||||
lb *lbBalancer
|
||||
backoff backoff.Strategy
|
||||
done chan struct{}
|
||||
|
||||
// waitgroup to wait for all goroutines to exit.
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
func (lb *lbBalancer) newRemoteBalancerCCWrapper() {
|
||||
var dopts []grpc.DialOption
|
||||
if creds := lb.opt.DialCreds; creds != nil {
|
||||
dopts = append(dopts, grpc.WithTransportCredentials(creds))
|
||||
} else if bundle := lb.grpclbClientConnCreds; bundle != nil {
|
||||
dopts = append(dopts, grpc.WithCredentialsBundle(bundle))
|
||||
} else {
|
||||
dopts = append(dopts, grpc.WithInsecure())
|
||||
}
|
||||
if lb.opt.Dialer != nil {
|
||||
dopts = append(dopts, grpc.WithContextDialer(lb.opt.Dialer))
|
||||
}
|
||||
if lb.opt.CustomUserAgent != "" {
|
||||
dopts = append(dopts, grpc.WithUserAgent(lb.opt.CustomUserAgent))
|
||||
}
|
||||
// Explicitly set pickfirst as the balancer.
|
||||
dopts = append(dopts, grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"pick_first"}`))
|
||||
dopts = append(dopts, grpc.WithResolvers(lb.manualResolver))
|
||||
if channelz.IsOn() {
|
||||
dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID))
|
||||
}
|
||||
|
||||
// Enable Keepalive for grpclb client.
|
||||
dopts = append(dopts, grpc.WithKeepaliveParams(keepalive.ClientParameters{
|
||||
Time: 20 * time.Second,
|
||||
Timeout: 10 * time.Second,
|
||||
PermitWithoutStream: true,
|
||||
}))
|
||||
|
||||
// The dial target is not important.
|
||||
//
|
||||
// The grpclb server addresses will set field ServerName, and creds will
|
||||
// receive ServerName as authority.
|
||||
cc, err := grpc.DialContext(context.Background(), lb.manualResolver.Scheme()+":///grpclb.subClientConn", dopts...)
|
||||
if err != nil {
|
||||
logger.Fatalf("failed to dial: %v", err)
|
||||
}
|
||||
ccw := &remoteBalancerCCWrapper{
|
||||
cc: cc,
|
||||
lb: lb,
|
||||
backoff: lb.backoff,
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
lb.ccRemoteLB = ccw
|
||||
ccw.wg.Add(1)
|
||||
go ccw.watchRemoteBalancer()
|
||||
}
|
||||
|
||||
// close closes the ClientConn to the remote balancer and waits until all
// goroutines have finished.
|
||||
func (ccw *remoteBalancerCCWrapper) close() {
|
||||
close(ccw.done)
|
||||
ccw.cc.Close()
|
||||
ccw.wg.Wait()
|
||||
}
|
||||
|
||||
func (ccw *remoteBalancerCCWrapper) readServerList(s *balanceLoadClientStream) error {
|
||||
for {
|
||||
reply, err := s.Recv()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
return errServerTerminatedConnection
|
||||
}
|
||||
return fmt.Errorf("grpclb: failed to recv server list: %v", err)
|
||||
}
|
||||
if serverList := reply.GetServerList(); serverList != nil {
|
||||
ccw.lb.processServerList(serverList)
|
||||
}
|
||||
if reply.GetFallbackResponse() != nil {
|
||||
// Eagerly enter fallback
|
||||
ccw.lb.mu.Lock()
|
||||
ccw.lb.refreshSubConns(ccw.lb.resolvedBackendAddrs, true, ccw.lb.usePickFirst)
|
||||
ccw.lb.mu.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ccw *remoteBalancerCCWrapper) sendLoadReport(s *balanceLoadClientStream, interval time.Duration) {
|
||||
ticker := time.NewTicker(interval)
|
||||
defer ticker.Stop()
|
||||
lastZero := false
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
case <-s.Context().Done():
|
||||
return
|
||||
}
|
||||
stats := ccw.lb.clientStats.toClientStats()
|
||||
zero := isZeroStats(stats)
|
||||
if zero && lastZero {
|
||||
// Quash redundant empty load reports.
|
||||
continue
|
||||
}
|
||||
lastZero = zero
|
||||
t := time.Now()
|
||||
stats.Timestamp = ×tamppb.Timestamp{
|
||||
Seconds: t.Unix(),
|
||||
Nanos: int32(t.Nanosecond()),
|
||||
}
|
||||
if err := s.Send(&lbpb.LoadBalanceRequest{
|
||||
LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{
|
||||
ClientStats: stats,
|
||||
},
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ccw *remoteBalancerCCWrapper) callRemoteBalancer() (backoff bool, _ error) {
|
||||
lbClient := &loadBalancerClient{cc: ccw.cc}
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
stream, err := lbClient.BalanceLoad(ctx, grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
return true, fmt.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err)
|
||||
}
|
||||
ccw.lb.mu.Lock()
|
||||
ccw.lb.remoteBalancerConnected = true
|
||||
ccw.lb.mu.Unlock()
|
||||
|
||||
// grpclb handshake on the stream.
|
||||
initReq := &lbpb.LoadBalanceRequest{
|
||||
LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
|
||||
InitialRequest: &lbpb.InitialLoadBalanceRequest{
|
||||
Name: ccw.lb.target,
|
||||
},
|
||||
},
|
||||
}
|
||||
if err := stream.Send(initReq); err != nil {
|
||||
return true, fmt.Errorf("grpclb: failed to send init request: %v", err)
|
||||
}
|
||||
reply, err := stream.Recv()
|
||||
if err != nil {
|
||||
return true, fmt.Errorf("grpclb: failed to recv init response: %v", err)
|
||||
}
|
||||
initResp := reply.GetInitialResponse()
|
||||
if initResp == nil {
|
||||
return true, fmt.Errorf("grpclb: reply from remote balancer did not include initial response")
|
||||
}
|
||||
|
||||
ccw.wg.Add(1)
|
||||
go func() {
|
||||
defer ccw.wg.Done()
|
||||
if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 {
|
||||
ccw.sendLoadReport(stream, d)
|
||||
}
|
||||
}()
|
||||
// No backoff if init req/resp handshake was successful.
|
||||
return false, ccw.readServerList(stream)
|
||||
}
|
||||
|
||||
func (ccw *remoteBalancerCCWrapper) watchRemoteBalancer() {
|
||||
defer ccw.wg.Done()
|
||||
var retryCount int
|
||||
for {
|
||||
doBackoff, err := ccw.callRemoteBalancer()
|
||||
select {
|
||||
case <-ccw.done:
|
||||
return
|
||||
default:
|
||||
if err != nil {
|
||||
if err == errServerTerminatedConnection {
|
||||
logger.Info(err)
|
||||
} else {
|
||||
logger.Warning(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Trigger a re-resolve when the stream errors.
|
||||
ccw.lb.cc.cc.ResolveNow(resolver.ResolveNowOptions{})
|
||||
|
||||
ccw.lb.mu.Lock()
|
||||
ccw.lb.remoteBalancerConnected = false
|
||||
ccw.lb.fullServerList = nil
|
||||
// Enter fallback when connection to remote balancer is lost, and the
|
||||
// aggregated state is not Ready.
|
||||
if !ccw.lb.inFallback && ccw.lb.state != connectivity.Ready {
|
||||
// Entering fallback.
|
||||
ccw.lb.refreshSubConns(ccw.lb.resolvedBackendAddrs, true, ccw.lb.usePickFirst)
|
||||
}
|
||||
ccw.lb.mu.Unlock()
|
||||
|
||||
if !doBackoff {
|
||||
retryCount = 0
|
||||
continue
|
||||
}
|
||||
|
||||
timer := time.NewTimer(ccw.backoff.Backoff(retryCount)) // Copy backoff
|
||||
select {
|
||||
case <-timer.C:
|
||||
case <-ccw.done:
|
||||
timer.Stop()
|
||||
return
|
||||
}
|
||||
retryCount++
|
||||
}
|
||||
}
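The loop above keeps calling the remote balancer, resets the retry count after any round that did not request backoff, and exits as soon as the wrapper is closed. A minimal standalone sketch of that retry-with-backoff shape, with hypothetical helper names (retryWithBackoff, wait) that are not part of grpclb:

package main

import (
    "fmt"
    "time"
)

// retryWithBackoff mirrors the shape of watchRemoteBalancer: call fn in a
// loop, reset the retry count after a round that does not request backoff,
// back off otherwise, and stop as soon as done is closed.
func retryWithBackoff(fn func() (backoff bool, err error), wait func(retries int) time.Duration, done <-chan struct{}) {
    retries := 0
    for {
        doBackoff, err := fn()
        select {
        case <-done:
            return
        default:
            if err != nil {
                fmt.Println("round ended:", err)
            }
        }
        if !doBackoff {
            retries = 0
            continue
        }
        timer := time.NewTimer(wait(retries))
        select {
        case <-timer.C:
        case <-done:
            timer.Stop()
            return
        }
        retries++
    }
}

func main() {
    done := make(chan struct{})
    calls := 0
    retryWithBackoff(
        func() (bool, error) {
            calls++
            if calls == 3 {
                close(done) // stop the loop after three failed rounds
            }
            return true, fmt.Errorf("attempt %d failed", calls)
        },
        func(retries int) time.Duration { return time.Duration(retries+1) * time.Millisecond },
        done,
    )
    fmt.Println("attempts made:", calls)
}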
|
208 vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go generated vendored Normal file
@@ -0,0 +1,208 @@
/*
|
||||
*
|
||||
* Copyright 2016 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpclb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
// The parent ClientConn should re-resolve when grpclb loses connection to the
|
||||
// remote balancer. When the ClientConn inside grpclb gets a TransientFailure,
|
||||
// it calls lbManualResolver.ResolveNow(), which calls parent ClientConn's
|
||||
// ResolveNow, and eventually results in re-resolve happening in parent
|
||||
// ClientConn's resolver (DNS for example).
|
||||
//
|
||||
// parent
|
||||
// ClientConn
|
||||
// +-----------------------------------------------------------------+
|
||||
// | parent +---------------------------------+ |
|
||||
// | DNS ClientConn | grpclb | |
|
||||
// | resolver balancerWrapper | | |
|
||||
// | + + | grpclb grpclb | |
|
||||
// | | | | ManualResolver ClientConn | |
|
||||
// | | | | + + | |
|
||||
// | | | | | | Transient | |
|
||||
// | | | | | | Failure | |
|
||||
// | | | | | <--------- | | |
|
||||
// | | | <--------------- | ResolveNow | | |
|
||||
// | | <--------- | ResolveNow | | | | |
|
||||
// | | ResolveNow | | | | | |
|
||||
// | | | | | | | |
|
||||
// | + + | + + | |
|
||||
// | +---------------------------------+ |
|
||||
// +-----------------------------------------------------------------+
|
||||
|
||||
// lbManualResolver is used by the ClientConn inside grpclb. It's a manual
|
||||
// resolver with a special ResolveNow() function.
|
||||
//
|
||||
// When ResolveNow() is called, it calls ResolveNow() on the parent ClientConn,
|
||||
// so when the grpclb client loses contact with remote balancers, the parent
|
||||
// ClientConn's resolver will re-resolve.
|
||||
type lbManualResolver struct {
|
||||
scheme string
|
||||
ccr resolver.ClientConn
|
||||
|
||||
ccb balancer.ClientConn
|
||||
}
|
||||
|
||||
func (r *lbManualResolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
|
||||
r.ccr = cc
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (r *lbManualResolver) Scheme() string {
|
||||
return r.scheme
|
||||
}
|
||||
|
||||
// ResolveNow calls resolveNow on the parent ClientConn.
|
||||
func (r *lbManualResolver) ResolveNow(o resolver.ResolveNowOptions) {
|
||||
r.ccb.ResolveNow(o)
|
||||
}
|
||||
|
||||
// Close is a noop for Resolver.
|
||||
func (*lbManualResolver) Close() {}
|
||||
|
||||
// UpdateState calls cc.UpdateState.
|
||||
func (r *lbManualResolver) UpdateState(s resolver.State) {
|
||||
r.ccr.UpdateState(s)
|
||||
}
|
||||
|
||||
const subConnCacheTime = time.Second * 10
|
||||
|
||||
// lbCacheClientConn is a wrapper balancer.ClientConn with a SubConn cache.
|
||||
// SubConns will be kept in cache for subConnCacheTime before being removed.
|
||||
//
|
||||
// Its new and remove methods are updated to do cache first.
|
||||
type lbCacheClientConn struct {
|
||||
cc balancer.ClientConn
|
||||
timeout time.Duration
|
||||
|
||||
mu sync.Mutex
|
||||
// subConnCache only keeps subConns that are being deleted.
|
||||
subConnCache map[resolver.Address]*subConnCacheEntry
|
||||
subConnToAddr map[balancer.SubConn]resolver.Address
|
||||
}
|
||||
|
||||
type subConnCacheEntry struct {
|
||||
sc balancer.SubConn
|
||||
|
||||
cancel func()
|
||||
abortDeleting bool
|
||||
}
|
||||
|
||||
func newLBCacheClientConn(cc balancer.ClientConn) *lbCacheClientConn {
|
||||
return &lbCacheClientConn{
|
||||
cc: cc,
|
||||
timeout: subConnCacheTime,
|
||||
subConnCache: make(map[resolver.Address]*subConnCacheEntry),
|
||||
subConnToAddr: make(map[balancer.SubConn]resolver.Address),
|
||||
}
|
||||
}
|
||||
|
||||
func (ccc *lbCacheClientConn) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
|
||||
if len(addrs) != 1 {
|
||||
return nil, fmt.Errorf("grpclb calling NewSubConn with addrs of length %v", len(addrs))
|
||||
}
|
||||
addrWithoutAttrs := addrs[0]
|
||||
addrWithoutAttrs.Attributes = nil
|
||||
|
||||
ccc.mu.Lock()
|
||||
defer ccc.mu.Unlock()
|
||||
if entry, ok := ccc.subConnCache[addrWithoutAttrs]; ok {
|
||||
// If entry is in subConnCache, the SubConn was being deleted.
|
||||
// cancel function will never be nil.
|
||||
entry.cancel()
|
||||
delete(ccc.subConnCache, addrWithoutAttrs)
|
||||
return entry.sc, nil
|
||||
}
|
||||
|
||||
scNew, err := ccc.cc.NewSubConn(addrs, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ccc.subConnToAddr[scNew] = addrWithoutAttrs
|
||||
return scNew, nil
|
||||
}
|
||||
|
||||
func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) {
|
||||
ccc.mu.Lock()
|
||||
defer ccc.mu.Unlock()
|
||||
addr, ok := ccc.subConnToAddr[sc]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
if entry, ok := ccc.subConnCache[addr]; ok {
|
||||
if entry.sc != sc {
|
||||
// This could happen if NewSubConn was called multiple times for the
|
||||
// same address, and those SubConns are all removed. We remove sc
|
||||
// immediately here.
|
||||
delete(ccc.subConnToAddr, sc)
|
||||
ccc.cc.RemoveSubConn(sc)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
entry := &subConnCacheEntry{
|
||||
sc: sc,
|
||||
}
|
||||
ccc.subConnCache[addr] = entry
|
||||
|
||||
timer := time.AfterFunc(ccc.timeout, func() {
|
||||
ccc.mu.Lock()
|
||||
defer ccc.mu.Unlock()
|
||||
if entry.abortDeleting {
|
||||
return
|
||||
}
|
||||
ccc.cc.RemoveSubConn(sc)
|
||||
delete(ccc.subConnToAddr, sc)
|
||||
delete(ccc.subConnCache, addr)
|
||||
})
|
||||
entry.cancel = func() {
|
||||
if !timer.Stop() {
|
||||
// If stop was not successful, the timer has fired (this can only
|
||||
// happen in a race). But the deleting function is blocked on ccc.mu
|
||||
// because the mutex was held by the caller of this function.
|
||||
//
|
||||
// Set abortDeleting to true to abort the deleting function. When
|
||||
// the lock is released, the deleting function will acquire the
|
||||
// lock, check the value of abortDeleting and return.
|
||||
entry.abortDeleting = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ccc *lbCacheClientConn) UpdateState(s balancer.State) {
|
||||
ccc.cc.UpdateState(s)
|
||||
}
|
||||
|
||||
func (ccc *lbCacheClientConn) close() {
|
||||
ccc.mu.Lock()
|
||||
// Only cancel all existing timers. There's no need to remove SubConns.
|
||||
for _, entry := range ccc.subConnCache {
|
||||
entry.cancel()
|
||||
}
|
||||
ccc.mu.Unlock()
|
||||
}
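The cache above never tears a SubConn down immediately: RemoveSubConn parks it in subConnCache and schedules the real removal after subConnCacheTime, while a NewSubConn for the same address within that window simply reclaims the cached entry. A small standalone sketch of that deferred-delete pattern; the delayedSet type and its address keys are hypothetical, not grpclb's API:

package main

import (
    "fmt"
    "sync"
    "time"
)

// delayedSet parks removed keys for a grace period; Add within that period
// reclaims the key instead of recreating it, similar to how lbCacheClientConn
// treats SubConns.
type delayedSet struct {
    mu      sync.Mutex
    ttl     time.Duration
    live    map[string]bool
    pending map[string]*time.Timer // keys scheduled for real deletion
}

func newDelayedSet(ttl time.Duration) *delayedSet {
    return &delayedSet{ttl: ttl, live: make(map[string]bool), pending: make(map[string]*time.Timer)}
}

func (s *delayedSet) Add(key string) {
    s.mu.Lock()
    defer s.mu.Unlock()
    if t, ok := s.pending[key]; ok {
        t.Stop() // reclaim: cancel the scheduled deletion
        delete(s.pending, key)
    }
    s.live[key] = true
}

func (s *delayedSet) Remove(key string) {
    s.mu.Lock()
    defer s.mu.Unlock()
    if !s.live[key] || s.pending[key] != nil {
        return
    }
    var t *time.Timer
    t = time.AfterFunc(s.ttl, func() {
        s.mu.Lock()
        defer s.mu.Unlock()
        if s.pending[key] != t {
            return // Add reclaimed the key before the timer ran
        }
        delete(s.pending, key)
        delete(s.live, key)
    })
    s.pending[key] = t
}

func main() {
    s := newDelayedSet(50 * time.Millisecond)
    s.Add("10.0.0.1:443")
    s.Remove("10.0.0.1:443") // deletion is only scheduled, not performed
    s.Add("10.0.0.1:443")    // reclaims the pending entry
    time.Sleep(100 * time.Millisecond)
    s.mu.Lock()
    fmt.Println("still live:", s.live["10.0.0.1:443"]) // true
    s.mu.Unlock()
}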
|
332 vendor/google.golang.org/grpc/credentials/alts/alts.go generated vendored Normal file
@@ -0,0 +1,332 @@
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package alts implements the ALTS credential support provided by the gRPC library, which
|
||||
// encapsulates all the state needed by a client to authenticate with a server
|
||||
// using ALTS and make various assertions, e.g., about the client's identity,
|
||||
// role, or whether it is authorized to make a particular call.
|
||||
// This package is experimental.
|
||||
package alts
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/credentials"
|
||||
core "google.golang.org/grpc/credentials/alts/internal"
|
||||
"google.golang.org/grpc/credentials/alts/internal/handshaker"
|
||||
"google.golang.org/grpc/credentials/alts/internal/handshaker/service"
|
||||
altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/googlecloud"
|
||||
)
|
||||
|
||||
const (
|
||||
// hypervisorHandshakerServiceAddress represents the default ALTS gRPC
|
||||
// handshaker service address in the hypervisor.
|
||||
hypervisorHandshakerServiceAddress = "metadata.google.internal.:8080"
|
||||
// defaultTimeout specifies the server handshake timeout.
|
||||
defaultTimeout = 30.0 * time.Second
|
||||
// The following constants specify the minimum and maximum acceptable
|
||||
// protocol versions.
|
||||
protocolVersionMaxMajor = 2
|
||||
protocolVersionMaxMinor = 1
|
||||
protocolVersionMinMajor = 2
|
||||
protocolVersionMinMinor = 1
|
||||
)
|
||||
|
||||
var (
|
||||
vmOnGCP bool
|
||||
once sync.Once
|
||||
maxRPCVersion = &altspb.RpcProtocolVersions_Version{
|
||||
Major: protocolVersionMaxMajor,
|
||||
Minor: protocolVersionMaxMinor,
|
||||
}
|
||||
minRPCVersion = &altspb.RpcProtocolVersions_Version{
|
||||
Major: protocolVersionMinMajor,
|
||||
Minor: protocolVersionMinMinor,
|
||||
}
|
||||
// ErrUntrustedPlatform is returned from ClientHandshake and
|
||||
// ServerHandshake when running on a platform where the trustworthiness of
|
||||
// the handshaker service is not guaranteed.
|
||||
ErrUntrustedPlatform = errors.New("ALTS: untrusted platform. ALTS is only supported on GCP")
|
||||
logger = grpclog.Component("alts")
|
||||
)
|
||||
|
||||
// AuthInfo exposes security information from the ALTS handshake to the
|
||||
// application. This interface is to be implemented by ALTS. Users should not
|
||||
// need a brand new implementation of this interface. For situations like
|
||||
// testing, any new implementation should embed this interface. This allows
|
||||
// ALTS to add new methods to this interface.
|
||||
type AuthInfo interface {
|
||||
// ApplicationProtocol returns application protocol negotiated for the
|
||||
// ALTS connection.
|
||||
ApplicationProtocol() string
|
||||
// RecordProtocol returns the record protocol negotiated for the ALTS
|
||||
// connection.
|
||||
RecordProtocol() string
|
||||
// SecurityLevel returns the security level of the created ALTS secure
|
||||
// channel.
|
||||
SecurityLevel() altspb.SecurityLevel
|
||||
// PeerServiceAccount returns the peer service account.
|
||||
PeerServiceAccount() string
|
||||
// LocalServiceAccount returns the local service account.
|
||||
LocalServiceAccount() string
|
||||
// PeerRPCVersions returns the RPC version supported by the peer.
|
||||
PeerRPCVersions() *altspb.RpcProtocolVersions
|
||||
}
|
||||
|
||||
// ClientOptions contains the client-side options of an ALTS channel. These
|
||||
// options will be passed to the underlying ALTS handshaker.
|
||||
type ClientOptions struct {
|
||||
// TargetServiceAccounts contains a list of expected target service
|
||||
// accounts.
|
||||
TargetServiceAccounts []string
|
||||
// HandshakerServiceAddress represents the ALTS handshaker gRPC service
|
||||
// address to connect to.
|
||||
HandshakerServiceAddress string
|
||||
}
|
||||
|
||||
// DefaultClientOptions creates a new ClientOptions object with the default
|
||||
// values.
|
||||
func DefaultClientOptions() *ClientOptions {
|
||||
return &ClientOptions{
|
||||
HandshakerServiceAddress: hypervisorHandshakerServiceAddress,
|
||||
}
|
||||
}
|
||||
|
||||
// ServerOptions contains the server-side options of an ALTS channel. These
|
||||
// options will be passed to the underlying ALTS handshaker.
|
||||
type ServerOptions struct {
|
||||
// HandshakerServiceAddress represents the ALTS handshaker gRPC service
|
||||
// address to connect to.
|
||||
HandshakerServiceAddress string
|
||||
}
|
||||
|
||||
// DefaultServerOptions creates a new ServerOptions object with the default
|
||||
// values.
|
||||
func DefaultServerOptions() *ServerOptions {
|
||||
return &ServerOptions{
|
||||
HandshakerServiceAddress: hypervisorHandshakerServiceAddress,
|
||||
}
|
||||
}
|
||||
|
||||
// altsTC is the credentials required for authenticating a connection using ALTS.
|
||||
// It implements credentials.TransportCredentials interface.
|
||||
type altsTC struct {
|
||||
info *credentials.ProtocolInfo
|
||||
side core.Side
|
||||
accounts []string
|
||||
hsAddress string
|
||||
}
|
||||
|
||||
// NewClientCreds constructs a client-side ALTS TransportCredentials object.
|
||||
func NewClientCreds(opts *ClientOptions) credentials.TransportCredentials {
|
||||
return newALTS(core.ClientSide, opts.TargetServiceAccounts, opts.HandshakerServiceAddress)
|
||||
}
|
||||
|
||||
// NewServerCreds constructs a server-side ALTS TransportCredentials object.
|
||||
func NewServerCreds(opts *ServerOptions) credentials.TransportCredentials {
|
||||
return newALTS(core.ServerSide, nil, opts.HandshakerServiceAddress)
|
||||
}
|
||||
|
||||
func newALTS(side core.Side, accounts []string, hsAddress string) credentials.TransportCredentials {
|
||||
once.Do(func() {
|
||||
vmOnGCP = googlecloud.OnGCE()
|
||||
})
|
||||
if hsAddress == "" {
|
||||
hsAddress = hypervisorHandshakerServiceAddress
|
||||
}
|
||||
return &altsTC{
|
||||
info: &credentials.ProtocolInfo{
|
||||
SecurityProtocol: "alts",
|
||||
SecurityVersion: "1.0",
|
||||
},
|
||||
side: side,
|
||||
accounts: accounts,
|
||||
hsAddress: hsAddress,
|
||||
}
|
||||
}
|
||||
|
||||
// ClientHandshake implements the client side handshake protocol.
|
||||
func (g *altsTC) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ credentials.AuthInfo, err error) {
|
||||
if !vmOnGCP {
|
||||
return nil, nil, ErrUntrustedPlatform
|
||||
}
|
||||
|
||||
// Connecting to ALTS handshaker service.
|
||||
hsConn, err := service.Dial(g.hsAddress)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
// Do not close hsConn since it is shared with other handshakes.
|
||||
|
||||
// Possible context leak:
|
||||
// The cancel function for the child context we create will only be
|
||||
// called if a non-nil error is returned.
|
||||
var cancel context.CancelFunc
|
||||
ctx, cancel = context.WithCancel(ctx)
|
||||
defer func() {
|
||||
if err != nil {
|
||||
cancel()
|
||||
}
|
||||
}()
|
||||
|
||||
opts := handshaker.DefaultClientHandshakerOptions()
|
||||
opts.TargetName = addr
|
||||
opts.TargetServiceAccounts = g.accounts
|
||||
opts.RPCVersions = &altspb.RpcProtocolVersions{
|
||||
MaxRpcVersion: maxRPCVersion,
|
||||
MinRpcVersion: minRPCVersion,
|
||||
}
|
||||
chs, err := handshaker.NewClientHandshaker(ctx, hsConn, rawConn, opts)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
chs.Close()
|
||||
}
|
||||
}()
|
||||
secConn, authInfo, err := chs.ClientHandshake(ctx)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
altsAuthInfo, ok := authInfo.(AuthInfo)
|
||||
if !ok {
|
||||
return nil, nil, errors.New("client-side auth info is not of type alts.AuthInfo")
|
||||
}
|
||||
match, _ := checkRPCVersions(opts.RPCVersions, altsAuthInfo.PeerRPCVersions())
|
||||
if !match {
|
||||
return nil, nil, fmt.Errorf("server-side RPC versions are not compatible with this client, local versions: %v, peer versions: %v", opts.RPCVersions, altsAuthInfo.PeerRPCVersions())
|
||||
}
|
||||
return secConn, authInfo, nil
|
||||
}
|
||||
|
||||
// ServerHandshake implements the server side ALTS handshaker.
|
||||
func (g *altsTC) ServerHandshake(rawConn net.Conn) (_ net.Conn, _ credentials.AuthInfo, err error) {
|
||||
if !vmOnGCP {
|
||||
return nil, nil, ErrUntrustedPlatform
|
||||
}
|
||||
// Connecting to ALTS handshaker service.
|
||||
hsConn, err := service.Dial(g.hsAddress)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
// Do not close hsConn since it's shared with other handshakes.
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout)
|
||||
defer cancel()
|
||||
opts := handshaker.DefaultServerHandshakerOptions()
|
||||
opts.RPCVersions = &altspb.RpcProtocolVersions{
|
||||
MaxRpcVersion: maxRPCVersion,
|
||||
MinRpcVersion: minRPCVersion,
|
||||
}
|
||||
shs, err := handshaker.NewServerHandshaker(ctx, hsConn, rawConn, opts)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
shs.Close()
|
||||
}
|
||||
}()
|
||||
secConn, authInfo, err := shs.ServerHandshake(ctx)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
altsAuthInfo, ok := authInfo.(AuthInfo)
|
||||
if !ok {
|
||||
return nil, nil, errors.New("server-side auth info is not of type alts.AuthInfo")
|
||||
}
|
||||
match, _ := checkRPCVersions(opts.RPCVersions, altsAuthInfo.PeerRPCVersions())
|
||||
if !match {
|
||||
return nil, nil, fmt.Errorf("client-side RPC versions is not compatible with this server, local versions: %v, peer versions: %v", opts.RPCVersions, altsAuthInfo.PeerRPCVersions())
|
||||
}
|
||||
return secConn, authInfo, nil
|
||||
}
|
||||
|
||||
func (g *altsTC) Info() credentials.ProtocolInfo {
|
||||
return *g.info
|
||||
}
|
||||
|
||||
func (g *altsTC) Clone() credentials.TransportCredentials {
|
||||
info := *g.info
|
||||
var accounts []string
|
||||
if g.accounts != nil {
|
||||
accounts = make([]string, len(g.accounts))
|
||||
copy(accounts, g.accounts)
|
||||
}
|
||||
return &altsTC{
|
||||
info: &info,
|
||||
side: g.side,
|
||||
hsAddress: g.hsAddress,
|
||||
accounts: accounts,
|
||||
}
|
||||
}
|
||||
|
||||
func (g *altsTC) OverrideServerName(serverNameOverride string) error {
|
||||
g.info.ServerName = serverNameOverride
|
||||
return nil
|
||||
}
|
||||
|
||||
// compareRPCVersion returns 0 if v1 == v2, 1 if v1 > v2 and -1 if v1 < v2.
|
||||
func compareRPCVersions(v1, v2 *altspb.RpcProtocolVersions_Version) int {
|
||||
switch {
|
||||
case v1.GetMajor() > v2.GetMajor(),
|
||||
v1.GetMajor() == v2.GetMajor() && v1.GetMinor() > v2.GetMinor():
|
||||
return 1
|
||||
case v1.GetMajor() < v2.GetMajor(),
|
||||
v1.GetMajor() == v2.GetMajor() && v1.GetMinor() < v2.GetMinor():
|
||||
return -1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// checkRPCVersions performs a version check between local and peer rpc protocol
|
||||
// versions. This function returns true if the check passes which means both
|
||||
// parties agreed on a common rpc protocol to use, and false otherwise. The
|
||||
// function also returns the highest common RPC protocol version both parties
|
||||
// agreed on.
|
||||
func checkRPCVersions(local, peer *altspb.RpcProtocolVersions) (bool, *altspb.RpcProtocolVersions_Version) {
|
||||
if local == nil || peer == nil {
|
||||
logger.Error("invalid checkRPCVersions argument, either local or peer is nil.")
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// maxCommonVersion is MIN(local.max, peer.max).
|
||||
maxCommonVersion := local.GetMaxRpcVersion()
|
||||
if compareRPCVersions(local.GetMaxRpcVersion(), peer.GetMaxRpcVersion()) > 0 {
|
||||
maxCommonVersion = peer.GetMaxRpcVersion()
|
||||
}
|
||||
|
||||
// minCommonVersion is MAX(local.min, peer.min).
|
||||
minCommonVersion := peer.GetMinRpcVersion()
|
||||
if compareRPCVersions(local.GetMinRpcVersion(), peer.GetMinRpcVersion()) > 0 {
|
||||
minCommonVersion = local.GetMinRpcVersion()
|
||||
}
|
||||
|
||||
if compareRPCVersions(maxCommonVersion, minCommonVersion) < 0 {
|
||||
return false, nil
|
||||
}
|
||||
return true, maxCommonVersion
|
||||
}
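To make the negotiation above concrete: the common range is [MAX(local.min, peer.min), MIN(local.max, peer.max)], and the handshake only proceeds when that range is non-empty, in which case the highest common version is reported. A small sketch of the same logic with plain version structs, not the altspb protobuf types:

package main

import "fmt"

type version struct{ major, minor uint32 }

// cmp returns 0 if a == b, 1 if a > b and -1 if a < b.
func cmp(a, b version) int {
    switch {
    case a.major > b.major, a.major == b.major && a.minor > b.minor:
        return 1
    case a.major < b.major, a.major == b.major && a.minor < b.minor:
        return -1
    }
    return 0
}

// common mirrors checkRPCVersions: ok when MIN(local.max, peer.max) >= MAX(local.min, peer.min).
func common(localMin, localMax, peerMin, peerMax version) (bool, version) {
    maxCommon := localMax
    if cmp(localMax, peerMax) > 0 {
        maxCommon = peerMax
    }
    minCommon := peerMin
    if cmp(localMin, peerMin) > 0 {
        minCommon = localMin
    }
    if cmp(maxCommon, minCommon) < 0 {
        return false, version{}
    }
    return true, maxCommon
}

func main() {
    // Local supports [2.0, 2.1], peer supports [2.1, 3.0]: they agree on 2.1.
    ok, v := common(version{2, 0}, version{2, 1}, version{2, 1}, version{3, 0})
    fmt.Println(ok, v) // true {2 1}

    // Local supports [2.0, 2.1], peer only supports [3.0, 3.1]: no overlap.
    ok, _ = common(version{2, 0}, version{2, 1}, version{3, 0}, version{3, 1})
    fmt.Println(ok) // false
}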
|
95 vendor/google.golang.org/grpc/credentials/alts/internal/authinfo/authinfo.go generated vendored Normal file
@@ -0,0 +1,95 @@
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package authinfo provides authentication information returned by handshakers.
|
||||
package authinfo
|
||||
|
||||
import (
|
||||
"google.golang.org/grpc/credentials"
|
||||
altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp"
|
||||
)
|
||||
|
||||
var _ credentials.AuthInfo = (*altsAuthInfo)(nil)
|
||||
|
||||
// altsAuthInfo exposes security information from the ALTS handshake to the
|
||||
// application. altsAuthInfo is immutable and implements credentials.AuthInfo.
|
||||
type altsAuthInfo struct {
|
||||
p *altspb.AltsContext
|
||||
credentials.CommonAuthInfo
|
||||
}
|
||||
|
||||
// New returns a new altsAuthInfo object given handshaker results.
|
||||
func New(result *altspb.HandshakerResult) credentials.AuthInfo {
|
||||
return newAuthInfo(result)
|
||||
}
|
||||
|
||||
func newAuthInfo(result *altspb.HandshakerResult) *altsAuthInfo {
|
||||
return &altsAuthInfo{
|
||||
p: &altspb.AltsContext{
|
||||
ApplicationProtocol: result.GetApplicationProtocol(),
|
||||
RecordProtocol: result.GetRecordProtocol(),
|
||||
// TODO: assign security level from result.
|
||||
SecurityLevel: altspb.SecurityLevel_INTEGRITY_AND_PRIVACY,
|
||||
PeerServiceAccount: result.GetPeerIdentity().GetServiceAccount(),
|
||||
LocalServiceAccount: result.GetLocalIdentity().GetServiceAccount(),
|
||||
PeerRpcVersions: result.GetPeerRpcVersions(),
|
||||
PeerAttributes: result.GetPeerIdentity().GetAttributes(),
|
||||
},
|
||||
CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity},
|
||||
}
|
||||
}
|
||||
|
||||
// AuthType identifies the context as providing ALTS authentication information.
|
||||
func (s *altsAuthInfo) AuthType() string {
|
||||
return "alts"
|
||||
}
|
||||
|
||||
// ApplicationProtocol returns the context's application protocol.
|
||||
func (s *altsAuthInfo) ApplicationProtocol() string {
|
||||
return s.p.GetApplicationProtocol()
|
||||
}
|
||||
|
||||
// RecordProtocol returns the context's record protocol.
|
||||
func (s *altsAuthInfo) RecordProtocol() string {
|
||||
return s.p.GetRecordProtocol()
|
||||
}
|
||||
|
||||
// SecurityLevel returns the context's security level.
|
||||
func (s *altsAuthInfo) SecurityLevel() altspb.SecurityLevel {
|
||||
return s.p.GetSecurityLevel()
|
||||
}
|
||||
|
||||
// PeerServiceAccount returns the context's peer service account.
|
||||
func (s *altsAuthInfo) PeerServiceAccount() string {
|
||||
return s.p.GetPeerServiceAccount()
|
||||
}
|
||||
|
||||
// LocalServiceAccount returns the context's local service account.
|
||||
func (s *altsAuthInfo) LocalServiceAccount() string {
|
||||
return s.p.GetLocalServiceAccount()
|
||||
}
|
||||
|
||||
// PeerRPCVersions returns the context's peer RPC versions.
|
||||
func (s *altsAuthInfo) PeerRPCVersions() *altspb.RpcProtocolVersions {
|
||||
return s.p.GetPeerRpcVersions()
|
||||
}
|
||||
|
||||
// PeerAttributes returns the context's peer attributes.
|
||||
func (s *altsAuthInfo) PeerAttributes() map[string]string {
|
||||
return s.p.GetPeerAttributes()
|
||||
}
|
67 vendor/google.golang.org/grpc/credentials/alts/internal/common.go generated vendored Normal file
@@ -0,0 +1,67 @@
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package internal contains common core functionality for ALTS.
|
||||
package internal
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
|
||||
"google.golang.org/grpc/credentials"
|
||||
)
|
||||
|
||||
const (
|
||||
// ClientSide identifies the client in this communication.
|
||||
ClientSide Side = iota
|
||||
// ServerSide identifies the server in this communication.
|
||||
ServerSide
|
||||
)
|
||||
|
||||
// PeerNotRespondingError is returned when a peer server is not responding
|
||||
// after a channel has been established. It is treated as a temporary connection
|
||||
// error and re-connection to the server should be attempted.
|
||||
var PeerNotRespondingError = &peerNotRespondingError{}
|
||||
|
||||
// Side identifies the party's role: client or server.
|
||||
type Side int
|
||||
|
||||
type peerNotRespondingError struct{}
|
||||
|
||||
// Return an error message for the purpose of logging.
|
||||
func (e *peerNotRespondingError) Error() string {
|
||||
return "peer server is not responding and re-connection should be attempted."
|
||||
}
|
||||
|
||||
// Temporary indicates if this connection error is temporary or fatal.
|
||||
func (e *peerNotRespondingError) Temporary() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Handshaker defines an ALTS handshaker interface.
|
||||
type Handshaker interface {
|
||||
// ClientHandshake starts and completes a client-side handshaking and
|
||||
// returns a secure connection and corresponding auth information.
|
||||
ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error)
|
||||
// ServerHandshake starts and completes a server-side handshaking and
|
||||
// returns a secure connection and corresponding auth information.
|
||||
ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error)
|
||||
// Close terminates the Handshaker. It should be called when the caller
|
||||
// obtains the secure connection.
|
||||
Close()
|
||||
}
|
131 vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go generated vendored Normal file
@@ -0,0 +1,131 @@
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package conn
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// rekeyAEAD holds the necessary information for an AEAD based on
|
||||
// AES-GCM that performs nonce-based key derivation and XORs the
|
||||
// nonce with a random mask.
|
||||
type rekeyAEAD struct {
|
||||
kdfKey []byte
|
||||
kdfCounter []byte
|
||||
nonceMask []byte
|
||||
nonceBuf []byte
|
||||
gcmAEAD cipher.AEAD
|
||||
}
|
||||
|
||||
// KeySizeError signals that the given key does not have the correct size.
|
||||
type KeySizeError int
|
||||
|
||||
func (k KeySizeError) Error() string {
|
||||
return "alts/conn: invalid key size " + strconv.Itoa(int(k))
|
||||
}
|
||||
|
||||
// newRekeyAEAD creates a new instance of aes128gcm with rekeying.
|
||||
// The key argument should be 44 bytes, the first 32 bytes are used as a key
|
||||
// for HKDF-expand and the remaining 12 bytes are used as a random mask for
|
||||
// the counter.
|
||||
func newRekeyAEAD(key []byte) (*rekeyAEAD, error) {
|
||||
k := len(key)
|
||||
if k != kdfKeyLen+nonceLen {
|
||||
return nil, KeySizeError(k)
|
||||
}
|
||||
return &rekeyAEAD{
|
||||
kdfKey: key[:kdfKeyLen],
|
||||
kdfCounter: make([]byte, kdfCounterLen),
|
||||
nonceMask: key[kdfKeyLen:],
|
||||
nonceBuf: make([]byte, nonceLen),
|
||||
gcmAEAD: nil,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Seal rekeys if nonce[2:8] is different than in the last call, masks the nonce,
|
||||
// and calls Seal for aes128gcm.
|
||||
func (s *rekeyAEAD) Seal(dst, nonce, plaintext, additionalData []byte) []byte {
|
||||
if err := s.rekeyIfRequired(nonce); err != nil {
|
||||
panic(fmt.Sprintf("Rekeying failed with: %s", err.Error()))
|
||||
}
|
||||
maskNonce(s.nonceBuf, nonce, s.nonceMask)
|
||||
return s.gcmAEAD.Seal(dst, s.nonceBuf, plaintext, additionalData)
|
||||
}
|
||||
|
||||
// Open rekeys if nonce[2:8] is different than in the last call, masks the nonce,
|
||||
// and calls Open for aes128gcm.
|
||||
func (s *rekeyAEAD) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) {
|
||||
if err := s.rekeyIfRequired(nonce); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
maskNonce(s.nonceBuf, nonce, s.nonceMask)
|
||||
return s.gcmAEAD.Open(dst, s.nonceBuf, ciphertext, additionalData)
|
||||
}
|
||||
|
||||
// rekeyIfRequired creates a new aes128gcm AEAD if the existing AEAD is nil
|
||||
// or cannot be used with given nonce.
|
||||
func (s *rekeyAEAD) rekeyIfRequired(nonce []byte) error {
|
||||
newKdfCounter := nonce[kdfCounterOffset : kdfCounterOffset+kdfCounterLen]
|
||||
if s.gcmAEAD != nil && bytes.Equal(newKdfCounter, s.kdfCounter) {
|
||||
return nil
|
||||
}
|
||||
copy(s.kdfCounter, newKdfCounter)
|
||||
a, err := aes.NewCipher(hkdfExpand(s.kdfKey, s.kdfCounter))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.gcmAEAD, err = cipher.NewGCM(a)
|
||||
return err
|
||||
}
|
||||
|
||||
// maskNonce XORs the given nonce with the mask and stores the result in dst.
|
||||
func maskNonce(dst, nonce, mask []byte) {
|
||||
nonce1 := binary.LittleEndian.Uint64(nonce[:sizeUint64])
|
||||
nonce2 := binary.LittleEndian.Uint32(nonce[sizeUint64:])
|
||||
mask1 := binary.LittleEndian.Uint64(mask[:sizeUint64])
|
||||
mask2 := binary.LittleEndian.Uint32(mask[sizeUint64:])
|
||||
binary.LittleEndian.PutUint64(dst[:sizeUint64], nonce1^mask1)
|
||||
binary.LittleEndian.PutUint32(dst[sizeUint64:], nonce2^mask2)
|
||||
}
|
||||
|
||||
// NonceSize returns the required nonce size.
|
||||
func (s *rekeyAEAD) NonceSize() int {
|
||||
return s.gcmAEAD.NonceSize()
|
||||
}
|
||||
|
||||
// Overhead returns the ciphertext overhead.
|
||||
func (s *rekeyAEAD) Overhead() int {
|
||||
return s.gcmAEAD.Overhead()
|
||||
}
|
||||
|
||||
// hkdfExpand computes the first 16 bytes of the HKDF-expand function
|
||||
// defined in RFC5869.
|
||||
func hkdfExpand(key, info []byte) []byte {
|
||||
mac := hmac.New(sha256.New, key)
|
||||
mac.Write(info)
|
||||
mac.Write([]byte{0x01}[:])
|
||||
return mac.Sum(nil)[:aeadKeyLen]
|
||||
}
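For reference, the rekeying KDF above is just HMAC-SHA256 truncated to 16 bytes, keyed with the 32-byte kdfKey and fed the 6-byte counter taken from nonce[2:8]. A small self-contained sketch of the same derivation; the key and nonce values are hypothetical, and deriveAEADKey is a local name, not part of the conn package:

package main

import (
    "crypto/hmac"
    "crypto/sha256"
    "encoding/hex"
    "fmt"
)

// deriveAEADKey mirrors hkdfExpand: HMAC-SHA256(kdfKey, info || 0x01),
// truncated to the 16-byte AES-128 key length.
func deriveAEADKey(kdfKey, info []byte) []byte {
    mac := hmac.New(sha256.New, kdfKey)
    mac.Write(info)
    mac.Write([]byte{0x01})
    return mac.Sum(nil)[:16]
}

func main() {
    kdfKey := make([]byte, 32) // hypothetical 32-byte KDF key (all zeros here)
    nonce := make([]byte, 12)  // hypothetical 12-byte record nonce
    nonce[2] = 0x01            // bump the KDF counter portion (nonce[2:8])
    kdfCounter := nonce[2:8]

    key := deriveAEADKey(kdfKey, kdfCounter)
    fmt.Println("derived AES-128 key:", hex.EncodeToString(key))
}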
|
105 vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcm.go generated vendored Normal file
@@ -0,0 +1,105 @@
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package conn
|
||||
|
||||
import (
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
|
||||
core "google.golang.org/grpc/credentials/alts/internal"
|
||||
)
|
||||
|
||||
const (
|
||||
// Overflow length n in bytes, never encrypt more than 2^(n*8) frames (in
|
||||
// each direction).
|
||||
overflowLenAES128GCM = 5
|
||||
)
|
||||
|
||||
// aes128gcm is the struct that holds necessary information for ALTS record.
|
||||
// The counter value is NOT included in the payload during the encryption and
|
||||
// decryption operations.
|
||||
type aes128gcm struct {
|
||||
// inCounter is used in ALTS record to check that incoming counters are
|
||||
// as expected, since ALTS record guarantees that messages are unwrapped
|
||||
// in the same order that the peer wrapped them.
|
||||
inCounter Counter
|
||||
outCounter Counter
|
||||
aead cipher.AEAD
|
||||
}
|
||||
|
||||
// NewAES128GCM creates an instance that uses aes128gcm for ALTS record.
|
||||
func NewAES128GCM(side core.Side, key []byte) (ALTSRecordCrypto, error) {
|
||||
c, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
a, err := cipher.NewGCM(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &aes128gcm{
|
||||
inCounter: NewInCounter(side, overflowLenAES128GCM),
|
||||
outCounter: NewOutCounter(side, overflowLenAES128GCM),
|
||||
aead: a,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Encrypt is the encryption function. dst can contain bytes at the beginning of
|
||||
// the ciphertext that will not be encrypted but will be authenticated. If dst
|
||||
// has enough capacity to hold these bytes, the ciphertext and the tag, no
|
||||
// allocation and copy operations will be performed. dst and plaintext do not
|
||||
// overlap.
|
||||
func (s *aes128gcm) Encrypt(dst, plaintext []byte) ([]byte, error) {
|
||||
// If we need to allocate an output buffer, we want to include space for
|
||||
// GCM tag to avoid forcing ALTS record to reallocate as well.
|
||||
dlen := len(dst)
|
||||
dst, out := SliceForAppend(dst, len(plaintext)+GcmTagSize)
|
||||
seq, err := s.outCounter.Value()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
data := out[:len(plaintext)]
|
||||
copy(data, plaintext) // data may alias plaintext
|
||||
|
||||
// Seal appends the ciphertext and the tag to its first argument and
|
||||
// returns the updated slice. However, SliceForAppend above ensures that
|
||||
// dst has enough capacity to avoid a reallocation and copy due to the
|
||||
// append.
|
||||
dst = s.aead.Seal(dst[:dlen], seq, data, nil)
|
||||
s.outCounter.Inc()
|
||||
return dst, nil
|
||||
}
|
||||
|
||||
func (s *aes128gcm) EncryptionOverhead() int {
|
||||
return GcmTagSize
|
||||
}
|
||||
|
||||
func (s *aes128gcm) Decrypt(dst, ciphertext []byte) ([]byte, error) {
|
||||
seq, err := s.inCounter.Value()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// If dst is equal to ciphertext[:0], ciphertext storage is reused.
|
||||
plaintext, err := s.aead.Open(dst, seq, ciphertext, nil)
|
||||
if err != nil {
|
||||
return nil, ErrAuth
|
||||
}
|
||||
s.inCounter.Inc()
|
||||
return plaintext, nil
|
||||
}
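The record protection above is plain AES-128-GCM with the 96-bit counter used as the nonce and a 16-byte tag appended to each frame. A minimal round trip with the standard library illustrates the overhead; the zero key and nonce are hypothetical and for illustration only (a nonce must never be reused with the same key):

package main

import (
    "crypto/aes"
    "crypto/cipher"
    "fmt"
)

func main() {
    key := make([]byte, 16)   // hypothetical all-zero AES-128 key
    nonce := make([]byte, 12) // 96-bit counter used as the GCM nonce

    block, err := aes.NewCipher(key)
    if err != nil {
        panic(err)
    }
    aead, err := cipher.NewGCM(block)
    if err != nil {
        panic(err)
    }

    // Seal appends ciphertext||tag; the 16-byte tag is the encryption overhead.
    ct := aead.Seal(nil, nonce, []byte("hello alts"), nil)
    fmt.Println("overhead bytes:", len(ct)-len("hello alts"))

    // Open verifies the tag and recovers the plaintext.
    pt, err := aead.Open(nil, nonce, ct, nil)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%s\n", pt)
}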
|
116 vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go generated vendored Normal file
@@ -0,0 +1,116 @@
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package conn
|
||||
|
||||
import (
|
||||
"crypto/cipher"
|
||||
|
||||
core "google.golang.org/grpc/credentials/alts/internal"
|
||||
)
|
||||
|
||||
const (
|
||||
// Overflow length n in bytes, never encrypt more than 2^(n*8) frames (in
|
||||
// each direction).
|
||||
overflowLenAES128GCMRekey = 8
|
||||
nonceLen = 12
|
||||
aeadKeyLen = 16
|
||||
kdfKeyLen = 32
|
||||
kdfCounterOffset = 2
|
||||
kdfCounterLen = 6
|
||||
sizeUint64 = 8
|
||||
)
|
||||
|
||||
// aes128gcmRekey is the struct that holds necessary information for ALTS record.
|
||||
// The counter value is NOT included in the payload during the encryption and
|
||||
// decryption operations.
|
||||
type aes128gcmRekey struct {
|
||||
// inCounter is used in ALTS record to check that incoming counters are
|
||||
// as expected, since ALTS record guarantees that messages are unwrapped
|
||||
// in the same order that the peer wrapped them.
|
||||
inCounter Counter
|
||||
outCounter Counter
|
||||
inAEAD cipher.AEAD
|
||||
outAEAD cipher.AEAD
|
||||
}
|
||||
|
||||
// NewAES128GCMRekey creates an instance that uses aes128gcm with rekeying
|
||||
// for ALTS record. The key argument should be 44 bytes, the first 32 bytes
|
||||
// are used as a key for HKDF-expand and the remaining 12 bytes are used
|
||||
// as a random mask for the counter.
|
||||
func NewAES128GCMRekey(side core.Side, key []byte) (ALTSRecordCrypto, error) {
|
||||
inCounter := NewInCounter(side, overflowLenAES128GCMRekey)
|
||||
outCounter := NewOutCounter(side, overflowLenAES128GCMRekey)
|
||||
inAEAD, err := newRekeyAEAD(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
outAEAD, err := newRekeyAEAD(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &aes128gcmRekey{
|
||||
inCounter,
|
||||
outCounter,
|
||||
inAEAD,
|
||||
outAEAD,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Encrypt is the encryption function. dst can contain bytes at the beginning of
|
||||
// the ciphertext that will not be encrypted but will be authenticated. If dst
|
||||
// has enough capacity to hold these bytes, the ciphertext and the tag, no
|
||||
// allocation and copy operations will be performed. dst and plaintext do not
|
||||
// overlap.
|
||||
func (s *aes128gcmRekey) Encrypt(dst, plaintext []byte) ([]byte, error) {
|
||||
// If we need to allocate an output buffer, we want to include space for
|
||||
// GCM tag to avoid forcing ALTS record to reallocate as well.
|
||||
dlen := len(dst)
|
||||
dst, out := SliceForAppend(dst, len(plaintext)+GcmTagSize)
|
||||
seq, err := s.outCounter.Value()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
data := out[:len(plaintext)]
|
||||
copy(data, plaintext) // data may alias plaintext
|
||||
|
||||
// Seal appends the ciphertext and the tag to its first argument and
|
||||
// returns the updated slice. However, SliceForAppend above ensures that
|
||||
// dst has enough capacity to avoid a reallocation and copy due to the
|
||||
// append.
|
||||
dst = s.outAEAD.Seal(dst[:dlen], seq, data, nil)
|
||||
s.outCounter.Inc()
|
||||
return dst, nil
|
||||
}
|
||||
|
||||
func (s *aes128gcmRekey) EncryptionOverhead() int {
|
||||
return GcmTagSize
|
||||
}
|
||||
|
||||
func (s *aes128gcmRekey) Decrypt(dst, ciphertext []byte) ([]byte, error) {
|
||||
seq, err := s.inCounter.Value()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
plaintext, err := s.inAEAD.Open(dst, seq, ciphertext, nil)
|
||||
if err != nil {
|
||||
return nil, ErrAuth
|
||||
}
|
||||
s.inCounter.Inc()
|
||||
return plaintext, nil
|
||||
}
|
70 vendor/google.golang.org/grpc/credentials/alts/internal/conn/common.go generated vendored Normal file
@@ -0,0 +1,70 @@
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package conn
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
const (
|
||||
// GcmTagSize is the GCM tag size, i.e. the difference in length between
|
||||
// plaintext and ciphertext. From crypto/cipher/gcm.go in Go crypto
|
||||
// library.
|
||||
GcmTagSize = 16
|
||||
)
|
||||
|
||||
// ErrAuth occurs on authentication failure.
|
||||
var ErrAuth = errors.New("message authentication failed")
|
||||
|
||||
// SliceForAppend takes a slice and a requested number of bytes. It returns a
|
||||
// slice with the contents of the given slice followed by that many bytes and a
|
||||
// second slice that aliases into it and contains only the extra bytes. If the
|
||||
// original slice has sufficient capacity then no allocation is performed.
|
||||
func SliceForAppend(in []byte, n int) (head, tail []byte) {
|
||||
if total := len(in) + n; cap(in) >= total {
|
||||
head = in[:total]
|
||||
} else {
|
||||
head = make([]byte, total)
|
||||
copy(head, in)
|
||||
}
|
||||
tail = head[len(in):]
|
||||
return head, tail
|
||||
}
|
||||
|
||||
// ParseFramedMsg parses the provided buffer and returns a frame of the format
|
||||
// msgLength+msg and any remaining bytes in that buffer.
|
||||
func ParseFramedMsg(b []byte, maxLen uint32) ([]byte, []byte, error) {
|
||||
// If the size field is not complete, return the provided buffer as
|
||||
// remaining buffer.
|
||||
if len(b) < MsgLenFieldSize {
|
||||
return nil, b, nil
|
||||
}
|
||||
msgLenField := b[:MsgLenFieldSize]
|
||||
length := binary.LittleEndian.Uint32(msgLenField)
|
||||
if length > maxLen {
|
||||
return nil, nil, fmt.Errorf("received the frame length %d larger than the limit %d", length, maxLen)
|
||||
}
|
||||
if len(b) < int(length)+4 { // account for the first 4 msg length bytes.
|
||||
// Frame is not complete yet.
|
||||
return nil, b, nil
|
||||
}
|
||||
return b[:MsgLenFieldSize+length], b[MsgLenFieldSize+length:], nil
|
||||
}
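The framing handled by ParseFramedMsg is a 4-byte little-endian length prefix followed by the message body, where the length covers everything after the prefix. A minimal sketch of building and splitting such frames; frame and splitFrame are local names, not the conn package's API:

package main

import (
    "encoding/binary"
    "fmt"
)

// frame prefixes msg with its 4-byte little-endian length.
func frame(msg []byte) []byte {
    out := make([]byte, 4+len(msg))
    binary.LittleEndian.PutUint32(out[:4], uint32(len(msg)))
    copy(out[4:], msg)
    return out
}

// splitFrame returns the first complete frame (including its length prefix)
// and the remaining bytes, or (nil, b) when b does not yet hold a full frame.
func splitFrame(b []byte) (msg, rest []byte) {
    if len(b) < 4 {
        return nil, b
    }
    n := int(binary.LittleEndian.Uint32(b[:4]))
    if len(b) < 4+n {
        return nil, b
    }
    return b[:4+n], b[4+n:]
}

func main() {
    buf := append(frame([]byte("hello")), frame([]byte("world"))...)
    first, rest := splitFrame(buf)
    fmt.Printf("%q %q\n", first[4:], rest) // "hello" and the still-framed "world"
}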
|
62 vendor/google.golang.org/grpc/credentials/alts/internal/conn/counter.go generated vendored Normal file
@@ -0,0 +1,62 @@
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package conn
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
const counterLen = 12
|
||||
|
||||
var (
|
||||
errInvalidCounter = errors.New("invalid counter")
|
||||
)
|
||||
|
||||
// Counter is a 96-bit, little-endian counter.
|
||||
type Counter struct {
|
||||
value [counterLen]byte
|
||||
invalid bool
|
||||
overflowLen int
|
||||
}
|
||||
|
||||
// Value returns the current value of the counter as a byte slice.
|
||||
func (c *Counter) Value() ([]byte, error) {
|
||||
if c.invalid {
|
||||
return nil, errInvalidCounter
|
||||
}
|
||||
return c.value[:], nil
|
||||
}
|
||||
|
||||
// Inc increments the counter and checks for overflow.
|
||||
func (c *Counter) Inc() {
|
||||
// If the counter is already invalid, there is no need to increase it.
|
||||
if c.invalid {
|
||||
return
|
||||
}
|
||||
i := 0
|
||||
for ; i < c.overflowLen; i++ {
|
||||
c.value[i]++
|
||||
if c.value[i] != 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i == c.overflowLen {
|
||||
c.invalid = true
|
||||
}
|
||||
}
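The counter above is little-endian: Inc only walks the first overflowLen bytes and the counter becomes permanently invalid once those bytes wrap around. A tiny sketch of the same increment logic on a plain byte slice, rather than the Counter type itself:

package main

import "fmt"

// inc increments a little-endian counter restricted to the first
// overflowLen bytes; it reports false once that window wraps to zero.
func inc(value []byte, overflowLen int) bool {
    i := 0
    for ; i < overflowLen; i++ {
        value[i]++
        if value[i] != 0 {
            break
        }
    }
    return i != overflowLen // i == overflowLen means every byte wrapped
}

func main() {
    c := make([]byte, 12) // 96-bit counter, as in the ALTS record protocol
    ok := true
    for n := 0; n < 256 && ok; n++ {
        ok = inc(c, 1) // an overflowLen of 1 byte overflows after 255 increments
    }
    fmt.Println("overflowed:", !ok, "counter[0]:", c[0])
}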
|
275 vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go generated vendored Normal file
@@ -0,0 +1,275 @@
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package conn contains an implementation of a secure channel created by gRPC
|
||||
// handshakers.
|
||||
package conn
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math"
|
||||
"net"
|
||||
|
||||
core "google.golang.org/grpc/credentials/alts/internal"
|
||||
)
|
||||
|
||||
// ALTSRecordCrypto is the interface for gRPC ALTS record protocol.
|
||||
type ALTSRecordCrypto interface {
|
||||
// Encrypt encrypts the plaintext and computes the tag (if any) of dst
|
||||
// and plaintext. dst and plaintext may fully overlap or not at all.
|
||||
Encrypt(dst, plaintext []byte) ([]byte, error)
|
||||
// EncryptionOverhead returns the tag size (if any) in bytes.
|
||||
EncryptionOverhead() int
|
||||
// Decrypt decrypts ciphertext and verify the tag (if any). dst and
|
||||
// ciphertext may alias exactly or not at all. To reuse ciphertext's
|
||||
// storage for the decrypted output, use ciphertext[:0] as dst.
|
||||
Decrypt(dst, ciphertext []byte) ([]byte, error)
|
||||
}
|
||||
|
||||
// ALTSRecordFunc is a function type for factory functions that create
|
||||
// ALTSRecordCrypto instances.
|
||||
type ALTSRecordFunc func(s core.Side, keyData []byte) (ALTSRecordCrypto, error)
|
||||
|
||||
const (
|
||||
// MsgLenFieldSize is the byte size of the frame length field of a
|
||||
// framed message.
|
||||
MsgLenFieldSize = 4
|
||||
// The byte size of the message type field of a framed message.
|
||||
msgTypeFieldSize = 4
|
||||
// The byte size limit for an ALTS record message.
|
||||
altsRecordLengthLimit = 1024 * 1024 // 1 MiB
|
||||
// The default byte size of an ALTS record message.
|
||||
altsRecordDefaultLength = 4 * 1024 // 4KiB
|
||||
// Message type value included in ALTS record framing.
|
||||
altsRecordMsgType = uint32(0x06)
|
||||
// The initial write buffer size.
|
||||
altsWriteBufferInitialSize = 32 * 1024 // 32KiB
|
||||
// The maximum write buffer size. This *must* be a multiple of
|
||||
// altsRecordDefaultLength.
|
||||
altsWriteBufferMaxSize = 512 * 1024 // 512KiB
|
||||
)
|
||||
|
||||
var (
|
||||
protocols = make(map[string]ALTSRecordFunc)
|
||||
)
|
||||
|
||||
// RegisterProtocol registers an ALTS record encryption protocol.
|
||||
func RegisterProtocol(protocol string, f ALTSRecordFunc) error {
|
||||
if _, ok := protocols[protocol]; ok {
|
||||
return fmt.Errorf("protocol %v is already registered", protocol)
|
||||
}
|
||||
protocols[protocol] = f
|
||||
return nil
|
||||
}
|
||||
|
||||
// conn represents a secured connection. It implements the net.Conn interface.
|
||||
type conn struct {
|
||||
net.Conn
|
||||
crypto ALTSRecordCrypto
|
||||
// buf holds data that has been read from the connection and decrypted,
|
||||
// but has not yet been returned by Read.
|
||||
buf []byte
|
||||
payloadLengthLimit int
|
||||
// protected holds data read from the network that has not yet been
|
||||
// decrypted. This data might not compose a complete frame.
|
||||
protected []byte
|
||||
// writeBuf is a buffer used to contain encrypted frames before being
|
||||
// written to the network.
|
||||
writeBuf []byte
|
||||
// nextFrame stores the next frame (in protected buffer) info.
|
||||
nextFrame []byte
|
||||
// overhead is the calculated overhead of each frame.
|
||||
overhead int
|
||||
}
|
||||
|
||||
// NewConn creates a new secure channel instance given the other party's role and
|
||||
// handshaking result.
|
||||
func NewConn(c net.Conn, side core.Side, recordProtocol string, key []byte, protected []byte) (net.Conn, error) {
|
||||
newCrypto := protocols[recordProtocol]
|
||||
if newCrypto == nil {
|
||||
return nil, fmt.Errorf("negotiated unknown next_protocol %q", recordProtocol)
|
||||
}
|
||||
crypto, err := newCrypto(side, key)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("protocol %q: %v", recordProtocol, err)
|
||||
}
|
||||
overhead := MsgLenFieldSize + msgTypeFieldSize + crypto.EncryptionOverhead()
|
||||
payloadLengthLimit := altsRecordDefaultLength - overhead
|
||||
var protectedBuf []byte
|
||||
if protected == nil {
|
||||
// We pre-allocate protected to be of size
|
||||
// 2*altsRecordDefaultLength-1 during initialization. We only
|
||||
// read from the network into protected when protected does not
|
||||
// contain a complete frame, which is at most
|
||||
// altsRecordDefaultLength-1 (bytes). And we read at most
|
||||
// altsRecordDefaultLength (bytes) data into protected at one
|
||||
// time. Therefore, 2*altsRecordDefaultLength-1 is large enough
|
||||
// to buffer data read from the network.
|
||||
protectedBuf = make([]byte, 0, 2*altsRecordDefaultLength-1)
|
||||
} else {
|
||||
protectedBuf = make([]byte, len(protected))
|
||||
copy(protectedBuf, protected)
|
||||
}
|
||||
|
||||
altsConn := &conn{
|
||||
Conn: c,
|
||||
crypto: crypto,
|
||||
payloadLengthLimit: payloadLengthLimit,
|
||||
protected: protectedBuf,
|
||||
writeBuf: make([]byte, altsWriteBufferInitialSize),
|
||||
nextFrame: protectedBuf,
|
||||
overhead: overhead,
|
||||
}
|
||||
return altsConn, nil
|
||||
}
|
||||
|
||||
// Read reads and decrypts a frame from the underlying connection, and copies the
|
||||
// decrypted payload into b. If the size of the payload is greater than len(b),
|
||||
// Read retains the remaining bytes in an internal buffer, and subsequent calls
|
||||
// to Read will read from this buffer until it is exhausted.
|
||||
func (p *conn) Read(b []byte) (n int, err error) {
|
||||
if len(p.buf) == 0 {
|
||||
var framedMsg []byte
|
||||
framedMsg, p.nextFrame, err = ParseFramedMsg(p.nextFrame, altsRecordLengthLimit)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
// Check whether the next frame to be decrypted has been
|
||||
// completely received yet.
|
||||
if len(framedMsg) == 0 {
|
||||
copy(p.protected, p.nextFrame)
|
||||
p.protected = p.protected[:len(p.nextFrame)]
|
||||
// Always copy next incomplete frame to the beginning of
|
||||
// the protected buffer and reset nextFrame to it.
|
||||
p.nextFrame = p.protected
|
||||
}
|
||||
// Check whether a complete frame has been received yet.
|
||||
for len(framedMsg) == 0 {
|
||||
if len(p.protected) == cap(p.protected) {
|
||||
tmp := make([]byte, len(p.protected), cap(p.protected)+altsRecordDefaultLength)
|
||||
copy(tmp, p.protected)
|
||||
p.protected = tmp
|
||||
}
|
||||
n, err = p.Conn.Read(p.protected[len(p.protected):min(cap(p.protected), len(p.protected)+altsRecordDefaultLength)])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
p.protected = p.protected[:len(p.protected)+n]
|
||||
framedMsg, p.nextFrame, err = ParseFramedMsg(p.protected, altsRecordLengthLimit)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
// Now we have a complete frame; decrypt it.
|
||||
msg := framedMsg[MsgLenFieldSize:]
|
||||
msgType := binary.LittleEndian.Uint32(msg[:msgTypeFieldSize])
|
||||
if msgType&0xff != altsRecordMsgType {
|
||||
return 0, fmt.Errorf("received frame with incorrect message type %v, expected lower byte %v",
|
||||
msgType, altsRecordMsgType)
|
||||
}
|
||||
ciphertext := msg[msgTypeFieldSize:]
|
||||
|
||||
// Decrypt requires that if the dst and ciphertext alias, they
|
||||
// must alias exactly. Code here used to use msg[:0], but msg
|
||||
// starts MsgLenFieldSize+msgTypeFieldSize bytes earlier than
|
||||
// ciphertext, so they alias inexactly. Using ciphertext[:0]
|
||||
// arranges the appropriate aliasing without needing to copy
|
||||
// ciphertext or use a separate destination buffer. For more info
|
||||
// check: https://golang.org/pkg/crypto/cipher/#AEAD.
|
||||
p.buf, err = p.crypto.Decrypt(ciphertext[:0], ciphertext)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
n = copy(b, p.buf)
|
||||
p.buf = p.buf[n:]
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Write encrypts, frames, and writes bytes from b to the underlying connection.
|
||||
func (p *conn) Write(b []byte) (n int, err error) {
|
||||
n = len(b)
|
||||
// Calculate the output buffer size with framing and encryption overhead.
|
||||
numOfFrames := int(math.Ceil(float64(len(b)) / float64(p.payloadLengthLimit)))
|
||||
size := len(b) + numOfFrames*p.overhead
|
||||
// If writeBuf is too small, increase its size up to the maximum size.
|
||||
partialBSize := len(b)
|
||||
if size > altsWriteBufferMaxSize {
|
||||
size = altsWriteBufferMaxSize
|
||||
const numOfFramesInMaxWriteBuf = altsWriteBufferMaxSize / altsRecordDefaultLength
|
||||
partialBSize = numOfFramesInMaxWriteBuf * p.payloadLengthLimit
|
||||
}
|
||||
if len(p.writeBuf) < size {
|
||||
p.writeBuf = make([]byte, size)
|
||||
}
|
||||
|
||||
for partialBStart := 0; partialBStart < len(b); partialBStart += partialBSize {
|
||||
partialBEnd := partialBStart + partialBSize
|
||||
if partialBEnd > len(b) {
|
||||
partialBEnd = len(b)
|
||||
}
|
||||
partialB := b[partialBStart:partialBEnd]
|
||||
writeBufIndex := 0
|
||||
for len(partialB) > 0 {
|
||||
payloadLen := len(partialB)
|
||||
if payloadLen > p.payloadLengthLimit {
|
||||
payloadLen = p.payloadLengthLimit
|
||||
}
|
||||
buf := partialB[:payloadLen]
|
||||
partialB = partialB[payloadLen:]
|
||||
|
||||
// Write buffer contains: length, type, payload, and tag
|
||||
// if any.
|
||||
|
||||
// 1. Fill in type field.
|
||||
msg := p.writeBuf[writeBufIndex+MsgLenFieldSize:]
|
||||
binary.LittleEndian.PutUint32(msg, altsRecordMsgType)
|
||||
|
||||
// 2. Encrypt the payload and create a tag if any.
|
||||
msg, err = p.crypto.Encrypt(msg[:msgTypeFieldSize], buf)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
|
||||
// 3. Fill in the size field.
|
||||
binary.LittleEndian.PutUint32(p.writeBuf[writeBufIndex:], uint32(len(msg)))
|
||||
|
||||
// 4. Increase writeBufIndex.
|
||||
writeBufIndex += len(buf) + p.overhead
|
||||
}
|
||||
nn, err := p.Conn.Write(p.writeBuf[:writeBufIndex])
|
||||
if err != nil {
|
||||
// We need to calculate the actual data size that was
|
||||
// written. This means we need to remove header,
|
||||
// encryption overheads, and any partially-written
|
||||
// frame data.
|
||||
numOfWrittenFrames := int(math.Floor(float64(nn) / float64(altsRecordDefaultLength)))
|
||||
return partialBStart + numOfWrittenFrames*p.payloadLengthLimit, err
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func min(a, b int) int {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
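A standalone sketch of the frame layout implied by the constants and Write path above: a 4-byte little-endian length field (counting everything after itself), a 4-byte message type, then the ciphertext (payload plus tag). The helper below builds an unencrypted frame purely for illustration; the names are not from the vendored code.

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	msgLenFieldSize  = 4
	msgTypeFieldSize = 4
	recordMsgType    = uint32(0x06)
)

// frame lays out one ALTS-style record without encryption, reserving tagLen
// bytes where the AEAD tag would normally go.
func frame(payload []byte, tagLen int) []byte {
	buf := make([]byte, msgLenFieldSize+msgTypeFieldSize+len(payload)+tagLen)
	binary.LittleEndian.PutUint32(buf, uint32(msgTypeFieldSize+len(payload)+tagLen))
	binary.LittleEndian.PutUint32(buf[msgLenFieldSize:], recordMsgType)
	copy(buf[msgLenFieldSize+msgTypeFieldSize:], payload)
	return buf
}

func main() {
	f := frame([]byte("hello"), 16)
	fmt.Println(len(f), f[:8]) // 29 [25 0 0 0 6 0 0 0]
}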
63
vendor/google.golang.org/grpc/credentials/alts/internal/conn/utils.go
generated
vendored
Normal file
|
@ -0,0 +1,63 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package conn
|
||||
|
||||
import core "google.golang.org/grpc/credentials/alts/internal"
|
||||
|
||||
// NewOutCounter returns an outgoing counter initialized to the starting sequence
|
||||
// number for the client/server side of a connection.
|
||||
func NewOutCounter(s core.Side, overflowLen int) (c Counter) {
|
||||
c.overflowLen = overflowLen
|
||||
if s == core.ServerSide {
|
||||
// Server counters in ALTS record have the little-endian high bit
|
||||
// set.
|
||||
c.value[counterLen-1] = 0x80
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// NewInCounter returns an incoming counter initialized to the starting sequence
|
||||
// number for the client/server side of a connection. This is used in ALTS record
|
||||
// to check that incoming counters are as expected, since ALTS record guarantees
|
||||
// that messages are unwrapped in the same order that the peer wrapped them.
|
||||
func NewInCounter(s core.Side, overflowLen int) (c Counter) {
|
||||
c.overflowLen = overflowLen
|
||||
if s == core.ClientSide {
|
||||
// Server counters in ALTS record have the little-endian high bit
|
||||
// set.
|
||||
c.value[counterLen-1] = 0x80
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// CounterFromValue creates a new counter given an initial value.
|
||||
func CounterFromValue(value []byte, overflowLen int) (c Counter) {
|
||||
c.overflowLen = overflowLen
|
||||
copy(c.value[:], value)
|
||||
return
|
||||
}
|
||||
|
||||
// CounterSide returns the connection side (client/server) a sequence counter is
|
||||
// associated with.
|
||||
func CounterSide(c []byte) core.Side {
|
||||
if c[counterLen-1]&0x80 == 0x80 {
|
||||
return core.ServerSide
|
||||
}
|
||||
return core.ClientSide
|
||||
}
|
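The helpers above encode the connection side in the high bit of the counter's most significant byte. A small standalone sketch of that convention (not the vendored API, which lives in an internal package):

package main

import "fmt"

const counterLen = 12

// side reports which peer a 96-bit ALTS sequence counter belongs to:
// server counters carry the high bit of the last (most significant) byte.
func side(counter []byte) string {
	if counter[counterLen-1]&0x80 == 0x80 {
		return "server"
	}
	return "client"
}

func main() {
	var server, client [counterLen]byte
	server[counterLen-1] = 0x80 // what NewOutCounter does for core.ServerSide
	fmt.Println(side(server[:]), side(client[:])) // server client
}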
375
vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
generated
vendored
Normal file
|
@ -0,0 +1,375 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package handshaker provides ALTS handshaking functionality for GCP.
|
||||
package handshaker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
grpc "google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
core "google.golang.org/grpc/credentials/alts/internal"
|
||||
"google.golang.org/grpc/credentials/alts/internal/authinfo"
|
||||
"google.golang.org/grpc/credentials/alts/internal/conn"
|
||||
altsgrpc "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp"
|
||||
altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp"
|
||||
)
|
||||
|
||||
const (
|
||||
// The maximum byte size of receive frames.
|
||||
frameLimit = 64 * 1024 // 64 KB
|
||||
rekeyRecordProtocolName = "ALTSRP_GCM_AES128_REKEY"
|
||||
// maxPendingHandshakes represents the maximum number of concurrent
|
||||
// handshakes.
|
||||
maxPendingHandshakes = 100
|
||||
)
|
||||
|
||||
var (
|
||||
hsProtocol = altspb.HandshakeProtocol_ALTS
|
||||
appProtocols = []string{"grpc"}
|
||||
recordProtocols = []string{rekeyRecordProtocolName}
|
||||
keyLength = map[string]int{
|
||||
rekeyRecordProtocolName: 44,
|
||||
}
|
||||
altsRecordFuncs = map[string]conn.ALTSRecordFunc{
|
||||
// ALTS handshaker protocols.
|
||||
rekeyRecordProtocolName: func(s core.Side, keyData []byte) (conn.ALTSRecordCrypto, error) {
|
||||
return conn.NewAES128GCMRekey(s, keyData)
|
||||
},
|
||||
}
|
||||
// controls the number of concurrently created (but not closed) handshakers.
|
||||
mu sync.Mutex
|
||||
concurrentHandshakes = int64(0)
|
||||
// errDropped occurs when maxPendingHandshakes is reached.
|
||||
errDropped = errors.New("maximum number of concurrent ALTS handshakes is reached")
|
||||
// errOutOfBound occurs when the handshake service returns a consumed
|
||||
// bytes value larger than the buffer that was passed to it originally.
|
||||
errOutOfBound = errors.New("handshaker service consumed bytes value is out-of-bound")
|
||||
)
|
||||
|
||||
func init() {
|
||||
for protocol, f := range altsRecordFuncs {
|
||||
if err := conn.RegisterProtocol(protocol, f); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func acquire() bool {
|
||||
mu.Lock()
|
||||
// If we need n to be configurable, we can pass it as an argument.
|
||||
n := int64(1)
|
||||
success := maxPendingHandshakes-concurrentHandshakes >= n
|
||||
if success {
|
||||
concurrentHandshakes += n
|
||||
}
|
||||
mu.Unlock()
|
||||
return success
|
||||
}
|
||||
|
||||
func release() {
|
||||
mu.Lock()
|
||||
// If we need n to be configurable, we can pass it as an argument.
|
||||
n := int64(1)
|
||||
concurrentHandshakes -= n
|
||||
if concurrentHandshakes < 0 {
|
||||
mu.Unlock()
|
||||
panic("bad release")
|
||||
}
|
||||
mu.Unlock()
|
||||
}
|
||||
|
||||
// ClientHandshakerOptions contains the client handshaker options that can
|
||||
// be provided by the caller.
|
||||
type ClientHandshakerOptions struct {
|
||||
// ClientIdentity is the handshaker client local identity.
|
||||
ClientIdentity *altspb.Identity
|
||||
// TargetName is the server service account name for secure name
|
||||
// checking.
|
||||
TargetName string
|
||||
// TargetServiceAccounts contains a list of expected target service
|
||||
// accounts. One of these accounts should match one of the accounts in
|
||||
// the handshaker results. Otherwise, the handshake fails.
|
||||
TargetServiceAccounts []string
|
||||
// RPCVersions specifies the gRPC versions accepted by the client.
|
||||
RPCVersions *altspb.RpcProtocolVersions
|
||||
}
|
||||
|
||||
// ServerHandshakerOptions contains the server handshaker options that can
|
||||
// be provided by the caller.
|
||||
type ServerHandshakerOptions struct {
|
||||
// RPCVersions specifies the gRPC versions accepted by the server.
|
||||
RPCVersions *altspb.RpcProtocolVersions
|
||||
}
|
||||
|
||||
// DefaultClientHandshakerOptions returns the default client handshaker options.
|
||||
func DefaultClientHandshakerOptions() *ClientHandshakerOptions {
|
||||
return &ClientHandshakerOptions{}
|
||||
}
|
||||
|
||||
// DefaultServerHandshakerOptions returns the default server handshaker options.
|
||||
func DefaultServerHandshakerOptions() *ServerHandshakerOptions {
|
||||
return &ServerHandshakerOptions{}
|
||||
}
|
||||
|
||||
// TODO: add support for local and remote endpoints in both client options
|
||||
// and server options (the server options struct does not exist yet; it
|
||||
// should be created when callers can provide endpoints).
|
||||
|
||||
// altsHandshaker is used to complete an ALTS handshake between client and
|
||||
// server. This handshaker talks to the ALTS handshaker service in the metadata
|
||||
// server.
|
||||
type altsHandshaker struct {
|
||||
// RPC stream used to access the ALTS Handshaker service.
|
||||
stream altsgrpc.HandshakerService_DoHandshakeClient
|
||||
// the connection to the peer.
|
||||
conn net.Conn
|
||||
// client handshake options.
|
||||
clientOpts *ClientHandshakerOptions
|
||||
// server handshake options.
|
||||
serverOpts *ServerHandshakerOptions
|
||||
// defines the side doing the handshake, client or server.
|
||||
side core.Side
|
||||
}
|
||||
|
||||
// NewClientHandshaker creates an ALTS handshaker for GCP which contains an RPC
|
||||
// stub created using the passed conn and used to talk to the ALTS Handshaker
|
||||
// service in the metadata server.
|
||||
func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) {
|
||||
stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &altsHandshaker{
|
||||
stream: stream,
|
||||
conn: c,
|
||||
clientOpts: opts,
|
||||
side: core.ClientSide,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewServerHandshaker creates an ALTS handshaker for GCP which contains an RPC
|
||||
// stub created using the passed conn and used to talk to the ALTS Handshaker
|
||||
// service in the metadata server.
|
||||
func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) {
|
||||
stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &altsHandshaker{
|
||||
stream: stream,
|
||||
conn: c,
|
||||
serverOpts: opts,
|
||||
side: core.ServerSide,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ClientHandshake starts and completes a client-side ALTS handshake for GCP. Once
|
||||
// done, ClientHandshake returns a secure connection.
|
||||
func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) {
|
||||
if !acquire() {
|
||||
return nil, nil, errDropped
|
||||
}
|
||||
defer release()
|
||||
|
||||
if h.side != core.ClientSide {
|
||||
return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client handshake")
|
||||
}
|
||||
|
||||
// Create target identities from service account list.
|
||||
targetIdentities := make([]*altspb.Identity, 0, len(h.clientOpts.TargetServiceAccounts))
|
||||
for _, account := range h.clientOpts.TargetServiceAccounts {
|
||||
targetIdentities = append(targetIdentities, &altspb.Identity{
|
||||
IdentityOneof: &altspb.Identity_ServiceAccount{
|
||||
ServiceAccount: account,
|
||||
},
|
||||
})
|
||||
}
|
||||
req := &altspb.HandshakerReq{
|
||||
ReqOneof: &altspb.HandshakerReq_ClientStart{
|
||||
ClientStart: &altspb.StartClientHandshakeReq{
|
||||
HandshakeSecurityProtocol: hsProtocol,
|
||||
ApplicationProtocols: appProtocols,
|
||||
RecordProtocols: recordProtocols,
|
||||
TargetIdentities: targetIdentities,
|
||||
LocalIdentity: h.clientOpts.ClientIdentity,
|
||||
TargetName: h.clientOpts.TargetName,
|
||||
RpcVersions: h.clientOpts.RPCVersions,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
conn, result, err := h.doHandshake(req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
authInfo := authinfo.New(result)
|
||||
return conn, authInfo, nil
|
||||
}
|
||||
|
||||
// ServerHandshake starts and completes a server-side ALTS handshake for GCP. Once
|
||||
// done, ServerHandshake returns a secure connection.
|
||||
func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) {
|
||||
if !acquire() {
|
||||
return nil, nil, errDropped
|
||||
}
|
||||
defer release()
|
||||
|
||||
if h.side != core.ServerSide {
|
||||
return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server handshake")
|
||||
}
|
||||
|
||||
p := make([]byte, frameLimit)
|
||||
n, err := h.conn.Read(p)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Prepare server parameters.
|
||||
// TODO: currently only ALTS parameters are provided. Might need to use
|
||||
// more options in the future.
|
||||
params := make(map[int32]*altspb.ServerHandshakeParameters)
|
||||
params[int32(altspb.HandshakeProtocol_ALTS)] = &altspb.ServerHandshakeParameters{
|
||||
RecordProtocols: recordProtocols,
|
||||
}
|
||||
req := &altspb.HandshakerReq{
|
||||
ReqOneof: &altspb.HandshakerReq_ServerStart{
|
||||
ServerStart: &altspb.StartServerHandshakeReq{
|
||||
ApplicationProtocols: appProtocols,
|
||||
HandshakeParameters: params,
|
||||
InBytes: p[:n],
|
||||
RpcVersions: h.serverOpts.RPCVersions,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
conn, result, err := h.doHandshake(req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
authInfo := authinfo.New(result)
|
||||
return conn, authInfo, nil
|
||||
}
|
||||
|
||||
func (h *altsHandshaker) doHandshake(req *altspb.HandshakerReq) (net.Conn, *altspb.HandshakerResult, error) {
|
||||
resp, err := h.accessHandshakerService(req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
// Check if the returned status is an error.
|
||||
if resp.GetStatus() != nil {
|
||||
if got, want := resp.GetStatus().Code, uint32(codes.OK); got != want {
|
||||
return nil, nil, fmt.Errorf("%v", resp.GetStatus().Details)
|
||||
}
|
||||
}
|
||||
|
||||
var extra []byte
|
||||
if req.GetServerStart() != nil {
|
||||
if resp.GetBytesConsumed() > uint32(len(req.GetServerStart().GetInBytes())) {
|
||||
return nil, nil, errOutOfBound
|
||||
}
|
||||
extra = req.GetServerStart().GetInBytes()[resp.GetBytesConsumed():]
|
||||
}
|
||||
result, extra, err := h.processUntilDone(resp, extra)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
// The handshaker returns a 128-byte key. It should be truncated based
|
||||
// on the returned record protocol.
|
||||
keyLen, ok := keyLength[result.RecordProtocol]
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("unknown resulted record protocol %v", result.RecordProtocol)
|
||||
}
|
||||
sc, err := conn.NewConn(h.conn, h.side, result.GetRecordProtocol(), result.KeyData[:keyLen], extra)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return sc, result, nil
|
||||
}
|
||||
|
||||
func (h *altsHandshaker) accessHandshakerService(req *altspb.HandshakerReq) (*altspb.HandshakerResp, error) {
|
||||
if err := h.stream.Send(req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := h.stream.Recv()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// processUntilDone processes the handshake until the handshaker service returns
|
||||
// the results. Handshaker service takes care of frame parsing, so we read
|
||||
// whatever is received from the network and send it to the handshaker service.
|
||||
func (h *altsHandshaker) processUntilDone(resp *altspb.HandshakerResp, extra []byte) (*altspb.HandshakerResult, []byte, error) {
|
||||
for {
|
||||
if len(resp.OutFrames) > 0 {
|
||||
if _, err := h.conn.Write(resp.OutFrames); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
if resp.Result != nil {
|
||||
return resp.Result, extra, nil
|
||||
}
|
||||
buf := make([]byte, frameLimit)
|
||||
n, err := h.conn.Read(buf)
|
||||
if err != nil && err != io.EOF {
|
||||
return nil, nil, err
|
||||
}
|
||||
// If there is nothing to send to the handshaker service, and
|
||||
// nothing is received from the peer, then we are stuck.
|
||||
// This covers the case when the peer is not responding. Note
|
||||
// that handshaker service connection issues are caught in
|
||||
// accessHandshakerService before we even get here.
|
||||
if len(resp.OutFrames) == 0 && n == 0 {
|
||||
return nil, nil, core.PeerNotRespondingError
|
||||
}
|
||||
// Append extra bytes from the previous interaction with the
|
||||
// handshaker service with the current buffer read from conn.
|
||||
p := append(extra, buf[:n]...)
|
||||
// From here on, p and extra point to the same slice.
|
||||
resp, err = h.accessHandshakerService(&altspb.HandshakerReq{
|
||||
ReqOneof: &altspb.HandshakerReq_Next{
|
||||
Next: &altspb.NextHandshakeMessageReq{
|
||||
InBytes: p,
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
// Set extra based on handshaker service response.
|
||||
if resp.GetBytesConsumed() > uint32(len(p)) {
|
||||
return nil, nil, errOutOfBound
|
||||
}
|
||||
extra = p[resp.GetBytesConsumed():]
|
||||
}
|
||||
}
|
||||
|
||||
// Close terminates the Handshaker. It should be called when the caller obtains
|
||||
// the secure connection.
|
||||
func (h *altsHandshaker) Close() {
|
||||
h.stream.CloseSend()
|
||||
}
|
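Applications do not call this internal handshaker directly; it is reached through the public credentials/alts package. A hedged sketch of the typical client wiring (the target address is a placeholder, and the ALTS handshake only completes on GCP infrastructure):

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/alts"
)

func main() {
	// ALTS transport credentials; ClientHandshake above runs during Dial.
	creds := alts.NewClientCreds(alts.DefaultClientOptions())
	conn, err := grpc.Dial("example-backend:443", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}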
59
vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go
generated
vendored
Normal file
|
@ -0,0 +1,59 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package service manages connections between the VM application and the ALTS
|
||||
// handshaker service.
|
||||
package service
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
grpc "google.golang.org/grpc"
|
||||
)
|
||||
|
||||
var (
|
||||
// mu guards hsConnMap and hsDialer.
|
||||
mu sync.Mutex
|
||||
// hsConnMap maps a hypervisor handshaker service address
|
||||
// to a corresponding connection to a hypervisor handshaker service
|
||||
// instance.
|
||||
hsConnMap = make(map[string]*grpc.ClientConn)
|
||||
// hsDialer will be reassigned in tests.
|
||||
hsDialer = grpc.Dial
|
||||
)
|
||||
|
||||
// Dial dials the handshake service in the hypervisor. If a connection has
|
||||
// already been established, this function returns it. Otherwise, a new
|
||||
// connection is created.
|
||||
func Dial(hsAddress string) (*grpc.ClientConn, error) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
hsConn, ok := hsConnMap[hsAddress]
|
||||
if !ok {
|
||||
// Create a new connection to the handshaker service. Note that
|
||||
// this connection stays open until the application is closed.
|
||||
var err error
|
||||
hsConn, err = hsDialer(hsAddress, grpc.WithInsecure())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hsConnMap[hsAddress] = hsConn
|
||||
}
|
||||
return hsConn, nil
|
||||
}
|
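Dial above keeps one shared handshaker-service connection per address. A standalone sketch of the same lazily-populated, mutex-guarded cache pattern (plain strings stand in for *grpc.ClientConn; names are illustrative):

package main

import (
	"fmt"
	"sync"
)

type connCache struct {
	mu    sync.Mutex
	conns map[string]string
	dial  func(addr string) (string, error)
}

// get returns the cached connection for addr, dialing it on first use.
func (c *connCache) get(addr string) (string, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if conn, ok := c.conns[addr]; ok {
		return conn, nil
	}
	conn, err := c.dial(addr)
	if err != nil {
		return "", err
	}
	c.conns[addr] = conn
	return conn, nil
}

func main() {
	calls := 0
	c := &connCache{
		conns: map[string]string{},
		dial:  func(addr string) (string, error) { calls++; return "conn-to-" + addr, nil },
	}
	c.get("metadata.google.internal:8080")
	c.get("metadata.google.internal:8080")
	fmt.Println(calls) // 1: the second call reuses the cached connection
}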
264
vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,264 @@
|
|||
// Copyright 2018 The gRPC Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// The canonical version of this proto can be found at
|
||||
// https://github.com/grpc/grpc-proto/blob/master/grpc/gcp/altscontext.proto
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.25.0
|
||||
// protoc v3.14.0
|
||||
// source: grpc/gcp/altscontext.proto
|
||||
|
||||
package grpc_gcp
|
||||
|
||||
import (
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// This is a compile-time assertion that a sufficiently up-to-date version
|
||||
// of the legacy proto package is being used.
|
||||
const _ = proto.ProtoPackageIsVersion4
|
||||
|
||||
type AltsContext struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The application protocol negotiated for this connection.
|
||||
ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"`
|
||||
// The record protocol negotiated for this connection.
|
||||
RecordProtocol string `protobuf:"bytes,2,opt,name=record_protocol,json=recordProtocol,proto3" json:"record_protocol,omitempty"`
|
||||
// The security level of the created secure channel.
|
||||
SecurityLevel SecurityLevel `protobuf:"varint,3,opt,name=security_level,json=securityLevel,proto3,enum=grpc.gcp.SecurityLevel" json:"security_level,omitempty"`
|
||||
// The peer service account.
|
||||
PeerServiceAccount string `protobuf:"bytes,4,opt,name=peer_service_account,json=peerServiceAccount,proto3" json:"peer_service_account,omitempty"`
|
||||
// The local service account.
|
||||
LocalServiceAccount string `protobuf:"bytes,5,opt,name=local_service_account,json=localServiceAccount,proto3" json:"local_service_account,omitempty"`
|
||||
// The RPC protocol versions supported by the peer.
|
||||
PeerRpcVersions *RpcProtocolVersions `protobuf:"bytes,6,opt,name=peer_rpc_versions,json=peerRpcVersions,proto3" json:"peer_rpc_versions,omitempty"`
|
||||
// Additional attributes of the peer.
|
||||
PeerAttributes map[string]string `protobuf:"bytes,7,rep,name=peer_attributes,json=peerAttributes,proto3" json:"peer_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
}
|
||||
|
||||
func (x *AltsContext) Reset() {
|
||||
*x = AltsContext{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_grpc_gcp_altscontext_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *AltsContext) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*AltsContext) ProtoMessage() {}
|
||||
|
||||
func (x *AltsContext) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_grpc_gcp_altscontext_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use AltsContext.ProtoReflect.Descriptor instead.
|
||||
func (*AltsContext) Descriptor() ([]byte, []int) {
|
||||
return file_grpc_gcp_altscontext_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *AltsContext) GetApplicationProtocol() string {
|
||||
if x != nil {
|
||||
return x.ApplicationProtocol
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *AltsContext) GetRecordProtocol() string {
|
||||
if x != nil {
|
||||
return x.RecordProtocol
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *AltsContext) GetSecurityLevel() SecurityLevel {
|
||||
if x != nil {
|
||||
return x.SecurityLevel
|
||||
}
|
||||
return SecurityLevel_SECURITY_NONE
|
||||
}
|
||||
|
||||
func (x *AltsContext) GetPeerServiceAccount() string {
|
||||
if x != nil {
|
||||
return x.PeerServiceAccount
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *AltsContext) GetLocalServiceAccount() string {
|
||||
if x != nil {
|
||||
return x.LocalServiceAccount
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *AltsContext) GetPeerRpcVersions() *RpcProtocolVersions {
|
||||
if x != nil {
|
||||
return x.PeerRpcVersions
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *AltsContext) GetPeerAttributes() map[string]string {
|
||||
if x != nil {
|
||||
return x.PeerAttributes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_grpc_gcp_altscontext_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_grpc_gcp_altscontext_proto_rawDesc = []byte{
|
||||
0x0a, 0x1a, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x63,
|
||||
0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x67, 0x72,
|
||||
0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x1a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70,
|
||||
0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72,
|
||||
0x69, 0x74, 0x79, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x22, 0xf1, 0x03, 0x0a, 0x0b, 0x41, 0x6c, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74,
|
||||
0x12, 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13,
|
||||
0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x63, 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65,
|
||||
0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x3e, 0x0a, 0x0e,
|
||||
0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x03,
|
||||
0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e,
|
||||
0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0d, 0x73,
|
||||
0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x30, 0x0a, 0x14,
|
||||
0x70, 0x65, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63,
|
||||
0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x70, 0x65, 0x65, 0x72,
|
||||
0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x32,
|
||||
0x0a, 0x15, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f,
|
||||
0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x6c,
|
||||
0x6f, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75,
|
||||
0x6e, 0x74, 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76,
|
||||
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e,
|
||||
0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x70, 0x65,
|
||||
0x65, 0x72, 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x52, 0x0a,
|
||||
0x0f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73,
|
||||
0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63,
|
||||
0x70, 0x2e, 0x41, 0x6c, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x50, 0x65,
|
||||
0x65, 0x72, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72,
|
||||
0x79, 0x52, 0x0e, 0x70, 0x65, 0x65, 0x72, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65,
|
||||
0x73, 0x1a, 0x41, 0x0a, 0x13, 0x50, 0x65, 0x65, 0x72, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75,
|
||||
0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
|
||||
0x3a, 0x02, 0x38, 0x01, 0x42, 0x6c, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e,
|
||||
0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x10, 0x41,
|
||||
0x6c, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
|
||||
0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
|
||||
0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e,
|
||||
0x74, 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72,
|
||||
0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67,
|
||||
0x63, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_grpc_gcp_altscontext_proto_rawDescOnce sync.Once
|
||||
file_grpc_gcp_altscontext_proto_rawDescData = file_grpc_gcp_altscontext_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_grpc_gcp_altscontext_proto_rawDescGZIP() []byte {
|
||||
file_grpc_gcp_altscontext_proto_rawDescOnce.Do(func() {
|
||||
file_grpc_gcp_altscontext_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_gcp_altscontext_proto_rawDescData)
|
||||
})
|
||||
return file_grpc_gcp_altscontext_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_grpc_gcp_altscontext_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||
var file_grpc_gcp_altscontext_proto_goTypes = []interface{}{
|
||||
(*AltsContext)(nil), // 0: grpc.gcp.AltsContext
|
||||
nil, // 1: grpc.gcp.AltsContext.PeerAttributesEntry
|
||||
(SecurityLevel)(0), // 2: grpc.gcp.SecurityLevel
|
||||
(*RpcProtocolVersions)(nil), // 3: grpc.gcp.RpcProtocolVersions
|
||||
}
|
||||
var file_grpc_gcp_altscontext_proto_depIdxs = []int32{
|
||||
2, // 0: grpc.gcp.AltsContext.security_level:type_name -> grpc.gcp.SecurityLevel
|
||||
3, // 1: grpc.gcp.AltsContext.peer_rpc_versions:type_name -> grpc.gcp.RpcProtocolVersions
|
||||
1, // 2: grpc.gcp.AltsContext.peer_attributes:type_name -> grpc.gcp.AltsContext.PeerAttributesEntry
|
||||
3, // [3:3] is the sub-list for method output_type
|
||||
3, // [3:3] is the sub-list for method input_type
|
||||
3, // [3:3] is the sub-list for extension type_name
|
||||
3, // [3:3] is the sub-list for extension extendee
|
||||
0, // [0:3] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_grpc_gcp_altscontext_proto_init() }
|
||||
func file_grpc_gcp_altscontext_proto_init() {
|
||||
if File_grpc_gcp_altscontext_proto != nil {
|
||||
return
|
||||
}
|
||||
file_grpc_gcp_transport_security_common_proto_init()
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_grpc_gcp_altscontext_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*AltsContext); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_grpc_gcp_altscontext_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 2,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_grpc_gcp_altscontext_proto_goTypes,
|
||||
DependencyIndexes: file_grpc_gcp_altscontext_proto_depIdxs,
|
||||
MessageInfos: file_grpc_gcp_altscontext_proto_msgTypes,
|
||||
}.Build()
|
||||
File_grpc_gcp_altscontext_proto = out.File
|
||||
file_grpc_gcp_altscontext_proto_rawDesc = nil
|
||||
file_grpc_gcp_altscontext_proto_goTypes = nil
|
||||
file_grpc_gcp_altscontext_proto_depIdxs = nil
|
||||
}
|
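The AltsContext fields above typically surface in applications through the public alts.AuthInfo helpers rather than this generated type. A hedged sketch, assuming the AuthInfoFromContext helper exported by google.golang.org/grpc/credentials/alts; it only yields data inside a handler of an ALTS-secured RPC:

package peerinfo

import (
	"context"
	"fmt"

	"google.golang.org/grpc/credentials/alts"
)

// InspectPeer prints a few ALTS properties of the peer for an incoming RPC.
func InspectPeer(ctx context.Context) error {
	info, err := alts.AuthInfoFromContext(ctx)
	if err != nil {
		return err // not an ALTS-secured RPC
	}
	fmt.Println("peer service account:", info.PeerServiceAccount())
	fmt.Println("record protocol:", info.RecordProtocol())
	fmt.Println("security level:", info.SecurityLevel())
	return nil
}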
1426
vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
149
vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,149 @@
|
|||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
// versions:
|
||||
// - protoc-gen-go-grpc v1.1.0
|
||||
// - protoc v3.14.0
|
||||
// source: grpc/gcp/handshaker.proto
|
||||
|
||||
package grpc_gcp
|
||||
|
||||
import (
|
||||
context "context"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
// Requires gRPC-Go v1.32.0 or later.
|
||||
const _ = grpc.SupportPackageIsVersion7
|
||||
|
||||
// HandshakerServiceClient is the client API for HandshakerService service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||
type HandshakerServiceClient interface {
|
||||
// Handshaker service accepts a stream of handshaker requests, returning a
|
||||
// stream of handshaker responses. The client is expected to send exactly one
|
||||
// message with either client_start or server_start followed by one or more
|
||||
// messages with next. Each time client sends a request, the handshaker
|
||||
// service expects to respond. Client does not have to wait for service's
|
||||
// response before sending next request.
|
||||
DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error)
|
||||
}
|
||||
|
||||
type handshakerServiceClient struct {
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewHandshakerServiceClient(cc grpc.ClientConnInterface) HandshakerServiceClient {
|
||||
return &handshakerServiceClient{cc}
|
||||
}
|
||||
|
||||
func (c *handshakerServiceClient) DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, &HandshakerService_ServiceDesc.Streams[0], "/grpc.gcp.HandshakerService/DoHandshake", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &handshakerServiceDoHandshakeClient{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type HandshakerService_DoHandshakeClient interface {
|
||||
Send(*HandshakerReq) error
|
||||
Recv() (*HandshakerResp, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type handshakerServiceDoHandshakeClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *handshakerServiceDoHandshakeClient) Send(m *HandshakerReq) error {
|
||||
return x.ClientStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *handshakerServiceDoHandshakeClient) Recv() (*HandshakerResp, error) {
|
||||
m := new(HandshakerResp)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// HandshakerServiceServer is the server API for HandshakerService service.
|
||||
// All implementations must embed UnimplementedHandshakerServiceServer
|
||||
// for forward compatibility
|
||||
type HandshakerServiceServer interface {
|
||||
// Handshaker service accepts a stream of handshaker requests, returning a
|
||||
// stream of handshaker responses. The client is expected to send exactly one
|
||||
// message with either client_start or server_start followed by one or more
|
||||
// messages with next. Each time client sends a request, the handshaker
|
||||
// service expects to respond. Client does not have to wait for service's
|
||||
// response before sending next request.
|
||||
DoHandshake(HandshakerService_DoHandshakeServer) error
|
||||
mustEmbedUnimplementedHandshakerServiceServer()
|
||||
}
|
||||
|
||||
// UnimplementedHandshakerServiceServer must be embedded to have forward compatible implementations.
|
||||
type UnimplementedHandshakerServiceServer struct {
|
||||
}
|
||||
|
||||
func (UnimplementedHandshakerServiceServer) DoHandshake(HandshakerService_DoHandshakeServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method DoHandshake not implemented")
|
||||
}
|
||||
func (UnimplementedHandshakerServiceServer) mustEmbedUnimplementedHandshakerServiceServer() {}
|
||||
|
||||
// UnsafeHandshakerServiceServer may be embedded to opt out of forward compatibility for this service.
|
||||
// Use of this interface is not recommended, as added methods to HandshakerServiceServer will
|
||||
// result in compilation errors.
|
||||
type UnsafeHandshakerServiceServer interface {
|
||||
mustEmbedUnimplementedHandshakerServiceServer()
|
||||
}
|
||||
|
||||
func RegisterHandshakerServiceServer(s grpc.ServiceRegistrar, srv HandshakerServiceServer) {
|
||||
s.RegisterService(&HandshakerService_ServiceDesc, srv)
|
||||
}
|
||||
|
||||
func _HandshakerService_DoHandshake_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
return srv.(HandshakerServiceServer).DoHandshake(&handshakerServiceDoHandshakeServer{stream})
|
||||
}
|
||||
|
||||
type HandshakerService_DoHandshakeServer interface {
|
||||
Send(*HandshakerResp) error
|
||||
Recv() (*HandshakerReq, error)
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type handshakerServiceDoHandshakeServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *handshakerServiceDoHandshakeServer) Send(m *HandshakerResp) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *handshakerServiceDoHandshakeServer) Recv() (*HandshakerReq, error) {
|
||||
m := new(HandshakerReq)
|
||||
if err := x.ServerStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// HandshakerService_ServiceDesc is the grpc.ServiceDesc for HandshakerService service.
|
||||
// It's only intended for direct use with grpc.RegisterService,
|
||||
// and not to be introspected or modified (even as a copy)
|
||||
var HandshakerService_ServiceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "grpc.gcp.HandshakerService",
|
||||
HandlerType: (*HandshakerServiceServer)(nil),
|
||||
Methods: []grpc.MethodDesc{},
|
||||
Streams: []grpc.StreamDesc{
|
||||
{
|
||||
StreamName: "DoHandshake",
|
||||
Handler: _HandshakerService_DoHandshake_Handler,
|
||||
ServerStreams: true,
|
||||
ClientStreams: true,
|
||||
},
|
||||
},
|
||||
Metadata: "grpc/gcp/handshaker.proto",
|
||||
}
|
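On the server side, the DoHandshake stream above is driven by ALTS transport credentials installed on the gRPC server. A hedged sketch of that wiring via the public alts package (the port and service registration are placeholders, and the handshake itself only succeeds on GCP):

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/alts"
)

func main() {
	creds := alts.NewServerCreds(alts.DefaultServerOptions())
	lis, err := net.Listen("tcp", ":10443")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	s := grpc.NewServer(grpc.Creds(creds))
	// Register application services here before serving.
	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}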
326
vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,326 @@
|
|||
// Copyright 2018 The gRPC Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// The canonical version of this proto can be found at
|
||||
// https://github.com/grpc/grpc-proto/blob/master/grpc/gcp/transport_security_common.proto
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.25.0
|
||||
// protoc v3.14.0
|
||||
// source: grpc/gcp/transport_security_common.proto
|
||||
|
||||
package grpc_gcp
|
||||
|
||||
import (
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// This is a compile-time assertion that a sufficiently up-to-date version
|
||||
// of the legacy proto package is being used.
|
||||
const _ = proto.ProtoPackageIsVersion4
|
||||
|
||||
// The security level of the created channel. The list is sorted in increasing
|
||||
// level of security. This order must always be maintained.
|
||||
type SecurityLevel int32
|
||||
|
||||
const (
|
||||
SecurityLevel_SECURITY_NONE SecurityLevel = 0
|
||||
SecurityLevel_INTEGRITY_ONLY SecurityLevel = 1
|
||||
SecurityLevel_INTEGRITY_AND_PRIVACY SecurityLevel = 2
|
||||
)
|
||||
|
||||
// Enum value maps for SecurityLevel.
|
||||
var (
|
||||
SecurityLevel_name = map[int32]string{
|
||||
0: "SECURITY_NONE",
|
||||
1: "INTEGRITY_ONLY",
|
||||
2: "INTEGRITY_AND_PRIVACY",
|
||||
}
|
||||
SecurityLevel_value = map[string]int32{
|
||||
"SECURITY_NONE": 0,
|
||||
"INTEGRITY_ONLY": 1,
|
||||
"INTEGRITY_AND_PRIVACY": 2,
|
||||
}
|
||||
)
|
||||
|
||||
func (x SecurityLevel) Enum() *SecurityLevel {
|
||||
p := new(SecurityLevel)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
|
||||
func (x SecurityLevel) String() string {
|
||||
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
|
||||
}
|
||||
|
||||
func (SecurityLevel) Descriptor() protoreflect.EnumDescriptor {
|
||||
return file_grpc_gcp_transport_security_common_proto_enumTypes[0].Descriptor()
|
||||
}
|
||||
|
||||
func (SecurityLevel) Type() protoreflect.EnumType {
|
||||
return &file_grpc_gcp_transport_security_common_proto_enumTypes[0]
|
||||
}
|
||||
|
||||
func (x SecurityLevel) Number() protoreflect.EnumNumber {
|
||||
return protoreflect.EnumNumber(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use SecurityLevel.Descriptor instead.
|
||||
func (SecurityLevel) EnumDescriptor() ([]byte, []int) {
|
||||
return file_grpc_gcp_transport_security_common_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
// Max and min supported RPC protocol versions.
|
||||
type RpcProtocolVersions struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Maximum supported RPC version.
|
||||
MaxRpcVersion *RpcProtocolVersions_Version `protobuf:"bytes,1,opt,name=max_rpc_version,json=maxRpcVersion,proto3" json:"max_rpc_version,omitempty"`
|
||||
// Minimum supported RPC version.
|
||||
MinRpcVersion *RpcProtocolVersions_Version `protobuf:"bytes,2,opt,name=min_rpc_version,json=minRpcVersion,proto3" json:"min_rpc_version,omitempty"`
|
||||
}
|
||||
|
||||
func (x *RpcProtocolVersions) Reset() {
|
||||
*x = RpcProtocolVersions{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *RpcProtocolVersions) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*RpcProtocolVersions) ProtoMessage() {}
|
||||
|
||||
func (x *RpcProtocolVersions) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use RpcProtocolVersions.ProtoReflect.Descriptor instead.
|
||||
func (*RpcProtocolVersions) Descriptor() ([]byte, []int) {
|
||||
return file_grpc_gcp_transport_security_common_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *RpcProtocolVersions) GetMaxRpcVersion() *RpcProtocolVersions_Version {
|
||||
if x != nil {
|
||||
return x.MaxRpcVersion
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *RpcProtocolVersions) GetMinRpcVersion() *RpcProtocolVersions_Version {
|
||||
if x != nil {
|
||||
return x.MinRpcVersion
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RPC version contains a major version and a minor version.
|
||||
type RpcProtocolVersions_Version struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Major uint32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"`
|
||||
Minor uint32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"`
|
||||
}
|
||||
|
||||
func (x *RpcProtocolVersions_Version) Reset() {
|
||||
*x = RpcProtocolVersions_Version{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *RpcProtocolVersions_Version) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*RpcProtocolVersions_Version) ProtoMessage() {}
|
||||
|
||||
func (x *RpcProtocolVersions_Version) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use RpcProtocolVersions_Version.ProtoReflect.Descriptor instead.
|
||||
func (*RpcProtocolVersions_Version) Descriptor() ([]byte, []int) {
|
||||
return file_grpc_gcp_transport_security_common_proto_rawDescGZIP(), []int{0, 0}
|
||||
}
|
||||
|
||||
func (x *RpcProtocolVersions_Version) GetMajor() uint32 {
|
||||
if x != nil {
|
||||
return x.Major
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *RpcProtocolVersions_Version) GetMinor() uint32 {
|
||||
if x != nil {
|
||||
return x.Minor
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
var File_grpc_gcp_transport_security_common_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_grpc_gcp_transport_security_common_proto_rawDesc = []byte{
|
||||
0x0a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x63, 0x70, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73,
|
||||
0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x63, 0x6f,
|
||||
0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x67, 0x72, 0x70, 0x63,
|
||||
0x2e, 0x67, 0x63, 0x70, 0x22, 0xea, 0x01, 0x0a, 0x13, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4d, 0x0a, 0x0f,
|
||||
0x6d, 0x61, 0x78, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70,
|
||||
0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73,
|
||||
0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61,
|
||||
0x78, 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x0f, 0x6d,
|
||||
0x69, 0x6e, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02,
|
||||
0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e,
|
||||
0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69,
|
||||
0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e,
|
||||
0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65,
|
||||
0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d,
|
||||
0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f,
|
||||
0x72, 0x2a, 0x51, 0x0a, 0x0d, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x65, 0x76,
|
||||
0x65, 0x6c, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x45, 0x43, 0x55, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e,
|
||||
0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x49, 0x4e, 0x54, 0x45, 0x47, 0x52, 0x49,
|
||||
0x54, 0x59, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x49, 0x4e, 0x54,
|
||||
0x45, 0x47, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x50, 0x52, 0x49, 0x56, 0x41,
|
||||
0x43, 0x59, 0x10, 0x02, 0x42, 0x78, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e,
|
||||
0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x1c, 0x54,
|
||||
0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79,
|
||||
0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67,
|
||||
0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c,
|
||||
0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, 0x70, 0x62, 0x06,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_grpc_gcp_transport_security_common_proto_rawDescOnce sync.Once
|
||||
file_grpc_gcp_transport_security_common_proto_rawDescData = file_grpc_gcp_transport_security_common_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_grpc_gcp_transport_security_common_proto_rawDescGZIP() []byte {
|
||||
file_grpc_gcp_transport_security_common_proto_rawDescOnce.Do(func() {
|
||||
file_grpc_gcp_transport_security_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_gcp_transport_security_common_proto_rawDescData)
|
||||
})
|
||||
return file_grpc_gcp_transport_security_common_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_grpc_gcp_transport_security_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
|
||||
var file_grpc_gcp_transport_security_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||
var file_grpc_gcp_transport_security_common_proto_goTypes = []interface{}{
|
||||
(SecurityLevel)(0), // 0: grpc.gcp.SecurityLevel
|
||||
(*RpcProtocolVersions)(nil), // 1: grpc.gcp.RpcProtocolVersions
|
||||
(*RpcProtocolVersions_Version)(nil), // 2: grpc.gcp.RpcProtocolVersions.Version
|
||||
}
|
||||
var file_grpc_gcp_transport_security_common_proto_depIdxs = []int32{
|
||||
2, // 0: grpc.gcp.RpcProtocolVersions.max_rpc_version:type_name -> grpc.gcp.RpcProtocolVersions.Version
|
||||
2, // 1: grpc.gcp.RpcProtocolVersions.min_rpc_version:type_name -> grpc.gcp.RpcProtocolVersions.Version
|
||||
2, // [2:2] is the sub-list for method output_type
|
||||
2, // [2:2] is the sub-list for method input_type
|
||||
2, // [2:2] is the sub-list for extension type_name
|
||||
2, // [2:2] is the sub-list for extension extendee
|
||||
0, // [0:2] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_grpc_gcp_transport_security_common_proto_init() }
|
||||
func file_grpc_gcp_transport_security_common_proto_init() {
|
||||
if File_grpc_gcp_transport_security_common_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_grpc_gcp_transport_security_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*RpcProtocolVersions); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_grpc_gcp_transport_security_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*RpcProtocolVersions_Version); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_grpc_gcp_transport_security_common_proto_rawDesc,
|
||||
NumEnums: 1,
|
||||
NumMessages: 2,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_grpc_gcp_transport_security_common_proto_goTypes,
|
||||
DependencyIndexes: file_grpc_gcp_transport_security_common_proto_depIdxs,
|
||||
EnumInfos: file_grpc_gcp_transport_security_common_proto_enumTypes,
|
||||
MessageInfos: file_grpc_gcp_transport_security_common_proto_msgTypes,
|
||||
}.Build()
|
||||
File_grpc_gcp_transport_security_common_proto = out.File
|
||||
file_grpc_gcp_transport_security_common_proto_rawDesc = nil
|
||||
file_grpc_gcp_transport_security_common_proto_goTypes = nil
|
||||
file_grpc_gcp_transport_security_common_proto_depIdxs = nil
|
||||
}
|
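The nested Version message and the nil-safe getters above are the whole public surface of this generated file. A minimal sketch of how negotiation code inside the same package might build and read the message (illustrative only, not part of the vendored file):

	vers := &RpcProtocolVersions{
		MaxRpcVersion: &RpcProtocolVersions_Version{Major: 2, Minor: 1},
		MinRpcVersion: &RpcProtocolVersions_Version{Major: 2, Minor: 0},
	}
	// Getters are nil-safe: GetMaxRpcVersion on a nil message returns nil, and
	// GetMajor on a nil *Version returns 0, so the chain below never panics.
	maxMajor := vers.GetMaxRpcVersion().GetMajor() // 2
	minMinor := vers.GetMinRpcVersion().GetMinor() // 0
	_, _ = maxMajor, minMinor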
70 vendor/google.golang.org/grpc/credentials/alts/utils.go (generated, vendored, new file)
@@ -0,0 +1,70 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package alts
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/peer"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// AuthInfoFromContext extracts the alts.AuthInfo object from the given context,
|
||||
// if it exists. This API should be used by gRPC server RPC handlers to get
|
||||
// information about the communicating peer. For client-side, use grpc.Peer()
|
||||
// CallOption.
|
||||
func AuthInfoFromContext(ctx context.Context) (AuthInfo, error) {
|
||||
p, ok := peer.FromContext(ctx)
|
||||
if !ok {
|
||||
return nil, errors.New("no Peer found in Context")
|
||||
}
|
||||
return AuthInfoFromPeer(p)
|
||||
}
|
||||
|
||||
// AuthInfoFromPeer extracts the alts.AuthInfo object from the given peer, if it
|
||||
// exists. This API should be used by gRPC clients after obtaining a peer object
|
||||
// using the grpc.Peer() CallOption.
|
||||
func AuthInfoFromPeer(p *peer.Peer) (AuthInfo, error) {
|
||||
altsAuthInfo, ok := p.AuthInfo.(AuthInfo)
|
||||
if !ok {
|
||||
return nil, errors.New("no alts.AuthInfo found in Peer")
|
||||
}
|
||||
return altsAuthInfo, nil
|
||||
}
|
||||
|
||||
// ClientAuthorizationCheck checks whether the client is authorized to access
|
||||
// the requested resources based on the given expected client service accounts.
|
||||
// This API should be used by gRPC server RPC handlers. This API should not be
|
||||
// used by clients.
|
||||
func ClientAuthorizationCheck(ctx context.Context, expectedServiceAccounts []string) error {
|
||||
authInfo, err := AuthInfoFromContext(ctx)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.PermissionDenied, "The context is not an ALTS-compatible context: %v", err)
|
||||
}
|
||||
peer := authInfo.PeerServiceAccount()
|
||||
for _, sa := range expectedServiceAccounts {
|
||||
if strings.EqualFold(peer, sa) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return status.Errorf(codes.PermissionDenied, "Client %v is not authorized", peer)
|
||||
}
|
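The three helpers above are meant to be called from server-side RPC handlers. A hedged sketch of a handler that gates access on the ALTS peer identity; myServer, mypb, and the service-account name are placeholders, and the usual context/log imports are assumed:

	func (s *myServer) DoWork(ctx context.Context, req *mypb.WorkRequest) (*mypb.WorkReply, error) {
		// ClientAuthorizationCheck already wraps mismatches in a
		// PermissionDenied status, so the error can be returned directly.
		expected := []string{"trusted-client@example-project.iam.gserviceaccount.com"}
		if err := alts.ClientAuthorizationCheck(ctx, expected); err != nil {
			return nil, err
		}
		// Optionally inspect the ALTS handshake result for logging.
		if info, err := alts.AuthInfoFromContext(ctx); err == nil {
			log.Printf("authorized peer: %s", info.PeerServiceAccount())
		}
		return &mypb.WorkReply{}, nil
	}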
136 vendor/google.golang.org/grpc/credentials/google/google.go (generated, vendored, new file)
@@ -0,0 +1,136 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package google defines credentials for google cloud services.
|
||||
package google
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/credentials/alts"
|
||||
"google.golang.org/grpc/credentials/oauth"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal"
|
||||
)
|
||||
|
||||
const tokenRequestTimeout = 30 * time.Second
|
||||
|
||||
var logger = grpclog.Component("credentials")
|
||||
|
||||
// NewDefaultCredentials returns a credentials bundle that is configured to work
|
||||
// with google services.
|
||||
//
|
||||
// This API is experimental.
|
||||
func NewDefaultCredentials() credentials.Bundle {
|
||||
c := &creds{
|
||||
newPerRPCCreds: func() credentials.PerRPCCredentials {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout)
|
||||
defer cancel()
|
||||
perRPCCreds, err := oauth.NewApplicationDefault(ctx)
|
||||
if err != nil {
|
||||
logger.Warningf("google default creds: failed to create application oauth: %v", err)
|
||||
}
|
||||
return perRPCCreds
|
||||
},
|
||||
}
|
||||
bundle, err := c.NewWithMode(internal.CredsBundleModeFallback)
|
||||
if err != nil {
|
||||
logger.Warningf("google default creds: failed to create new creds: %v", err)
|
||||
}
|
||||
return bundle
|
||||
}
|
||||
|
||||
// NewComputeEngineCredentials returns a credentials bundle that is configured to work
|
||||
// with google services. This API must only be used when running on GCE. Authentication configured
|
||||
// by this API represents the GCE VM's default service account.
|
||||
//
|
||||
// This API is experimental.
|
||||
func NewComputeEngineCredentials() credentials.Bundle {
|
||||
c := &creds{
|
||||
newPerRPCCreds: func() credentials.PerRPCCredentials {
|
||||
return oauth.NewComputeEngine()
|
||||
},
|
||||
}
|
||||
bundle, err := c.NewWithMode(internal.CredsBundleModeFallback)
|
||||
if err != nil {
|
||||
logger.Warningf("compute engine creds: failed to create new creds: %v", err)
|
||||
}
|
||||
return bundle
|
||||
}
|
||||
|
||||
// creds implements credentials.Bundle.
|
||||
type creds struct {
|
||||
// Supported modes are defined in internal/internal.go.
|
||||
mode string
|
||||
// The transport credentials associated with this bundle.
|
||||
transportCreds credentials.TransportCredentials
|
||||
// The per RPC credentials associated with this bundle.
|
||||
perRPCCreds credentials.PerRPCCredentials
|
||||
// Creates new per RPC credentials
|
||||
newPerRPCCreds func() credentials.PerRPCCredentials
|
||||
}
|
||||
|
||||
func (c *creds) TransportCredentials() credentials.TransportCredentials {
|
||||
return c.transportCreds
|
||||
}
|
||||
|
||||
func (c *creds) PerRPCCredentials() credentials.PerRPCCredentials {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
return c.perRPCCreds
|
||||
}
|
||||
|
||||
var (
|
||||
newTLS = func() credentials.TransportCredentials {
|
||||
return credentials.NewTLS(nil)
|
||||
}
|
||||
newALTS = func() credentials.TransportCredentials {
|
||||
return alts.NewClientCreds(alts.DefaultClientOptions())
|
||||
}
|
||||
)
|
||||
|
||||
// NewWithMode should make a copy of Bundle, and switch mode. Modifying the
|
||||
// existing Bundle may cause races.
|
||||
func (c *creds) NewWithMode(mode string) (credentials.Bundle, error) {
|
||||
newCreds := &creds{
|
||||
mode: mode,
|
||||
newPerRPCCreds: c.newPerRPCCreds,
|
||||
}
|
||||
|
||||
// Create transport credentials.
|
||||
switch mode {
|
||||
case internal.CredsBundleModeFallback:
|
||||
newCreds.transportCreds = newClusterTransportCreds(newTLS(), newALTS())
|
||||
case internal.CredsBundleModeBackendFromBalancer, internal.CredsBundleModeBalancer:
|
||||
// Only the clients can use google default credentials, so we only need
|
||||
// to create new ALTS client creds here.
|
||||
newCreds.transportCreds = newALTS()
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported mode: %v", mode)
|
||||
}
|
||||
|
||||
if mode == internal.CredsBundleModeFallback || mode == internal.CredsBundleModeBackendFromBalancer {
|
||||
newCreds.perRPCCreds = newCreds.newPerRPCCreds()
|
||||
}
|
||||
|
||||
return newCreds, nil
|
||||
}
|
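Callers normally consume this bundle through the grpc.WithCredentialsBundle dial option rather than touching the transport and per-RPC parts separately. A minimal sketch, assuming a placeholder endpoint and the usual grpc/log imports:

	conn, err := grpc.Dial(
		"example.googleapis.com:443",
		grpc.WithCredentialsBundle(google.NewDefaultCredentials()),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()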
90 vendor/google.golang.org/grpc/credentials/google/xds.go (generated, vendored, new file)
@@ -0,0 +1,90 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2021 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package google
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal"
|
||||
)
|
||||
|
||||
const cfeClusterName = "google-cfe"
|
||||
|
||||
// clusterTransportCreds is a combo of TLS + ALTS.
|
||||
//
|
||||
// On the client, ClientHandshake picks TLS or ALTS based on address attributes.
|
||||
// - if attributes has cluster name
|
||||
// - if cluster name is "google_cfe", use TLS
|
||||
// - otherwise, use ALTS
|
||||
// - else, do TLS
|
||||
//
|
||||
// On the server, ServerHandshake always does TLS.
|
||||
type clusterTransportCreds struct {
|
||||
tls credentials.TransportCredentials
|
||||
alts credentials.TransportCredentials
|
||||
}
|
||||
|
||||
func newClusterTransportCreds(tls, alts credentials.TransportCredentials) *clusterTransportCreds {
|
||||
return &clusterTransportCreds{
|
||||
tls: tls,
|
||||
alts: alts,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *clusterTransportCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
|
||||
chi := credentials.ClientHandshakeInfoFromContext(ctx)
|
||||
if chi.Attributes == nil {
|
||||
return c.tls.ClientHandshake(ctx, authority, rawConn)
|
||||
}
|
||||
cn, ok := internal.GetXDSHandshakeClusterName(chi.Attributes)
|
||||
if !ok || cn == cfeClusterName {
|
||||
return c.tls.ClientHandshake(ctx, authority, rawConn)
|
||||
}
|
||||
// If attributes have cluster name, and cluster name is not cfe, it's a
|
||||
// backend address, use ALTS.
|
||||
return c.alts.ClientHandshake(ctx, authority, rawConn)
|
||||
}
|
||||
|
||||
func (c *clusterTransportCreds) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
|
||||
return c.tls.ServerHandshake(conn)
|
||||
}
|
||||
|
||||
func (c *clusterTransportCreds) Info() credentials.ProtocolInfo {
|
||||
// TODO: this always returns tls.Info now, because we don't have a cluster
|
||||
// name to check when this method is called. This method doesn't affect
|
||||
// anything important now. We may want to revisit this if it becomes more
|
||||
// important later.
|
||||
return c.tls.Info()
|
||||
}
|
||||
|
||||
func (c *clusterTransportCreds) Clone() credentials.TransportCredentials {
|
||||
return &clusterTransportCreds{
|
||||
tls: c.tls.Clone(),
|
||||
alts: c.alts.Clone(),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *clusterTransportCreds) OverrideServerName(s string) error {
|
||||
if err := c.tls.OverrideServerName(s); err != nil {
|
||||
return err
|
||||
}
|
||||
return c.alts.OverrideServerName(s)
|
||||
}
|
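The handshake selection above reduces to a small decision rule: TLS when there is no cluster attribute or when the traffic targets the CFE cluster, ALTS for any other (direct backend) cluster. A hypothetical helper restating that rule; the real code extracts the cluster name from the handshake attributes rather than taking it as a string:

	func pickTransportCreds(c *clusterTransportCreds, clusterName string, hasCluster bool) credentials.TransportCredentials {
		if !hasCluster || clusterName == cfeClusterName {
			return c.tls // no cluster attribute, or traffic to the CFE cluster
		}
		return c.alts // direct-to-backend traffic
	}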
225 vendor/google.golang.org/grpc/credentials/oauth/oauth.go (generated, vendored, new file)
@@ -0,0 +1,225 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2015 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package oauth implements gRPC credentials using OAuth.
|
||||
package oauth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
"golang.org/x/oauth2/jwt"
|
||||
"google.golang.org/grpc/credentials"
|
||||
)
|
||||
|
||||
// TokenSource supplies PerRPCCredentials from an oauth2.TokenSource.
|
||||
type TokenSource struct {
|
||||
oauth2.TokenSource
|
||||
}
|
||||
|
||||
// GetRequestMetadata gets the request metadata as a map from a TokenSource.
|
||||
func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
|
||||
token, err := ts.Token()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ri, _ := credentials.RequestInfoFromContext(ctx)
|
||||
if err = credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil {
|
||||
return nil, fmt.Errorf("unable to transfer TokenSource PerRPCCredentials: %v", err)
|
||||
}
|
||||
return map[string]string{
|
||||
"authorization": token.Type() + " " + token.AccessToken,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// RequireTransportSecurity indicates whether the credentials require transport security.
|
||||
func (ts TokenSource) RequireTransportSecurity() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type jwtAccess struct {
|
||||
jsonKey []byte
|
||||
}
|
||||
|
||||
// NewJWTAccessFromFile creates PerRPCCredentials from the given keyFile.
|
||||
func NewJWTAccessFromFile(keyFile string) (credentials.PerRPCCredentials, error) {
|
||||
jsonKey, err := ioutil.ReadFile(keyFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err)
|
||||
}
|
||||
return NewJWTAccessFromKey(jsonKey)
|
||||
}
|
||||
|
||||
// NewJWTAccessFromKey creates PerRPCCredentials from the given jsonKey.
|
||||
func NewJWTAccessFromKey(jsonKey []byte) (credentials.PerRPCCredentials, error) {
|
||||
return jwtAccess{jsonKey}, nil
|
||||
}
|
||||
|
||||
func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
|
||||
// TODO: the returned TokenSource is reusable. Store it in a sync.Map, with
|
||||
// uri as the key, to avoid recreating for every RPC.
|
||||
ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, uri[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
token, err := ts.Token()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ri, _ := credentials.RequestInfoFromContext(ctx)
|
||||
if err = credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil {
|
||||
return nil, fmt.Errorf("unable to transfer jwtAccess PerRPCCredentials: %v", err)
|
||||
}
|
||||
return map[string]string{
|
||||
"authorization": token.Type() + " " + token.AccessToken,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (j jwtAccess) RequireTransportSecurity() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// oauthAccess supplies PerRPCCredentials from a given token.
|
||||
type oauthAccess struct {
|
||||
token oauth2.Token
|
||||
}
|
||||
|
||||
// NewOauthAccess constructs the PerRPCCredentials using a given token.
|
||||
func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials {
|
||||
return oauthAccess{token: *token}
|
||||
}
|
||||
|
||||
func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
|
||||
ri, _ := credentials.RequestInfoFromContext(ctx)
|
||||
if err := credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil {
|
||||
return nil, fmt.Errorf("unable to transfer oauthAccess PerRPCCredentials: %v", err)
|
||||
}
|
||||
return map[string]string{
|
||||
"authorization": oa.token.Type() + " " + oa.token.AccessToken,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (oa oauthAccess) RequireTransportSecurity() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// NewComputeEngine constructs the PerRPCCredentials that fetches access tokens from
|
||||
// Google Compute Engine (GCE)'s metadata server. It is only valid to use this
|
||||
// if your program is running on a GCE instance.
|
||||
// TODO(dsymonds): Deprecate and remove this.
|
||||
func NewComputeEngine() credentials.PerRPCCredentials {
|
||||
return TokenSource{google.ComputeTokenSource("")}
|
||||
}
|
||||
|
||||
// serviceAccount represents PerRPCCredentials via JWT signing key.
|
||||
type serviceAccount struct {
|
||||
mu sync.Mutex
|
||||
config *jwt.Config
|
||||
t *oauth2.Token
|
||||
}
|
||||
|
||||
func (s *serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if !s.t.Valid() {
|
||||
var err error
|
||||
s.t, err = s.config.TokenSource(ctx).Token()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
ri, _ := credentials.RequestInfoFromContext(ctx)
|
||||
if err := credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil {
|
||||
return nil, fmt.Errorf("unable to transfer serviceAccount PerRPCCredentials: %v", err)
|
||||
}
|
||||
return map[string]string{
|
||||
"authorization": s.t.Type() + " " + s.t.AccessToken,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *serviceAccount) RequireTransportSecurity() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// NewServiceAccountFromKey constructs the PerRPCCredentials using the JSON key slice
|
||||
// from a Google Developers service account.
|
||||
func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.PerRPCCredentials, error) {
|
||||
config, err := google.JWTConfigFromJSON(jsonKey, scope...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &serviceAccount{config: config}, nil
|
||||
}
|
||||
|
||||
// NewServiceAccountFromFile constructs the PerRPCCredentials using the JSON key file
|
||||
// of a Google Developers service account.
|
||||
func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.PerRPCCredentials, error) {
|
||||
jsonKey, err := ioutil.ReadFile(keyFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err)
|
||||
}
|
||||
return NewServiceAccountFromKey(jsonKey, scope...)
|
||||
}
|
||||
|
||||
// NewApplicationDefault returns "Application Default Credentials". For more
|
||||
// detail, see https://developers.google.com/accounts/docs/application-default-credentials.
|
||||
func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.PerRPCCredentials, error) {
|
||||
creds, err := google.FindDefaultCredentials(ctx, scope...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If JSON is nil, the authentication is provided by the environment and not
|
||||
// with a credentials file, e.g. when code is running on Google Cloud
|
||||
// Platform. Use the returned token source.
|
||||
if creds.JSON == nil {
|
||||
return TokenSource{creds.TokenSource}, nil
|
||||
}
|
||||
|
||||
// If auth is provided by an env variable or a creds file, the behavior depends
// on whether scope is set: the returned creds.TokenSource does oauth with jwt
// by default, and that requires a scope. We can only use it if scope is not
// empty; otherwise it would fail with a missing-scope error.
|
||||
//
|
||||
// If scope is set, use it; it should just work.
|
||||
//
|
||||
// If scope is not set, we try to use jwt directly without oauth (this only
|
||||
// works if it's a service account).
|
||||
|
||||
if len(scope) != 0 {
|
||||
return TokenSource{creds.TokenSource}, nil
|
||||
}
|
||||
|
||||
// Try to convert JSON to a jwt config without setting the optional scope
|
||||
// parameter to check if it's a service account (the function errors if it's
|
||||
// not). This is necessary because the returned config doesn't show the type
|
||||
// of the account.
|
||||
if _, err := google.JWTConfigFromJSON(creds.JSON); err != nil {
|
||||
// If this fails, it's not a service account, return the original
|
||||
// TokenSource from above.
|
||||
return TokenSource{creds.TokenSource}, nil
|
||||
}
|
||||
|
||||
// If it's a service account, create a JWT only access with the key.
|
||||
return NewJWTAccessFromKey(creds.JSON)
|
||||
}
|
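These constructors are typically attached to a connection as per-RPC credentials next to TLS transport credentials. A hedged sketch; the key file path, scope, and endpoint are placeholders, and the grpc, credentials, and log imports are assumed:

	rpcCreds, err := oauth.NewServiceAccountFromFile(
		"/path/to/service-account.json",
		"https://www.googleapis.com/auth/cloud-platform", // example scope
	)
	if err != nil {
		log.Fatalf("creating per-RPC credentials: %v", err)
	}
	conn, err := grpc.Dial(
		"example.googleapis.com:443",
		grpc.WithTransportCredentials(credentials.NewTLS(nil)),
		grpc.WithPerRPCCredentials(rpcCreds),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()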
128 vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go (generated, vendored, new file)
@@ -0,0 +1,128 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2021 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package googlecloud contains internal helper functions for Google Cloud.
|
||||
package googlecloud
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
internalgrpclog "google.golang.org/grpc/internal/grpclog"
|
||||
)
|
||||
|
||||
const (
|
||||
linuxProductNameFile = "/sys/class/dmi/id/product_name"
|
||||
windowsCheckCommand = "powershell.exe"
|
||||
windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS"
|
||||
powershellOutputFilter = "Manufacturer"
|
||||
windowsManufacturerRegex = ":(.*)"
|
||||
|
||||
logPrefix = "[googlecloud]"
|
||||
)
|
||||
|
||||
var (
|
||||
// The following two variables will be reassigned in tests.
|
||||
runningOS = runtime.GOOS
|
||||
manufacturerReader = func() (io.Reader, error) {
|
||||
switch runningOS {
|
||||
case "linux":
|
||||
return os.Open(linuxProductNameFile)
|
||||
case "windows":
|
||||
cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs)
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") {
|
||||
if strings.HasPrefix(line, powershellOutputFilter) {
|
||||
re := regexp.MustCompile(windowsManufacturerRegex)
|
||||
name := re.FindString(line)
|
||||
name = strings.TrimLeft(name, ":")
|
||||
return strings.NewReader(name), nil
|
||||
}
|
||||
}
|
||||
return nil, errors.New("cannot determine the machine's manufacturer")
|
||||
default:
|
||||
return nil, fmt.Errorf("%s is not supported", runningOS)
|
||||
}
|
||||
}
|
||||
|
||||
vmOnGCEOnce sync.Once
|
||||
vmOnGCE bool
|
||||
|
||||
logger = internalgrpclog.NewPrefixLogger(grpclog.Component("googlecloud"), logPrefix)
|
||||
)
|
||||
|
||||
// OnGCE returns whether the client is running on GCE.
|
||||
//
|
||||
// It provides functionality similar to metadata.OnGCE from the cloud library
|
||||
// package. We keep this to avoid depending on the cloud library module.
|
||||
func OnGCE() bool {
|
||||
vmOnGCEOnce.Do(func() {
|
||||
vmOnGCE = isRunningOnGCE()
|
||||
})
|
||||
return vmOnGCE
|
||||
}
|
||||
|
||||
// isRunningOnGCE checks, without doing a network request, whether the local system is
|
||||
// running on GCP.
|
||||
func isRunningOnGCE() bool {
|
||||
manufacturer, err := readManufacturer()
|
||||
if err != nil {
|
||||
logger.Infof("failed to read manufacturer %v, returning OnGCE=false", err)
|
||||
return false
|
||||
}
|
||||
name := string(manufacturer)
|
||||
switch runningOS {
|
||||
case "linux":
|
||||
name = strings.TrimSpace(name)
|
||||
return name == "Google" || name == "Google Compute Engine"
|
||||
case "windows":
|
||||
name = strings.Replace(name, " ", "", -1)
|
||||
name = strings.Replace(name, "\n", "", -1)
|
||||
name = strings.Replace(name, "\r", "", -1)
|
||||
return name == "Google"
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func readManufacturer() ([]byte, error) {
|
||||
reader, err := manufacturerReader()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if reader == nil {
|
||||
return nil, errors.New("got nil reader")
|
||||
}
|
||||
manufacturer, err := ioutil.ReadAll(reader)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed reading %v: %v", linuxProductNameFile, err)
|
||||
}
|
||||
return manufacturer, nil
|
||||
}
|
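Because this package lives under internal/, other modules cannot import OnGCE; the Linux detection it performs can be reproduced directly from the logic above. A standalone sketch, assuming the DMI product-name file is present and readable:

	package main

	import (
		"fmt"
		"io/ioutil"
		"strings"
	)

	func main() {
		// Mirrors the linux branch of isRunningOnGCE: GCE VMs report "Google"
		// or "Google Compute Engine" as the DMI product name.
		b, err := ioutil.ReadFile("/sys/class/dmi/id/product_name")
		if err != nil {
			fmt.Println("on GCE: false (cannot read product name)")
			return
		}
		name := strings.TrimSpace(string(b))
		fmt.Println("on GCE:", name == "Google" || name == "Google Compute Engine")
	}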
168 vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go (generated, vendored, new file)
@@ -0,0 +1,168 @@
|
|||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/protobuf/empty.proto
|
||||
|
||||
package emptypb
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
// A generic empty message that you can re-use to avoid defining duplicated
|
||||
// empty messages in your APIs. A typical example is to use it as the request
|
||||
// or the response type of an API method. For instance:
|
||||
//
|
||||
// service Foo {
|
||||
// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
|
||||
// }
|
||||
//
|
||||
// The JSON representation for `Empty` is an empty JSON object `{}`.
|
||||
type Empty struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *Empty) Reset() {
|
||||
*x = Empty{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_google_protobuf_empty_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Empty) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Empty) ProtoMessage() {}
|
||||
|
||||
func (x *Empty) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_protobuf_empty_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Empty.ProtoReflect.Descriptor instead.
|
||||
func (*Empty) Descriptor() ([]byte, []int) {
|
||||
return file_google_protobuf_empty_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
var File_google_protobuf_empty_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_google_protobuf_empty_proto_rawDesc = []byte{
|
||||
0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
|
||||
0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07,
|
||||
0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x7d, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0a,
|
||||
0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b,
|
||||
0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2,
|
||||
0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77,
|
||||
0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_google_protobuf_empty_proto_rawDescOnce sync.Once
|
||||
file_google_protobuf_empty_proto_rawDescData = file_google_protobuf_empty_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_google_protobuf_empty_proto_rawDescGZIP() []byte {
|
||||
file_google_protobuf_empty_proto_rawDescOnce.Do(func() {
|
||||
file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_empty_proto_rawDescData)
|
||||
})
|
||||
return file_google_protobuf_empty_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_google_protobuf_empty_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
|
||||
var file_google_protobuf_empty_proto_goTypes = []interface{}{
|
||||
(*Empty)(nil), // 0: google.protobuf.Empty
|
||||
}
|
||||
var file_google_protobuf_empty_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_google_protobuf_empty_proto_init() }
|
||||
func file_google_protobuf_empty_proto_init() {
|
||||
if File_google_protobuf_empty_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Empty); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_google_protobuf_empty_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 1,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_google_protobuf_empty_proto_goTypes,
|
||||
DependencyIndexes: file_google_protobuf_empty_proto_depIdxs,
|
||||
MessageInfos: file_google_protobuf_empty_proto_msgTypes,
|
||||
}.Build()
|
||||
File_google_protobuf_empty_proto = out.File
|
||||
file_google_protobuf_empty_proto_rawDesc = nil
|
||||
file_google_protobuf_empty_proto_goTypes = nil
|
||||
file_google_protobuf_empty_proto_depIdxs = nil
|
||||
}
|
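On the Go side, an RPC declared with google.protobuf.Empty simply takes or returns a pointer to the generated Empty type. A tiny sketch against a hypothetical generated client; fooClient, Bar, and ctx are placeholders:

	if _, err := fooClient.Bar(ctx, &emptypb.Empty{}); err != nil {
		log.Fatalf("Bar: %v", err)
	}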
591 vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go (generated, vendored, new file)
@@ -0,0 +1,591 @@
|
|||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/protobuf/field_mask.proto
|
||||
|
||||
// Package fieldmaskpb contains generated types for google/protobuf/field_mask.proto.
|
||||
//
|
||||
// The FieldMask message represents a set of symbolic field paths.
|
||||
// The paths are specific to some target message type,
|
||||
// which is not stored within the FieldMask message itself.
|
||||
//
|
||||
//
|
||||
// Constructing a FieldMask
|
||||
//
|
||||
// The New function is used to construct a FieldMask:
|
||||
//
|
||||
// var messageType *descriptorpb.DescriptorProto
|
||||
// fm, err := fieldmaskpb.New(messageType, "field.name", "field.number")
|
||||
// if err != nil {
|
||||
// ... // handle error
|
||||
// }
|
||||
// ... // make use of fm
|
||||
//
|
||||
// The "field.name" and "field.number" paths are valid paths according to the
|
||||
// google.protobuf.DescriptorProto message. Use of a path that does not correlate
|
||||
// to valid fields reachable from DescriptorProto would result in an error.
|
||||
//
|
||||
// Once a FieldMask message has been constructed,
|
||||
// the Append method can be used to insert additional paths to the path set:
|
||||
//
|
||||
// var messageType *descriptorpb.DescriptorProto
|
||||
// if err := fm.Append(messageType, "options"); err != nil {
|
||||
// ... // handle error
|
||||
// }
|
||||
//
|
||||
//
|
||||
// Type checking a FieldMask
|
||||
//
|
||||
// In order to verify that a FieldMask represents a set of fields that are
|
||||
// reachable from some target message type, use the IsValid method:
|
||||
//
|
||||
// var messageType *descriptorpb.DescriptorProto
|
||||
// if fm.IsValid(messageType) {
|
||||
// ... // make use of fm
|
||||
// }
|
||||
//
|
||||
// IsValid needs to be passed the target message type as an input since the
|
||||
// FieldMask message itself does not store the message type that the set of paths
|
||||
// are for.
|
||||
package fieldmaskpb
|
||||
|
||||
import (
|
||||
proto "google.golang.org/protobuf/proto"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sort "sort"
|
||||
strings "strings"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
// `FieldMask` represents a set of symbolic field paths, for example:
|
||||
//
|
||||
// paths: "f.a"
|
||||
// paths: "f.b.d"
|
||||
//
|
||||
// Here `f` represents a field in some root message, `a` and `b`
|
||||
// fields in the message found in `f`, and `d` a field found in the
|
||||
// message in `f.b`.
|
||||
//
|
||||
// Field masks are used to specify a subset of fields that should be
|
||||
// returned by a get operation or modified by an update operation.
|
||||
// Field masks also have a custom JSON encoding (see below).
|
||||
//
|
||||
// # Field Masks in Projections
|
||||
//
|
||||
// When used in the context of a projection, a response message or
|
||||
// sub-message is filtered by the API to only contain those fields as
|
||||
// specified in the mask. For example, if the mask in the previous
|
||||
// example is applied to a response message as follows:
|
||||
//
|
||||
// f {
|
||||
// a : 22
|
||||
// b {
|
||||
// d : 1
|
||||
// x : 2
|
||||
// }
|
||||
// y : 13
|
||||
// }
|
||||
// z: 8
|
||||
//
|
||||
// The result will not contain specific values for fields x, y and z
|
||||
// (their value will be set to the default, and omitted in proto text
|
||||
// output):
|
||||
//
|
||||
//
|
||||
// f {
|
||||
// a : 22
|
||||
// b {
|
||||
// d : 1
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// A repeated field is not allowed except at the last position of a
|
||||
// paths string.
|
||||
//
|
||||
// If a FieldMask object is not present in a get operation, the
|
||||
// operation applies to all fields (as if a FieldMask of all fields
|
||||
// had been specified).
|
||||
//
|
||||
// Note that a field mask does not necessarily apply to the
|
||||
// top-level response message. In case of a REST get operation, the
|
||||
// field mask applies directly to the response, but in case of a REST
|
||||
// list operation, the mask instead applies to each individual message
|
||||
// in the returned resource list. In case of a REST custom method,
|
||||
// other definitions may be used. Where the mask applies will be
|
||||
// clearly documented together with its declaration in the API. In
|
||||
// any case, the effect on the returned resource/resources is required
|
||||
// behavior for APIs.
|
||||
//
|
||||
// # Field Masks in Update Operations
|
||||
//
|
||||
// A field mask in update operations specifies which fields of the
|
||||
// targeted resource are going to be updated. The API is required
|
||||
// to only change the values of the fields as specified in the mask
|
||||
// and leave the others untouched. If a resource is passed in to
|
||||
// describe the updated values, the API ignores the values of all
|
||||
// fields not covered by the mask.
|
||||
//
|
||||
// If a repeated field is specified for an update operation, new values will
|
||||
// be appended to the existing repeated field in the target resource. Note that
|
||||
// a repeated field is only allowed in the last position of a `paths` string.
|
||||
//
|
||||
// If a sub-message is specified in the last position of the field mask for an
|
||||
// update operation, then new value will be merged into the existing sub-message
|
||||
// in the target resource.
|
||||
//
|
||||
// For example, given the target message:
|
||||
//
|
||||
// f {
|
||||
// b {
|
||||
// d: 1
|
||||
// x: 2
|
||||
// }
|
||||
// c: [1]
|
||||
// }
|
||||
//
|
||||
// And an update message:
|
||||
//
|
||||
// f {
|
||||
// b {
|
||||
// d: 10
|
||||
// }
|
||||
// c: [2]
|
||||
// }
|
||||
//
|
||||
// then if the field mask is:
|
||||
//
|
||||
// paths: ["f.b", "f.c"]
|
||||
//
|
||||
// then the result will be:
|
||||
//
|
||||
// f {
|
||||
// b {
|
||||
// d: 10
|
||||
// x: 2
|
||||
// }
|
||||
// c: [1, 2]
|
||||
// }
|
||||
//
|
||||
// An implementation may provide options to override this default behavior for
|
||||
// repeated and message fields.
|
||||
//
|
||||
// In order to reset a field's value to the default, the field must
|
||||
// be in the mask and set to the default value in the provided resource.
|
||||
// Hence, in order to reset all fields of a resource, provide a default
|
||||
// instance of the resource and set all fields in the mask, or do
|
||||
// not provide a mask as described below.
|
||||
//
|
||||
// If a field mask is not present on update, the operation applies to
|
||||
// all fields (as if a field mask of all fields has been specified).
|
||||
// Note that in the presence of schema evolution, this may mean that
|
||||
// fields the client does not know and has therefore not filled into
|
||||
// the request will be reset to their default. If this is unwanted
|
||||
// behavior, a specific service may require a client to always specify
|
||||
// a field mask, producing an error if not.
|
||||
//
|
||||
// As with get operations, the location of the resource which
|
||||
// describes the updated values in the request message depends on the
|
||||
// operation kind. In any case, the effect of the field mask is
|
||||
// required to be honored by the API.
|
||||
//
|
||||
// ## Considerations for HTTP REST
|
||||
//
|
||||
// The HTTP kind of an update operation which uses a field mask must
|
||||
// be set to PATCH instead of PUT in order to satisfy HTTP semantics
|
||||
// (PUT must only be used for full updates).
|
||||
//
|
||||
// # JSON Encoding of Field Masks
|
||||
//
|
||||
// In JSON, a field mask is encoded as a single string where paths are
|
||||
// separated by a comma. Field names in each path are converted
|
||||
// to/from lower-camel naming conventions.
|
||||
//
|
||||
// As an example, consider the following message declarations:
|
||||
//
|
||||
// message Profile {
|
||||
// User user = 1;
|
||||
// Photo photo = 2;
|
||||
// }
|
||||
// message User {
|
||||
// string display_name = 1;
|
||||
// string address = 2;
|
||||
// }
|
||||
//
|
||||
// In proto a field mask for `Profile` may look as such:
|
||||
//
|
||||
// mask {
|
||||
// paths: "user.display_name"
|
||||
// paths: "photo"
|
||||
// }
|
||||
//
|
||||
// In JSON, the same mask is represented as below:
|
||||
//
|
||||
// {
|
||||
// mask: "user.displayName,photo"
|
||||
// }
|
||||
//
|
||||
// # Field Masks and Oneof Fields
|
||||
//
|
||||
// Field masks treat fields in oneofs just as regular fields. Consider the
|
||||
// following message:
|
||||
//
|
||||
// message SampleMessage {
|
||||
// oneof test_oneof {
|
||||
// string name = 4;
|
||||
// SubMessage sub_message = 9;
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// The field mask can be:
|
||||
//
|
||||
// mask {
|
||||
// paths: "name"
|
||||
// }
|
||||
//
|
||||
// Or:
|
||||
//
|
||||
// mask {
|
||||
// paths: "sub_message"
|
||||
// }
|
||||
//
|
||||
// Note that oneof type names ("test_oneof" in this case) cannot be used in
|
||||
// paths.
|
||||
//
|
||||
// ## Field Mask Verification
|
||||
//
|
||||
// The implementation of any API method which has a FieldMask type field in the
|
||||
// request should verify the included field paths, and return an
|
||||
// `INVALID_ARGUMENT` error if any path is unmappable.
|
||||
type FieldMask struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The set of field mask paths.
|
||||
Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
|
||||
}
|
||||
|
||||
// New constructs a field mask from a list of paths and verifies that
|
||||
// each one is valid according to the specified message type.
|
||||
func New(m proto.Message, paths ...string) (*FieldMask, error) {
|
||||
x := new(FieldMask)
|
||||
return x, x.Append(m, paths...)
|
||||
}
|
||||
|
||||
// Union returns the union of all the paths in the input field masks.
|
||||
func Union(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {
|
||||
var out []string
|
||||
out = append(out, mx.GetPaths()...)
|
||||
out = append(out, my.GetPaths()...)
|
||||
for _, m := range ms {
|
||||
out = append(out, m.GetPaths()...)
|
||||
}
|
||||
return &FieldMask{Paths: normalizePaths(out)}
|
||||
}
|
||||
|
||||
// Intersect returns the intersection of all the paths in the input field masks.
|
||||
func Intersect(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {
|
||||
var ss1, ss2 []string // reused buffers for performance
|
||||
intersect := func(out, in []string) []string {
|
||||
ss1 = normalizePaths(append(ss1[:0], in...))
|
||||
ss2 = normalizePaths(append(ss2[:0], out...))
|
||||
out = out[:0]
|
||||
for i1, i2 := 0, 0; i1 < len(ss1) && i2 < len(ss2); {
|
||||
switch s1, s2 := ss1[i1], ss2[i2]; {
|
||||
case hasPathPrefix(s1, s2):
|
||||
out = append(out, s1)
|
||||
i1++
|
||||
case hasPathPrefix(s2, s1):
|
||||
out = append(out, s2)
|
||||
i2++
|
||||
case lessPath(s1, s2):
|
||||
i1++
|
||||
case lessPath(s2, s1):
|
||||
i2++
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
out := Union(mx, my, ms...).GetPaths()
|
||||
out = intersect(out, mx.GetPaths())
|
||||
out = intersect(out, my.GetPaths())
|
||||
for _, m := range ms {
|
||||
out = intersect(out, m.GetPaths())
|
||||
}
|
||||
return &FieldMask{Paths: normalizePaths(out)}
|
||||
}
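A concrete illustration of Union and Intersect on overlapping masks; the expected results, noted in the comments, follow from hasPathPrefix treating "user" as covering "user.display_name":

	a := &FieldMask{Paths: []string{"user.display_name"}}
	b := &FieldMask{Paths: []string{"user", "photo"}}

	u := Union(a, b)     // u.Paths == ["photo", "user"]: "user.display_name" is elided as redundant
	i := Intersect(a, b) // i.Paths == ["user.display_name"]: only the narrower shared path remains
	_, _ = u, i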
|
||||
|
||||
// IsValid reports whether all the paths are syntactically valid and
|
||||
// refer to known fields in the specified message type.
|
||||
// It reports false for a nil FieldMask.
|
||||
func (x *FieldMask) IsValid(m proto.Message) bool {
|
||||
paths := x.GetPaths()
|
||||
return x != nil && numValidPaths(m, paths) == len(paths)
|
||||
}
|
||||
|
||||
// Append appends a list of paths to the mask and verifies that each one
|
||||
// is valid according to the specified message type.
|
||||
// An invalid path is not appended and breaks insertion of subsequent paths.
|
||||
func (x *FieldMask) Append(m proto.Message, paths ...string) error {
|
||||
numValid := numValidPaths(m, paths)
|
||||
x.Paths = append(x.Paths, paths[:numValid]...)
|
||||
paths = paths[numValid:]
|
||||
if len(paths) > 0 {
|
||||
name := m.ProtoReflect().Descriptor().FullName()
|
||||
return protoimpl.X.NewError("invalid path %q for message %q", paths[0], name)
|
||||
}
|
||||
return nil
|
||||
}
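Taken together, New, Append, and IsValid cover the construction-and-validation flow sketched in the package comment. A compact version of that flow, using DescriptorProto as the target message type as the package documentation does; the descriptorpb and log imports are assumed, and the chosen paths are singular fields so they validate cleanly:

	var messageType *descriptorpb.DescriptorProto
	fm, err := New(messageType, "name", "options.deprecated")
	if err != nil {
		log.Fatalf("building mask: %v", err)
	}
	if err := fm.Append(messageType, "nested_type"); err != nil {
		log.Fatalf("appending path: %v", err)
	}
	if !fm.IsValid(messageType) {
		log.Fatal("mask contains paths unknown to DescriptorProto")
	}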
|
||||
|
||||
func numValidPaths(m proto.Message, paths []string) int {
|
||||
md0 := m.ProtoReflect().Descriptor()
|
||||
for i, path := range paths {
|
||||
md := md0
|
||||
if !rangeFields(path, func(field string) bool {
|
||||
// Search the field within the message.
|
||||
if md == nil {
|
||||
return false // not within a message
|
||||
}
|
||||
fd := md.Fields().ByName(protoreflect.Name(field))
|
||||
// The real field name of a group is the message name.
|
||||
if fd == nil {
|
||||
gd := md.Fields().ByName(protoreflect.Name(strings.ToLower(field)))
|
||||
if gd != nil && gd.Kind() == protoreflect.GroupKind && string(gd.Message().Name()) == field {
|
||||
fd = gd
|
||||
}
|
||||
} else if fd.Kind() == protoreflect.GroupKind && string(fd.Message().Name()) != field {
|
||||
fd = nil
|
||||
}
|
||||
if fd == nil {
|
||||
return false // message does not have this field
|
||||
}
|
||||
|
||||
// Identify the next message to search within.
|
||||
md = fd.Message() // may be nil
|
||||
|
||||
// Repeated fields are only allowed at the last position.
|
||||
if fd.IsList() || fd.IsMap() {
|
||||
md = nil
|
||||
}
|
||||
|
||||
return true
|
||||
}) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return len(paths)
|
||||
}
|
||||
|
||||
// Normalize converts the mask to its canonical form where all paths are sorted
|
||||
// and redundant paths are removed.
|
||||
func (x *FieldMask) Normalize() {
|
||||
x.Paths = normalizePaths(x.Paths)
|
||||
}
|
||||
|
||||
func normalizePaths(paths []string) []string {
|
||||
sort.Slice(paths, func(i, j int) bool {
|
||||
return lessPath(paths[i], paths[j])
|
||||
})
|
||||
|
||||
// Elide any path that is a prefix match on the previous.
|
||||
out := paths[:0]
|
||||
for _, path := range paths {
|
||||
if len(out) > 0 && hasPathPrefix(path, out[len(out)-1]) {
|
||||
continue
|
||||
}
|
||||
out = append(out, path)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// hasPathPrefix is like strings.HasPrefix, but further checks for either
|
||||
// an exact match or that the prefix is delimited by a dot.
|
||||
func hasPathPrefix(path, prefix string) bool {
|
||||
return strings.HasPrefix(path, prefix) && (len(path) == len(prefix) || path[len(prefix)] == '.')
|
||||
}
|
||||
|
||||
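// Illustrative sketch (editor's addition, not part of the generated file):
// unlike strings.HasPrefix, the prefix must end exactly at a dot boundary.
func exampleHasPathPrefixSketch() (bool, bool) {
	delimited := hasPathPrefix("foo.bar", "foo") // true: prefix ends at a dot
	partial := hasPathPrefix("foobar", "foo")    // false: "foo" is not a whole path component
	return delimited, partial
}
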
// lessPath is a lexicographical comparison where dot is specially treated
// as the smallest symbol.
func lessPath(x, y string) bool {
	for i := 0; i < len(x) && i < len(y); i++ {
		if x[i] != y[i] {
			return (x[i] - '.') < (y[i] - '.')
		}
	}
	return len(x) < len(y)
}

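// Illustrative sketch (editor's addition, not part of the generated file):
// lessPath ranks the dot below every other byte, so a parent path sorts
// directly before its children, which is what normalizePaths relies on when
// eliding redundant prefixes. The path strings are hypothetical.
func exampleLessPathSketch() bool {
	// Plain byte-wise comparison would order "ab-cd" before "ab.x" ('-' < '.'),
	// but lessPath keeps "ab.x" first so it stays adjacent to its parent "ab".
	return lessPath("ab.x", "ab-cd") // true
}
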
// rangeFields is like strings.Split(path, "."), but avoids allocations by
// iterating over each field in place and calling an iterator function.
func rangeFields(path string, f func(field string) bool) bool {
	for {
		var field string
		if i := strings.IndexByte(path, '.'); i >= 0 {
			field, path = path[:i], path[i:]
		} else {
			field, path = path, ""
		}

		if !f(field) {
			return false
		}

		if len(path) == 0 {
			return true
		}
		path = strings.TrimPrefix(path, ".")
	}
}

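// Illustrative sketch (editor's addition, not part of the generated file):
// rangeFields walks the dot-separated components of a path in place and stops
// early as soon as the callback returns false.
func exampleRangeFieldsSketch() []string {
	var fields []string
	rangeFields("user.address.city", func(field string) bool {
		fields = append(fields, field)
		return true
	})
	return fields // []string{"user", "address", "city"}
}
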
func (x *FieldMask) Reset() {
	*x = FieldMask{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *FieldMask) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*FieldMask) ProtoMessage() {}

func (x *FieldMask) ProtoReflect() protoreflect.Message {
	mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FieldMask.ProtoReflect.Descriptor instead.
func (*FieldMask) Descriptor() ([]byte, []int) {
	return file_google_protobuf_field_mask_proto_rawDescGZIP(), []int{0}
}

func (x *FieldMask) GetPaths() []string {
	if x != nil {
		return x.Paths
	}
	return nil
}

var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor

var file_google_protobuf_field_mask_proto_rawDesc = []byte{
	0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
	0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f,
	0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
	0x62, 0x75, 0x66, 0x22, 0x21, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b,
	0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
	0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e,
	0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
	0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
	0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70,
	0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x6d, 0x61,
	0x73, 0x6b, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e,
	0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
	0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
	file_google_protobuf_field_mask_proto_rawDescOnce sync.Once
	file_google_protobuf_field_mask_proto_rawDescData = file_google_protobuf_field_mask_proto_rawDesc
)

func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte {
	file_google_protobuf_field_mask_proto_rawDescOnce.Do(func() {
		file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_field_mask_proto_rawDescData)
	})
	return file_google_protobuf_field_mask_proto_rawDescData
}

var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_google_protobuf_field_mask_proto_goTypes = []interface{}{
	(*FieldMask)(nil), // 0: google.protobuf.FieldMask
}
var file_google_protobuf_field_mask_proto_depIdxs = []int32{
	0, // [0:0] is the sub-list for method output_type
	0, // [0:0] is the sub-list for method input_type
	0, // [0:0] is the sub-list for extension type_name
	0, // [0:0] is the sub-list for extension extendee
	0, // [0:0] is the sub-list for field type_name
}

func init() { file_google_protobuf_field_mask_proto_init() }
func file_google_protobuf_field_mask_proto_init() {
	if File_google_protobuf_field_mask_proto != nil {
		return
	}
	if !protoimpl.UnsafeEnabled {
		file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*FieldMask); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_google_protobuf_field_mask_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   1,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_google_protobuf_field_mask_proto_goTypes,
		DependencyIndexes: file_google_protobuf_field_mask_proto_depIdxs,
		MessageInfos:      file_google_protobuf_field_mask_proto_msgTypes,
	}.Build()
	File_google_protobuf_field_mask_proto = out.File
	file_google_protobuf_field_mask_proto_rawDesc = nil
	file_google_protobuf_field_mask_proto_goTypes = nil
	file_google_protobuf_field_mask_proto_depIdxs = nil
}
29
vendor/modules.txt
vendored
@@ -6,9 +6,10 @@ cloud.google.com/go/internal
cloud.google.com/go/internal/optional
cloud.google.com/go/internal/trace
cloud.google.com/go/internal/version
# cloud.google.com/go/storage v1.16.0
# cloud.google.com/go/storage v1.16.1
## explicit
cloud.google.com/go/storage
cloud.google.com/go/storage/internal/apiv2
# github.com/VictoriaMetrics/fastcache v1.6.0
## explicit
github.com/VictoriaMetrics/fastcache
@@ -114,6 +115,12 @@ github.com/golang/protobuf/ptypes/timestamp
# github.com/golang/snappy v0.0.4
## explicit
github.com/golang/snappy
# github.com/google/go-cmp v0.5.6
github.com/google/go-cmp/cmp
github.com/google/go-cmp/cmp/internal/diff
github.com/google/go-cmp/cmp/internal/flags
github.com/google/go-cmp/cmp/internal/function
github.com/google/go-cmp/cmp/internal/value
# github.com/googleapis/gax-go/v2 v2.1.0
github.com/googleapis/gax-go/v2
github.com/googleapis/gax-go/v2/apierror
@@ -216,6 +223,7 @@ go.opencensus.io/internal
go.opencensus.io/internal/tagencoding
go.opencensus.io/metric/metricdata
go.opencensus.io/metric/metricproducer
go.opencensus.io/plugin/ocgrpc
go.opencensus.io/plugin/ochttp
go.opencensus.io/plugin/ochttp/propagation/b3
go.opencensus.io/resource
@@ -278,6 +286,7 @@ google.golang.org/api/option
google.golang.org/api/option/internaloption
google.golang.org/api/storage/v1
google.golang.org/api/transport/cert
google.golang.org/api/transport/grpc
google.golang.org/api/transport/http
google.golang.org/api/transport/http/internal/propagation
google.golang.org/api/transport/internal/dca
@@ -290,7 +299,9 @@ google.golang.org/appengine/internal/datastore
google.golang.org/appengine/internal/log
google.golang.org/appengine/internal/modules
google.golang.org/appengine/internal/remote_api
google.golang.org/appengine/internal/socket
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/socket
google.golang.org/appengine/urlfetch
# google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2
## explicit
@@ -299,6 +310,8 @@ google.golang.org/genproto/googleapis/iam/v1
google.golang.org/genproto/googleapis/rpc/code
google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status
google.golang.org/genproto/googleapis/storage/v2
google.golang.org/genproto/googleapis/type/date
google.golang.org/genproto/googleapis/type/expr
# google.golang.org/grpc v1.40.0
google.golang.org/grpc
@@ -306,12 +319,23 @@ google.golang.org/grpc/attributes
google.golang.org/grpc/backoff
google.golang.org/grpc/balancer
google.golang.org/grpc/balancer/base
google.golang.org/grpc/balancer/grpclb
google.golang.org/grpc/balancer/grpclb/grpc_lb_v1
google.golang.org/grpc/balancer/grpclb/state
google.golang.org/grpc/balancer/roundrobin
google.golang.org/grpc/binarylog/grpc_binarylog_v1
google.golang.org/grpc/codes
google.golang.org/grpc/connectivity
google.golang.org/grpc/credentials
google.golang.org/grpc/credentials/alts
google.golang.org/grpc/credentials/alts/internal
google.golang.org/grpc/credentials/alts/internal/authinfo
google.golang.org/grpc/credentials/alts/internal/conn
google.golang.org/grpc/credentials/alts/internal/handshaker
google.golang.org/grpc/credentials/alts/internal/handshaker/service
google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp
google.golang.org/grpc/credentials/google
google.golang.org/grpc/credentials/oauth
google.golang.org/grpc/encoding
google.golang.org/grpc/encoding/proto
google.golang.org/grpc/grpclog
@@ -323,6 +347,7 @@ google.golang.org/grpc/internal/buffer
google.golang.org/grpc/internal/channelz
google.golang.org/grpc/internal/credentials
google.golang.org/grpc/internal/envconfig
google.golang.org/grpc/internal/googlecloud
google.golang.org/grpc/internal/grpclog
google.golang.org/grpc/internal/grpcrand
google.golang.org/grpc/internal/grpcsync
@@ -377,6 +402,8 @@ google.golang.org/protobuf/runtime/protoimpl
google.golang.org/protobuf/types/descriptorpb
google.golang.org/protobuf/types/known/anypb
google.golang.org/protobuf/types/known/durationpb
google.golang.org/protobuf/types/known/emptypb
google.golang.org/protobuf/types/known/fieldmaskpb
google.golang.org/protobuf/types/known/timestamppb
# gopkg.in/yaml.v2 v2.4.0
## explicit