vendor: make vendor-update

Aliaksandr Valialkin 2020-04-15 14:52:17 +03:00
parent ccb887c0f6
commit a59a7bcc5e
188 changed files with 41125 additions and 20843 deletions

go.mod | 7

@@ -7,6 +7,7 @@ require (
github.com/VictoriaMetrics/metrics v1.11.2
github.com/aws/aws-sdk-go v1.30.7
github.com/cespare/xxhash/v2 v2.1.1
github.com/golang/protobuf v1.4.0 // indirect
github.com/golang/snappy v0.0.1
github.com/klauspost/compress v1.10.4
github.com/valyala/fasthttp v1.9.0
@@ -15,10 +16,10 @@ require (
github.com/valyala/gozstd v1.6.4
github.com/valyala/histogram v1.0.1
github.com/valyala/quicktemplate v1.4.1
golang.org/x/sys v0.0.0-20200409092240-59c9f1ba88fa
golang.org/x/tools v0.0.0-20200410132612-ae9902aceb98 // indirect
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4
golang.org/x/tools v0.0.0-20200415034506-5d8e1897c761 // indirect
google.golang.org/api v0.21.0
google.golang.org/genproto v0.0.0-20200410110633-0848e9f44c36 // indirect
google.golang.org/genproto v0.0.0-20200413115906-b5235f65be36 // indirect
google.golang.org/grpc v1.28.1 // indirect
gopkg.in/yaml.v2 v2.2.8
)

go.sum | 24

@@ -81,6 +81,12 @@ github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -257,8 +263,8 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepx
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200409092240-59c9f1ba88fa h1:mQTN3ECqfsViCNBgq+A40vdwhkGykrrQlYe3mPj6BoU=
golang.org/x/sys v0.0.0-20200409092240-59c9f1ba88fa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 h1:opSr2sbRXk5X5/givKrrKj9HXxFpW2sdCiP8MJSKLQY=
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -297,8 +303,8 @@ golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200410132612-ae9902aceb98 h1:ibc1eDGW5ajwA4qzFTj0WHlD9eofMe1gAre+A0a3Vhs=
golang.org/x/tools v0.0.0-20200410132612-ae9902aceb98/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200415034506-5d8e1897c761 h1:FVw4lelfGRNPqB3C8qX1m+QyeM2vzToIwlFhEZX42y8=
golang.org/x/tools v0.0.0-20200415034506-5d8e1897c761/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
@@ -343,8 +349,8 @@ google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce h1:1mbrb1tUU+Zmt5C
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200410110633-0848e9f44c36 h1:QGM8iDIfHwTRMruKa7+aKDNRMQcWKeS6LDhdMiucYCE=
google.golang.org/genproto v0.0.0-20200410110633-0848e9f44c36/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200413115906-b5235f65be36 h1:j7CmVRD4Kec0+f8VuBAc2Ak2MFfXm5Q2/RxuJLL+76E=
google.golang.org/genproto v0.0.0-20200413115906-b5235f65be36/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -359,6 +365,12 @@ google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.28.1 h1:C1QC6KzgSiLyBabDi87BbjaGreoRgGUF5nOyvfrAZ1k=
google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=


@@ -0,0 +1,398 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gengogrpc contains the gRPC code generator.
package gengogrpc
import (
"fmt"
"strconv"
"strings"
"google.golang.org/protobuf/compiler/protogen"
"google.golang.org/protobuf/types/descriptorpb"
)
const (
contextPackage = protogen.GoImportPath("context")
grpcPackage = protogen.GoImportPath("google.golang.org/grpc")
codesPackage = protogen.GoImportPath("google.golang.org/grpc/codes")
statusPackage = protogen.GoImportPath("google.golang.org/grpc/status")
)
// GenerateFile generates a _grpc.pb.go file containing gRPC service definitions.
func GenerateFile(gen *protogen.Plugin, file *protogen.File) *protogen.GeneratedFile {
if len(file.Services) == 0 {
return nil
}
filename := file.GeneratedFilenamePrefix + "_grpc.pb.go"
g := gen.NewGeneratedFile(filename, file.GoImportPath)
g.P("// Code generated by protoc-gen-go-grpc. DO NOT EDIT.")
g.P()
g.P("package ", file.GoPackageName)
g.P()
GenerateFileContent(gen, file, g)
return g
}
// GenerateFileContent generates the gRPC service definitions, excluding the package statement.
func GenerateFileContent(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile) {
if len(file.Services) == 0 {
return
}
// TODO: Remove this. We don't need to include these references any more.
g.P("// Reference imports to suppress errors if they are not otherwise used.")
g.P("var _ ", contextPackage.Ident("Context"))
g.P("var _ ", grpcPackage.Ident("ClientConnInterface"))
g.P()
g.P("// This is a compile-time assertion to ensure that this generated file")
g.P("// is compatible with the grpc package it is being compiled against.")
g.P("const _ = ", grpcPackage.Ident("SupportPackageIsVersion6"))
g.P()
for _, service := range file.Services {
genService(gen, file, g, service)
}
}
func genService(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, service *protogen.Service) {
clientName := service.GoName + "Client"
g.P("// ", clientName, " is the client API for ", service.GoName, " service.")
g.P("//")
g.P("// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.")
// Client interface.
if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() {
g.P("//")
g.P(deprecationComment)
}
g.Annotate(clientName, service.Location)
g.P("type ", clientName, " interface {")
for _, method := range service.Methods {
g.Annotate(clientName+"."+method.GoName, method.Location)
if method.Desc.Options().(*descriptorpb.MethodOptions).GetDeprecated() {
g.P(deprecationComment)
}
g.P(method.Comments.Leading,
clientSignature(g, method))
}
g.P("}")
g.P()
// Client structure.
g.P("type ", unexport(clientName), " struct {")
g.P("cc ", grpcPackage.Ident("ClientConnInterface"))
g.P("}")
g.P()
// NewClient factory.
if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() {
g.P(deprecationComment)
}
g.P("func New", clientName, " (cc ", grpcPackage.Ident("ClientConnInterface"), ") ", clientName, " {")
g.P("return &", unexport(clientName), "{cc}")
g.P("}")
g.P()
var methodIndex, streamIndex int
// Client method implementations.
for _, method := range service.Methods {
if !method.Desc.IsStreamingServer() && !method.Desc.IsStreamingClient() {
// Unary RPC method
genClientMethod(gen, file, g, method, methodIndex)
methodIndex++
} else {
// Streaming RPC method
genClientMethod(gen, file, g, method, streamIndex)
streamIndex++
}
}
// Server interface.
serverType := service.GoName + "Server"
g.P("// ", serverType, " is the server API for ", service.GoName, " service.")
if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() {
g.P("//")
g.P(deprecationComment)
}
g.Annotate(serverType, service.Location)
g.P("type ", serverType, " interface {")
for _, method := range service.Methods {
g.Annotate(serverType+"."+method.GoName, method.Location)
if method.Desc.Options().(*descriptorpb.MethodOptions).GetDeprecated() {
g.P(deprecationComment)
}
g.P(method.Comments.Leading,
serverSignature(g, method))
}
g.P("}")
g.P()
// Server Unimplemented struct for forward compatibility.
g.P("// Unimplemented", serverType, " can be embedded to have forward compatible implementations.")
g.P("type Unimplemented", serverType, " struct {")
g.P("}")
g.P()
for _, method := range service.Methods {
nilArg := ""
if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() {
nilArg = "nil,"
}
g.P("func (*Unimplemented", serverType, ") ", serverSignature(g, method), "{")
g.P("return ", nilArg, statusPackage.Ident("Errorf"), "(", codesPackage.Ident("Unimplemented"), `, "method `, method.GoName, ` not implemented")`)
g.P("}")
}
g.P()
// Server registration.
if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() {
g.P(deprecationComment)
}
serviceDescVar := "_" + service.GoName + "_serviceDesc"
g.P("func Register", service.GoName, "Server(s *", grpcPackage.Ident("Server"), ", srv ", serverType, ") {")
g.P("s.RegisterService(&", serviceDescVar, `, srv)`)
g.P("}")
g.P()
// Server handler implementations.
var handlerNames []string
for _, method := range service.Methods {
hname := genServerMethod(gen, file, g, method)
handlerNames = append(handlerNames, hname)
}
// Service descriptor.
g.P("var ", serviceDescVar, " = ", grpcPackage.Ident("ServiceDesc"), " {")
g.P("ServiceName: ", strconv.Quote(string(service.Desc.FullName())), ",")
g.P("HandlerType: (*", serverType, ")(nil),")
g.P("Methods: []", grpcPackage.Ident("MethodDesc"), "{")
for i, method := range service.Methods {
if method.Desc.IsStreamingClient() || method.Desc.IsStreamingServer() {
continue
}
g.P("{")
g.P("MethodName: ", strconv.Quote(string(method.Desc.Name())), ",")
g.P("Handler: ", handlerNames[i], ",")
g.P("},")
}
g.P("},")
g.P("Streams: []", grpcPackage.Ident("StreamDesc"), "{")
for i, method := range service.Methods {
if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() {
continue
}
g.P("{")
g.P("StreamName: ", strconv.Quote(string(method.Desc.Name())), ",")
g.P("Handler: ", handlerNames[i], ",")
if method.Desc.IsStreamingServer() {
g.P("ServerStreams: true,")
}
if method.Desc.IsStreamingClient() {
g.P("ClientStreams: true,")
}
g.P("},")
}
g.P("},")
g.P("Metadata: \"", file.Desc.Path(), "\",")
g.P("}")
g.P()
}
func clientSignature(g *protogen.GeneratedFile, method *protogen.Method) string {
s := method.GoName + "(ctx " + g.QualifiedGoIdent(contextPackage.Ident("Context"))
if !method.Desc.IsStreamingClient() {
s += ", in *" + g.QualifiedGoIdent(method.Input.GoIdent)
}
s += ", opts ..." + g.QualifiedGoIdent(grpcPackage.Ident("CallOption")) + ") ("
if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() {
s += "*" + g.QualifiedGoIdent(method.Output.GoIdent)
} else {
s += method.Parent.GoName + "_" + method.GoName + "Client"
}
s += ", error)"
return s
}
func genClientMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, method *protogen.Method, index int) {
service := method.Parent
sname := fmt.Sprintf("/%s/%s", service.Desc.FullName(), method.Desc.Name())
if method.Desc.Options().(*descriptorpb.MethodOptions).GetDeprecated() {
g.P(deprecationComment)
}
g.P("func (c *", unexport(service.GoName), "Client) ", clientSignature(g, method), "{")
if !method.Desc.IsStreamingServer() && !method.Desc.IsStreamingClient() {
g.P("out := new(", method.Output.GoIdent, ")")
g.P(`err := c.cc.Invoke(ctx, "`, sname, `", in, out, opts...)`)
g.P("if err != nil { return nil, err }")
g.P("return out, nil")
g.P("}")
g.P()
return
}
streamType := unexport(service.GoName) + method.GoName + "Client"
serviceDescVar := "_" + service.GoName + "_serviceDesc"
g.P("stream, err := c.cc.NewStream(ctx, &", serviceDescVar, ".Streams[", index, `], "`, sname, `", opts...)`)
g.P("if err != nil { return nil, err }")
g.P("x := &", streamType, "{stream}")
if !method.Desc.IsStreamingClient() {
g.P("if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }")
g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }")
}
g.P("return x, nil")
g.P("}")
g.P()
genSend := method.Desc.IsStreamingClient()
genRecv := method.Desc.IsStreamingServer()
genCloseAndRecv := !method.Desc.IsStreamingServer()
// Stream auxiliary types and methods.
g.P("type ", service.GoName, "_", method.GoName, "Client interface {")
if genSend {
g.P("Send(*", method.Input.GoIdent, ") error")
}
if genRecv {
g.P("Recv() (*", method.Output.GoIdent, ", error)")
}
if genCloseAndRecv {
g.P("CloseAndRecv() (*", method.Output.GoIdent, ", error)")
}
g.P(grpcPackage.Ident("ClientStream"))
g.P("}")
g.P()
g.P("type ", streamType, " struct {")
g.P(grpcPackage.Ident("ClientStream"))
g.P("}")
g.P()
if genSend {
g.P("func (x *", streamType, ") Send(m *", method.Input.GoIdent, ") error {")
g.P("return x.ClientStream.SendMsg(m)")
g.P("}")
g.P()
}
if genRecv {
g.P("func (x *", streamType, ") Recv() (*", method.Output.GoIdent, ", error) {")
g.P("m := new(", method.Output.GoIdent, ")")
g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }")
g.P("return m, nil")
g.P("}")
g.P()
}
if genCloseAndRecv {
g.P("func (x *", streamType, ") CloseAndRecv() (*", method.Output.GoIdent, ", error) {")
g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }")
g.P("m := new(", method.Output.GoIdent, ")")
g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }")
g.P("return m, nil")
g.P("}")
g.P()
}
}
func serverSignature(g *protogen.GeneratedFile, method *protogen.Method) string {
var reqArgs []string
ret := "error"
if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() {
reqArgs = append(reqArgs, g.QualifiedGoIdent(contextPackage.Ident("Context")))
ret = "(*" + g.QualifiedGoIdent(method.Output.GoIdent) + ", error)"
}
if !method.Desc.IsStreamingClient() {
reqArgs = append(reqArgs, "*"+g.QualifiedGoIdent(method.Input.GoIdent))
}
if method.Desc.IsStreamingClient() || method.Desc.IsStreamingServer() {
reqArgs = append(reqArgs, method.Parent.GoName+"_"+method.GoName+"Server")
}
return method.GoName + "(" + strings.Join(reqArgs, ", ") + ") " + ret
}
func genServerMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, method *protogen.Method) string {
service := method.Parent
hname := fmt.Sprintf("_%s_%s_Handler", service.GoName, method.GoName)
if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() {
g.P("func ", hname, "(srv interface{}, ctx ", contextPackage.Ident("Context"), ", dec func(interface{}) error, interceptor ", grpcPackage.Ident("UnaryServerInterceptor"), ") (interface{}, error) {")
g.P("in := new(", method.Input.GoIdent, ")")
g.P("if err := dec(in); err != nil { return nil, err }")
g.P("if interceptor == nil { return srv.(", service.GoName, "Server).", method.GoName, "(ctx, in) }")
g.P("info := &", grpcPackage.Ident("UnaryServerInfo"), "{")
g.P("Server: srv,")
g.P("FullMethod: ", strconv.Quote(fmt.Sprintf("/%s/%s", service.Desc.FullName(), method.GoName)), ",")
g.P("}")
g.P("handler := func(ctx ", contextPackage.Ident("Context"), ", req interface{}) (interface{}, error) {")
g.P("return srv.(", service.GoName, "Server).", method.GoName, "(ctx, req.(*", method.Input.GoIdent, "))")
g.P("}")
g.P("return interceptor(ctx, in, info, handler)")
g.P("}")
g.P()
return hname
}
streamType := unexport(service.GoName) + method.GoName + "Server"
g.P("func ", hname, "(srv interface{}, stream ", grpcPackage.Ident("ServerStream"), ") error {")
if !method.Desc.IsStreamingClient() {
g.P("m := new(", method.Input.GoIdent, ")")
g.P("if err := stream.RecvMsg(m); err != nil { return err }")
g.P("return srv.(", service.GoName, "Server).", method.GoName, "(m, &", streamType, "{stream})")
} else {
g.P("return srv.(", service.GoName, "Server).", method.GoName, "(&", streamType, "{stream})")
}
g.P("}")
g.P()
genSend := method.Desc.IsStreamingServer()
genSendAndClose := !method.Desc.IsStreamingServer()
genRecv := method.Desc.IsStreamingClient()
// Stream auxiliary types and methods.
g.P("type ", service.GoName, "_", method.GoName, "Server interface {")
if genSend {
g.P("Send(*", method.Output.GoIdent, ") error")
}
if genSendAndClose {
g.P("SendAndClose(*", method.Output.GoIdent, ") error")
}
if genRecv {
g.P("Recv() (*", method.Input.GoIdent, ", error)")
}
g.P(grpcPackage.Ident("ServerStream"))
g.P("}")
g.P()
g.P("type ", streamType, " struct {")
g.P(grpcPackage.Ident("ServerStream"))
g.P("}")
g.P()
if genSend {
g.P("func (x *", streamType, ") Send(m *", method.Output.GoIdent, ") error {")
g.P("return x.ServerStream.SendMsg(m)")
g.P("}")
g.P()
}
if genSendAndClose {
g.P("func (x *", streamType, ") SendAndClose(m *", method.Output.GoIdent, ") error {")
g.P("return x.ServerStream.SendMsg(m)")
g.P("}")
g.P()
}
if genRecv {
g.P("func (x *", streamType, ") Recv() (*", method.Input.GoIdent, ", error) {")
g.P("m := new(", method.Input.GoIdent, ")")
g.P("if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err }")
g.P("return m, nil")
g.P("}")
g.P()
}
return hname
}
const deprecationComment = "// Deprecated: Do not use."
func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] }
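
A minimal sketch of how a protoc plugin binary might drive the generator above, using the protogen framework this file already imports. The import path for the gengogrpc package is hypothetical, since the vendored file path is not shown in this diff.

    package main

    import (
        "google.golang.org/protobuf/compiler/protogen"

        // Hypothetical import path; substitute the actual vendored location
        // of the gengogrpc package shown above.
        gengogrpc "path/to/vendored/gengogrpc"
    )

    func main() {
        // Options.Run reads a CodeGeneratorRequest from stdin, invokes the
        // callback, and writes the CodeGeneratorResponse to stdout.
        protogen.Options{}.Run(func(gen *protogen.Plugin) error {
            for _, f := range gen.Files {
                if !f.Generate {
                    continue // only files named on the protoc command line
                }
                // GenerateFile returns nil for files that declare no services.
                gengogrpc.GenerateFile(gen, f)
            }
            return nil
        })
    }

Built as protoc-gen-go-grpc, such a binary would typically be invoked by protoc through the usual --<name>_out flag convention.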

vendor/github.com/golang/protobuf/proto/buffer.go (generated, vendored, new file) | 324

@@ -0,0 +1,324 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"errors"
"fmt"
"google.golang.org/protobuf/encoding/prototext"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/runtime/protoimpl"
)
const (
WireVarint = 0
WireFixed32 = 5
WireFixed64 = 1
WireBytes = 2
WireStartGroup = 3
WireEndGroup = 4
)
// EncodeVarint returns the varint encoded bytes of v.
func EncodeVarint(v uint64) []byte {
return protowire.AppendVarint(nil, v)
}
// SizeVarint returns the length of the varint encoded bytes of v.
// This is equal to len(EncodeVarint(v)).
func SizeVarint(v uint64) int {
return protowire.SizeVarint(v)
}
// DecodeVarint parses a varint encoded integer from b, returning the
// integer value and the length of the varint.
// It returns (0, 0) if there is a parse error.
func DecodeVarint(b []byte) (uint64, int) {
v, n := protowire.ConsumeVarint(b)
if n < 0 {
return 0, 0
}
return v, n
}
// Buffer is a buffer for encoding and decoding the protobuf wire format.
// It may be reused between invocations to reduce memory usage.
type Buffer struct {
buf []byte
idx int
deterministic bool
}
// NewBuffer allocates a new Buffer initialized with buf,
// where the contents of buf are considered the unread portion of the buffer.
func NewBuffer(buf []byte) *Buffer {
return &Buffer{buf: buf}
}
// SetDeterministic specifies whether to use deterministic serialization.
//
// Deterministic serialization guarantees that for a given binary, equal
// messages will always be serialized to the same bytes. This implies:
//
// - Repeated serialization of a message will return the same bytes.
// - Different processes of the same binary (which may be executing on
// different machines) will serialize equal messages to the same bytes.
//
// Note that the deterministic serialization is NOT canonical across
// languages. It is not guaranteed to remain stable over time. It is unstable
// across different builds with schema changes due to unknown fields.
// Users who need canonical serialization (e.g., persistent storage in a
// canonical form, fingerprinting, etc.) should define their own
// canonicalization specification and implement their own serializer rather
// than relying on this API.
//
// If deterministic serialization is requested, map entries will be sorted
// by keys in lexicographical order. This is an implementation detail and
// subject to change.
func (b *Buffer) SetDeterministic(deterministic bool) {
b.deterministic = deterministic
}
// SetBuf sets buf as the internal buffer,
// where the contents of buf are considered the unread portion of the buffer.
func (b *Buffer) SetBuf(buf []byte) {
b.buf = buf
b.idx = 0
}
// Reset clears the internal buffer of all written and unread data.
func (b *Buffer) Reset() {
b.buf = b.buf[:0]
b.idx = 0
}
// Bytes returns the internal buffer.
func (b *Buffer) Bytes() []byte {
return b.buf
}
// Unread returns the unread portion of the buffer.
func (b *Buffer) Unread() []byte {
return b.buf[b.idx:]
}
// Marshal appends the wire-format encoding of m to the buffer.
func (b *Buffer) Marshal(m Message) error {
var err error
b.buf, err = marshalAppend(b.buf, m, b.deterministic)
return err
}
// Unmarshal parses the wire-format message in the buffer and places the decoded results in m.
//
// Unlike proto.Unmarshal, this does not reset the message before starting to unmarshal.
func (b *Buffer) Unmarshal(m Message) error {
err := UnmarshalMerge(b.Unread(), m)
b.idx = len(b.buf)
return err
}
type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
func (m *unknownFields) String() string { panic("not implemented") }
func (m *unknownFields) Reset() { panic("not implemented") }
func (m *unknownFields) ProtoMessage() { panic("not implemented") }
// DebugPrint dumps the encoded bytes of b with a header and footer including s
// to stdout. This is only intended for debugging.
func (*Buffer) DebugPrint(s string, b []byte) {
m := MessageReflect(new(unknownFields))
m.SetUnknown(b)
b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
}
// EncodeVarint appends an unsigned varint encoding to the buffer.
func (b *Buffer) EncodeVarint(v uint64) error {
b.buf = protowire.AppendVarint(b.buf, v)
return nil
}
// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
func (b *Buffer) EncodeZigzag32(v uint64) error {
return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
}
// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
func (b *Buffer) EncodeZigzag64(v uint64) error {
return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
}
// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
func (b *Buffer) EncodeFixed32(v uint64) error {
b.buf = protowire.AppendFixed32(b.buf, uint32(v))
return nil
}
// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
func (b *Buffer) EncodeFixed64(v uint64) error {
b.buf = protowire.AppendFixed64(b.buf, uint64(v))
return nil
}
// EncodeRawBytes appends a length-prefixed raw bytes to the buffer.
func (b *Buffer) EncodeRawBytes(v []byte) error {
b.buf = protowire.AppendBytes(b.buf, v)
return nil
}
// EncodeStringBytes appends a length-prefixed raw bytes to the buffer.
// It does not validate whether v contains valid UTF-8.
func (b *Buffer) EncodeStringBytes(v string) error {
b.buf = protowire.AppendString(b.buf, v)
return nil
}
// EncodeMessage appends a length-prefixed encoded message to the buffer.
func (b *Buffer) EncodeMessage(m Message) error {
var err error
b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
b.buf, err = marshalAppend(b.buf, m, b.deterministic)
return err
}
// DecodeVarint consumes an encoded unsigned varint from the buffer.
func (b *Buffer) DecodeVarint() (uint64, error) {
v, n := protowire.ConsumeVarint(b.buf[b.idx:])
if n < 0 {
return 0, protowire.ParseError(n)
}
b.idx += n
return uint64(v), nil
}
// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
func (b *Buffer) DecodeZigzag32() (uint64, error) {
v, err := b.DecodeVarint()
if err != nil {
return 0, err
}
return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
}
// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
func (b *Buffer) DecodeZigzag64() (uint64, error) {
v, err := b.DecodeVarint()
if err != nil {
return 0, err
}
return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
}
// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
func (b *Buffer) DecodeFixed32() (uint64, error) {
v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
if n < 0 {
return 0, protowire.ParseError(n)
}
b.idx += n
return uint64(v), nil
}
// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
func (b *Buffer) DecodeFixed64() (uint64, error) {
v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
if n < 0 {
return 0, protowire.ParseError(n)
}
b.idx += n
return uint64(v), nil
}
// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer.
// If alloc is specified, it returns a copy of the raw bytes
// rather than a sub-slice of the buffer.
func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
v, n := protowire.ConsumeBytes(b.buf[b.idx:])
if n < 0 {
return nil, protowire.ParseError(n)
}
b.idx += n
if alloc {
v = append([]byte(nil), v...)
}
return v, nil
}
// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer.
// It does not validate whether the raw bytes contain valid UTF-8.
func (b *Buffer) DecodeStringBytes() (string, error) {
v, n := protowire.ConsumeString(b.buf[b.idx:])
if n < 0 {
return "", protowire.ParseError(n)
}
b.idx += n
return v, nil
}
// DecodeMessage consumes a length-prefixed message from the buffer.
// It does not reset m.
func (b *Buffer) DecodeMessage(m Message) error {
v, err := b.DecodeRawBytes(false)
if err != nil {
return err
}
return UnmarshalMerge(v, m)
}
// DecodeGroup consumes a message group from the buffer.
// It assumes that the start group marker has already been consumed and
// consumes all bytes until (and including) the end group marker.
// It does not reset m.
func (b *Buffer) DecodeGroup(m Message) error {
v, n, err := consumeGroup(b.buf[b.idx:])
if err != nil {
return err
}
b.idx += n
return UnmarshalMerge(v, m)
}
// consumeGroup parses b until it finds an end group marker, returning
// the raw bytes of the message (excluding the end group marker) and
// the total length of the message (including the end group marker).
func consumeGroup(b []byte) ([]byte, int, error) {
b0 := b
depth := 1 // assume this follows a start group marker
for {
_, wtyp, tagLen := protowire.ConsumeTag(b)
if tagLen < 0 {
return nil, 0, protowire.ParseError(tagLen)
}
b = b[tagLen:]
var valLen int
switch wtyp {
case protowire.VarintType:
_, valLen = protowire.ConsumeVarint(b)
case protowire.Fixed32Type:
_, valLen = protowire.ConsumeFixed32(b)
case protowire.Fixed64Type:
_, valLen = protowire.ConsumeFixed64(b)
case protowire.BytesType:
_, valLen = protowire.ConsumeBytes(b)
case protowire.StartGroupType:
depth++
case protowire.EndGroupType:
depth--
default:
return nil, 0, errors.New("proto: cannot parse reserved wire type")
}
if valLen < 0 {
return nil, 0, protowire.ParseError(valLen)
}
b = b[valLen:]
if depth == 0 {
return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
}
}
}
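
A short sketch exercising the Buffer API defined above: it round-trips a varint, a zig-zag-encoded negative number, and a length-prefixed byte slice, using only the exported methods from this file.

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    func main() {
        b := proto.NewBuffer(nil)

        // 300 encodes to the two varint bytes 0xAC 0x02.
        _ = b.EncodeVarint(300)

        // Zig-zag maps -3 to the unsigned value 5 before varint encoding.
        n := int64(-3)
        _ = b.EncodeZigzag64(uint64(n))

        // Length-prefixed bytes: a varint length followed by the payload.
        _ = b.EncodeRawBytes([]byte("abc"))

        fmt.Printf("% x\n", b.Bytes()) // ac 02 05 03 61 62 63

        v, _ := b.DecodeVarint()        // 300
        z, _ := b.DecodeZigzag64()      // -3 once converted back to int64
        s, _ := b.DecodeRawBytes(false) // sub-slice of the buffer, not a copy
        fmt.Println(v, int64(z), string(s)) // 300 -3 abc
    }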


@@ -1,253 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Protocol buffer deep copy and merge.
// TODO: RawMessage.
package proto
import (
"fmt"
"log"
"reflect"
"strings"
)
// Clone returns a deep copy of a protocol buffer.
func Clone(src Message) Message {
in := reflect.ValueOf(src)
if in.IsNil() {
return src
}
out := reflect.New(in.Type().Elem())
dst := out.Interface().(Message)
Merge(dst, src)
return dst
}
// Merger is the interface representing objects that can merge messages of the same type.
type Merger interface {
// Merge merges src into this message.
// Required and optional fields that are set in src will be set to that value in dst.
// Elements of repeated fields will be appended.
//
// Merge may panic if called with a different argument type than the receiver.
Merge(src Message)
}
// generatedMerger is the custom merge method that generated protos will have.
// We must add this method since a generated Merge method will conflict with
// many existing protos that have a Merge data field already defined.
type generatedMerger interface {
XXX_Merge(src Message)
}
// Merge merges src into dst.
// Required and optional fields that are set in src will be set to that value in dst.
// Elements of repeated fields will be appended.
// Merge panics if src and dst are not the same type, or if dst is nil.
func Merge(dst, src Message) {
if m, ok := dst.(Merger); ok {
m.Merge(src)
return
}
in := reflect.ValueOf(src)
out := reflect.ValueOf(dst)
if out.IsNil() {
panic("proto: nil destination")
}
if in.Type() != out.Type() {
panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
}
if in.IsNil() {
return // Merge from nil src is a noop
}
if m, ok := dst.(generatedMerger); ok {
m.XXX_Merge(src)
return
}
mergeStruct(out.Elem(), in.Elem())
}
func mergeStruct(out, in reflect.Value) {
sprop := GetProperties(in.Type())
for i := 0; i < in.NumField(); i++ {
f := in.Type().Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
}
if emIn, err := extendable(in.Addr().Interface()); err == nil {
emOut, _ := extendable(out.Addr().Interface())
mIn, muIn := emIn.extensionsRead()
if mIn != nil {
mOut := emOut.extensionsWrite()
muIn.Lock()
mergeExtension(mOut, mIn)
muIn.Unlock()
}
}
uf := in.FieldByName("XXX_unrecognized")
if !uf.IsValid() {
return
}
uin := uf.Bytes()
if len(uin) > 0 {
out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
}
}
// mergeAny performs a merge between two values of the same type.
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
// prop is set if this is a struct field (it may be nil).
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
if in.Type() == protoMessageType {
if !in.IsNil() {
if out.IsNil() {
out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
} else {
Merge(out.Interface().(Message), in.Interface().(Message))
}
}
return
}
switch in.Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
reflect.String, reflect.Uint32, reflect.Uint64:
if !viaPtr && isProto3Zero(in) {
return
}
out.Set(in)
case reflect.Interface:
// Probably a oneof field; copy non-nil values.
if in.IsNil() {
return
}
// Allocate destination if it is not set, or set to a different type.
// Otherwise we will merge as normal.
if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
}
mergeAny(out.Elem(), in.Elem(), false, nil)
case reflect.Map:
if in.Len() == 0 {
return
}
if out.IsNil() {
out.Set(reflect.MakeMap(in.Type()))
}
// For maps with value types of *T or []byte we need to deep copy each value.
elemKind := in.Type().Elem().Kind()
for _, key := range in.MapKeys() {
var val reflect.Value
switch elemKind {
case reflect.Ptr:
val = reflect.New(in.Type().Elem().Elem())
mergeAny(val, in.MapIndex(key), false, nil)
case reflect.Slice:
val = in.MapIndex(key)
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
default:
val = in.MapIndex(key)
}
out.SetMapIndex(key, val)
}
case reflect.Ptr:
if in.IsNil() {
return
}
if out.IsNil() {
out.Set(reflect.New(in.Elem().Type()))
}
mergeAny(out.Elem(), in.Elem(), true, nil)
case reflect.Slice:
if in.IsNil() {
return
}
if in.Type().Elem().Kind() == reflect.Uint8 {
// []byte is a scalar bytes field, not a repeated field.
// Edge case: if this is in a proto3 message, a zero length
// bytes field is considered the zero value, and should not
// be merged.
if prop != nil && prop.proto3 && in.Len() == 0 {
return
}
// Make a deep copy.
// Append to []byte{} instead of []byte(nil) so that we never end up
// with a nil result.
out.SetBytes(append([]byte{}, in.Bytes()...))
return
}
n := in.Len()
if out.IsNil() {
out.Set(reflect.MakeSlice(in.Type(), 0, n))
}
switch in.Type().Elem().Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
reflect.String, reflect.Uint32, reflect.Uint64:
out.Set(reflect.AppendSlice(out, in))
default:
for i := 0; i < n; i++ {
x := reflect.Indirect(reflect.New(in.Type().Elem()))
mergeAny(x, in.Index(i), false, nil)
out.Set(reflect.Append(out, x))
}
}
case reflect.Struct:
mergeStruct(out, in)
default:
// unknown type, so not a protocol buffer
log.Printf("proto: don't know how to copy %v", in)
}
}
func mergeExtension(out, in map[int32]Extension) {
for extNum, eIn := range in {
eOut := Extension{desc: eIn.desc}
if eIn.value != nil {
v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
eOut.value = v.Interface()
}
if eIn.enc != nil {
eOut.enc = make([]byte, len(eIn.enc))
copy(eOut.enc, eIn.enc)
}
out[extNum] = eOut
}
}
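
The Clone and Merge semantics documented above (deep copy; repeated fields appended, set scalar fields overwritten) can be sketched with descriptorpb.FileDescriptorProto standing in for a generated message; the example relies only on behavior stated in the comments above.

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        "google.golang.org/protobuf/types/descriptorpb"
    )

    func main() {
        src := &descriptorpb.FileDescriptorProto{
            Name:       proto.String("a.proto"),
            Dependency: []string{"x.proto"},
        }

        // Clone returns a deep copy, so mutating the copy leaves src untouched.
        dst := proto.Clone(src).(*descriptorpb.FileDescriptorProto)
        dst.Dependency[0] = "y.proto"
        fmt.Println(src.GetDependency()) // [x.proto]

        // Merge appends elements of repeated fields and copies set scalar fields.
        proto.Merge(dst, src)
        fmt.Println(dst.GetDependency()) // [y.proto x.proto]
    }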


@@ -1,427 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Routines for decoding protocol buffer data to construct in-memory representations.
*/
import (
"errors"
"fmt"
"io"
)
// errOverflow is returned when an integer is too large to be represented.
var errOverflow = errors.New("proto: integer overflow")
// ErrInternalBadWireType is returned by generated code when an incorrect
// wire type is encountered. It does not get returned to user code.
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
// DecodeVarint reads a varint-encoded integer from the slice.
// It returns the integer and the number of bytes consumed, or
// zero if there is not enough data.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func DecodeVarint(buf []byte) (x uint64, n int) {
for shift := uint(0); shift < 64; shift += 7 {
if n >= len(buf) {
return 0, 0
}
b := uint64(buf[n])
n++
x |= (b & 0x7F) << shift
if (b & 0x80) == 0 {
return x, n
}
}
// The number is too large to represent in a 64-bit value.
return 0, 0
}
func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
i := p.index
l := len(p.buf)
for shift := uint(0); shift < 64; shift += 7 {
if i >= l {
err = io.ErrUnexpectedEOF
return
}
b := p.buf[i]
i++
x |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
p.index = i
return
}
}
// The number is too large to represent in a 64-bit value.
err = errOverflow
return
}
// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
i := p.index
buf := p.buf
if i >= len(buf) {
return 0, io.ErrUnexpectedEOF
} else if buf[i] < 0x80 {
p.index++
return uint64(buf[i]), nil
} else if len(buf)-i < 10 {
return p.decodeVarintSlow()
}
var b uint64
// we already checked the first byte
x = uint64(buf[i]) - 0x80
i++
b = uint64(buf[i])
i++
x += b << 7
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 7
b = uint64(buf[i])
i++
x += b << 14
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 14
b = uint64(buf[i])
i++
x += b << 21
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 21
b = uint64(buf[i])
i++
x += b << 28
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 28
b = uint64(buf[i])
i++
x += b << 35
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 35
b = uint64(buf[i])
i++
x += b << 42
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 42
b = uint64(buf[i])
i++
x += b << 49
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 49
b = uint64(buf[i])
i++
x += b << 56
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 56
b = uint64(buf[i])
i++
x += b << 63
if b&0x80 == 0 {
goto done
}
return 0, errOverflow
done:
p.index = i
return x, nil
}
// DecodeFixed64 reads a 64-bit integer from the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
// x, err already 0
i := p.index + 8
if i < 0 || i > len(p.buf) {
err = io.ErrUnexpectedEOF
return
}
p.index = i
x = uint64(p.buf[i-8])
x |= uint64(p.buf[i-7]) << 8
x |= uint64(p.buf[i-6]) << 16
x |= uint64(p.buf[i-5]) << 24
x |= uint64(p.buf[i-4]) << 32
x |= uint64(p.buf[i-3]) << 40
x |= uint64(p.buf[i-2]) << 48
x |= uint64(p.buf[i-1]) << 56
return
}
// DecodeFixed32 reads a 32-bit integer from the Buffer.
// This is the format for the
// fixed32, sfixed32, and float protocol buffer types.
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
// x, err already 0
i := p.index + 4
if i < 0 || i > len(p.buf) {
err = io.ErrUnexpectedEOF
return
}
p.index = i
x = uint64(p.buf[i-4])
x |= uint64(p.buf[i-3]) << 8
x |= uint64(p.buf[i-2]) << 16
x |= uint64(p.buf[i-1]) << 24
return
}
// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
// from the Buffer.
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
x, err = p.DecodeVarint()
if err != nil {
return
}
x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
return
}
// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
// from the Buffer.
// This is the format used for the sint32 protocol buffer type.
func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
x, err = p.DecodeVarint()
if err != nil {
return
}
x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
return
}
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
n, err := p.DecodeVarint()
if err != nil {
return nil, err
}
nb := int(n)
if nb < 0 {
return nil, fmt.Errorf("proto: bad byte length %d", nb)
}
end := p.index + nb
if end < p.index || end > len(p.buf) {
return nil, io.ErrUnexpectedEOF
}
if !alloc {
// todo: check if can get more uses of alloc=false
buf = p.buf[p.index:end]
p.index += nb
return
}
buf = make([]byte, nb)
copy(buf, p.buf[p.index:])
p.index += nb
return
}
// DecodeStringBytes reads an encoded string from the Buffer.
// This is the format used for the proto2 string type.
func (p *Buffer) DecodeStringBytes() (s string, err error) {
buf, err := p.DecodeRawBytes(false)
if err != nil {
return
}
return string(buf), nil
}
// Unmarshaler is the interface representing objects that can
// unmarshal themselves. The argument points to data that may be
// overwritten, so implementations should not keep references to the
// buffer.
// Unmarshal implementations should not clear the receiver.
// Any unmarshaled data should be merged into the receiver.
// Callers of Unmarshal that do not want to retain existing data
// should Reset the receiver before calling Unmarshal.
type Unmarshaler interface {
Unmarshal([]byte) error
}
// newUnmarshaler is the interface representing objects that can
// unmarshal themselves. The semantics are identical to Unmarshaler.
//
// This exists to support protoc-gen-go generated messages.
// The proto package will stop type-asserting to this interface in the future.
//
// DO NOT DEPEND ON THIS.
type newUnmarshaler interface {
XXX_Unmarshal([]byte) error
}
// Unmarshal parses the protocol buffer representation in buf and places the
// decoded result in pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// Unmarshal resets pb before starting to unmarshal, so any
// existing data in pb is always removed. Use UnmarshalMerge
// to preserve and append to existing data.
func Unmarshal(buf []byte, pb Message) error {
pb.Reset()
if u, ok := pb.(newUnmarshaler); ok {
return u.XXX_Unmarshal(buf)
}
if u, ok := pb.(Unmarshaler); ok {
return u.Unmarshal(buf)
}
return NewBuffer(buf).Unmarshal(pb)
}
// UnmarshalMerge parses the protocol buffer representation in buf and
// writes the decoded result to pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// UnmarshalMerge merges into existing data in pb.
// Most code should use Unmarshal instead.
func UnmarshalMerge(buf []byte, pb Message) error {
if u, ok := pb.(newUnmarshaler); ok {
return u.XXX_Unmarshal(buf)
}
if u, ok := pb.(Unmarshaler); ok {
// NOTE: The history of proto has unfortunately been inconsistent about
// whether Unmarshaler should or should not implicitly clear itself.
// Some implementations do, most do not.
// Thus, calling this here may or may not do what people want.
//
// See https://github.com/golang/protobuf/issues/424
return u.Unmarshal(buf)
}
return NewBuffer(buf).Unmarshal(pb)
}
// DecodeMessage reads a count-delimited message from the Buffer.
func (p *Buffer) DecodeMessage(pb Message) error {
enc, err := p.DecodeRawBytes(false)
if err != nil {
return err
}
return NewBuffer(enc).Unmarshal(pb)
}
// DecodeGroup reads a tag-delimited group from the Buffer.
// StartGroup tag is already consumed. This function consumes
// EndGroup tag.
func (p *Buffer) DecodeGroup(pb Message) error {
b := p.buf[p.index:]
x, y := findEndGroup(b)
if x < 0 {
return io.ErrUnexpectedEOF
}
err := Unmarshal(b[:x], pb)
p.index += y
return err
}
// Unmarshal parses the protocol buffer representation in the
// Buffer and places the decoded result in pb. If the struct
// underlying pb does not match the data in the buffer, the results can be
// unpredictable.
//
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
func (p *Buffer) Unmarshal(pb Message) error {
// If the object can unmarshal itself, let it.
if u, ok := pb.(newUnmarshaler); ok {
err := u.XXX_Unmarshal(p.buf[p.index:])
p.index = len(p.buf)
return err
}
if u, ok := pb.(Unmarshaler); ok {
// NOTE: The history of proto has unfortunately been inconsistent about
// whether Unmarshaler should or should not implicitly clear itself.
// Some implementations do, most do not.
// Thus, calling this here may or may not do what people want.
//
// See https://github.com/golang/protobuf/issues/424
err := u.Unmarshal(p.buf[p.index:])
p.index = len(p.buf)
return err
}
// Slow workaround for messages that aren't Unmarshalers.
// This includes some hand-coded .pb.go files and
// bootstrap protos.
// TODO: fix all of those and then add Unmarshal to
// the Message interface. Then:
// The cast above and code below can be deleted.
// The old unmarshaler can be deleted.
// Clients can call Unmarshal directly (can already do that, actually).
var info InternalMessageInfo
err := info.Unmarshal(pb, p.buf[p.index:])
p.index = len(p.buf)
return err
}
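
The unrolled decoder above implements the standard base-128 varint layout: seven payload bits per byte, least-significant group first, with the high bit acting as a continuation flag. The package-level helpers (kept in the new buffer.go, now backed by protowire) make that layout easy to inspect:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    func main() {
        // 300 = 0b1_0010_1100: the low seven bits 0101100 go into the first
        // byte with the continuation bit set (0xAC); the remaining bits 10
        // form the second byte (0x02).
        enc := proto.EncodeVarint(300)
        fmt.Printf("% x\n", enc) // ac 02

        v, n := proto.DecodeVarint(enc)
        fmt.Println(v, n) // 300 2

        // A truncated varint (continuation bit still set on the last byte)
        // yields (0, 0).
        v, n = proto.DecodeVarint([]byte{0xAC})
        fmt.Println(v, n) // 0 0
    }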

vendor/github.com/golang/protobuf/proto/defaults.go (generated, vendored, new file) | 63

@@ -0,0 +1,63 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"google.golang.org/protobuf/reflect/protoreflect"
)
// SetDefaults sets unpopulated scalar fields to their default values.
// Fields within a oneof are not set even if they have a default value.
// SetDefaults is recursively called upon any populated message fields.
func SetDefaults(m Message) {
if m != nil {
setDefaults(MessageReflect(m))
}
}
func setDefaults(m protoreflect.Message) {
fds := m.Descriptor().Fields()
for i := 0; i < fds.Len(); i++ {
fd := fds.Get(i)
if !m.Has(fd) {
if fd.HasDefault() && fd.ContainingOneof() == nil {
v := fd.Default()
if fd.Kind() == protoreflect.BytesKind {
v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
}
m.Set(fd, v)
}
continue
}
}
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
switch {
// Handle singular message.
case fd.Cardinality() != protoreflect.Repeated:
if fd.Message() != nil {
setDefaults(m.Get(fd).Message())
}
// Handle list of messages.
case fd.IsList():
if fd.Message() != nil {
ls := m.Get(fd).List()
for i := 0; i < ls.Len(); i++ {
setDefaults(ls.Get(i).Message())
}
}
// Handle map of messages.
case fd.IsMap():
if fd.MapValue().Message() != nil {
ms := m.Get(fd).Map()
ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
setDefaults(v.Message())
return true
})
}
}
return true
})
}
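
A small sketch of SetDefaults, assuming descriptorpb.FileOptions as the message (its optimize_for field declares default = SPEED in descriptor.proto); only unset fields outside a oneof are populated.

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        "google.golang.org/protobuf/types/descriptorpb"
    )

    func main() {
        opts := &descriptorpb.FileOptions{}
        fmt.Println(opts.OptimizeFor == nil) // true: the field is unset

        // SetDefaults materializes the declared default into the field itself.
        proto.SetDefaults(opts)
        fmt.Println(opts.OptimizeFor != nil, opts.GetOptimizeFor()) // true SPEED
    }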


@@ -1,63 +1,92 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2018 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import "errors"
import (
"encoding/json"
"errors"
"fmt"
"strconv"
)
// Deprecated: do not use.
var (
// Deprecated: No longer returned.
ErrNil = errors.New("proto: Marshal called with nil")
// Deprecated: No longer returned.
ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
// Deprecated: No longer returned.
ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
)
// Deprecated: Do not use.
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
// Deprecated: do not use.
// Deprecated: Do not use.
func GetStats() Stats { return Stats{} }
// Deprecated: do not use.
// Deprecated: Do not use.
func MarshalMessageSet(interface{}) ([]byte, error) {
return nil, errors.New("proto: not implemented")
}
// Deprecated: do not use.
// Deprecated: Do not use.
func UnmarshalMessageSet([]byte, interface{}) error {
return errors.New("proto: not implemented")
}
// Deprecated: do not use.
// Deprecated: Do not use.
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
return nil, errors.New("proto: not implemented")
}
// Deprecated: do not use.
// Deprecated: Do not use.
func UnmarshalMessageSetJSON([]byte, interface{}) error {
return errors.New("proto: not implemented")
}
// Deprecated: do not use.
// Deprecated: Do not use.
func RegisterMessageSetType(Message, int32, string) {}
// Deprecated: Do not use.
func EnumName(m map[int32]string, v int32) string {
s, ok := m[v]
if ok {
return s
}
return strconv.Itoa(int(v))
}
// Deprecated: Do not use.
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
if data[0] == '"' {
// New style: enums are strings.
var repr string
if err := json.Unmarshal(data, &repr); err != nil {
return -1, err
}
val, ok := m[repr]
if !ok {
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
}
return val, nil
}
// Old style: enums are ints.
var val int32
if err := json.Unmarshal(data, &val); err != nil {
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
}
return val, nil
}
// Deprecated: Do not use.
type InternalMessageInfo struct{}
func (*InternalMessageInfo) DiscardUnknown(Message) { panic("not implemented") }
func (*InternalMessageInfo) Marshal([]byte, Message, bool) ([]byte, error) { panic("not implemented") }
func (*InternalMessageInfo) Merge(Message, Message) { panic("not implemented") }
func (*InternalMessageInfo) Size(Message) int { panic("not implemented") }
func (*InternalMessageInfo) Unmarshal(Message, []byte) error { panic("not implemented") }
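
EnumName and UnmarshalJSONEnum, though deprecated, still implement the old JSON convention in which an enum value may appear either as its name string or as its integer value. A sketch with hypothetical name/value maps shaped like the ones protoc-gen-go emits:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    // Hypothetical maps in the shape protoc-gen-go generates for an enum.
    var (
        colorName  = map[int32]string{0: "COLOR_UNSPECIFIED", 1: "RED"}
        colorValue = map[string]int32{"COLOR_UNSPECIFIED": 0, "RED": 1}
    )

    func main() {
        fmt.Println(proto.EnumName(colorName, 1)) // RED
        fmt.Println(proto.EnumName(colorName, 7)) // 7: unknown values fall back to the number

        v, _ := proto.UnmarshalJSONEnum(colorValue, []byte(`"RED"`), "Color") // new style: string
        w, _ := proto.UnmarshalJSONEnum(colorValue, []byte(`1`), "Color")     // old style: number
        fmt.Println(v, w) // 1 1
    }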


@@ -1,48 +1,13 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2017 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"fmt"
"reflect"
"strings"
"sync"
"sync/atomic"
"google.golang.org/protobuf/reflect/protoreflect"
)
type generatedDiscarder interface {
XXX_DiscardUnknown()
}
// DiscardUnknown recursively discards all unknown fields from this message
// and all embedded messages.
//
@@ -51,300 +16,43 @@ type generatedDiscarder interface {
// marshal to be able to produce a message that continues to have those
// unrecognized fields. To avoid this, DiscardUnknown is used to
// explicitly clear the unknown fields after unmarshaling.
//
// For proto2 messages, the unknown fields of message extensions are only
// discarded from messages that have been accessed via GetExtension.
func DiscardUnknown(m Message) {
if m, ok := m.(generatedDiscarder); ok {
m.XXX_DiscardUnknown()
return
if m != nil {
discardUnknown(MessageReflect(m))
}
// TODO: Dynamically populate a InternalMessageInfo for legacy messages,
// but the master branch has no implementation for InternalMessageInfo,
// so it would be more work to replicate that approach.
discardLegacy(m)
}
// DiscardUnknown recursively discards all unknown fields.
func (a *InternalMessageInfo) DiscardUnknown(m Message) {
di := atomicLoadDiscardInfo(&a.discard)
if di == nil {
di = getDiscardInfo(reflect.TypeOf(m).Elem())
atomicStoreDiscardInfo(&a.discard, di)
}
di.discard(toPointer(&m))
}
type discardInfo struct {
typ reflect.Type
initialized int32 // 0: only typ is valid, 1: everything is valid
lock sync.Mutex
fields []discardFieldInfo
unrecognized field
}
type discardFieldInfo struct {
field field // Offset of field, guaranteed to be valid
discard func(src pointer)
}
var (
discardInfoMap = map[reflect.Type]*discardInfo{}
discardInfoLock sync.Mutex
)
func getDiscardInfo(t reflect.Type) *discardInfo {
discardInfoLock.Lock()
defer discardInfoLock.Unlock()
di := discardInfoMap[t]
if di == nil {
di = &discardInfo{typ: t}
discardInfoMap[t] = di
}
return di
}
func (di *discardInfo) discard(src pointer) {
if src.isNil() {
return // Nothing to do.
}
if atomic.LoadInt32(&di.initialized) == 0 {
di.computeDiscardInfo()
}
for _, fi := range di.fields {
sfp := src.offset(fi.field)
fi.discard(sfp)
}
// For proto2 messages, only discard unknown fields in message extensions
// that have been accessed via GetExtension.
if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
// Ignore lock since DiscardUnknown is not concurrency safe.
emm, _ := em.extensionsRead()
for _, mx := range emm {
if m, ok := mx.value.(Message); ok {
DiscardUnknown(m)
func discardUnknown(m protoreflect.Message) {
m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
switch {
// Handle singular message.
case fd.Cardinality() != protoreflect.Repeated:
if fd.Message() != nil {
discardUnknown(m.Get(fd).Message())
}
}
}
if di.unrecognized.IsValid() {
*src.offset(di.unrecognized).toBytes() = nil
}
}
func (di *discardInfo) computeDiscardInfo() {
di.lock.Lock()
defer di.lock.Unlock()
if di.initialized != 0 {
return
}
t := di.typ
n := t.NumField()
for i := 0; i < n; i++ {
f := t.Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
dfi := discardFieldInfo{field: toField(&f)}
tf := f.Type
// Unwrap tf to get its most basic type.
var isPointer, isSlice bool
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
isSlice = true
tf = tf.Elem()
}
if tf.Kind() == reflect.Ptr {
isPointer = true
tf = tf.Elem()
}
if isPointer && isSlice && tf.Kind() != reflect.Struct {
panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
}
switch tf.Kind() {
case reflect.Struct:
switch {
case !isPointer:
panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
case isSlice: // E.g., []*pb.T
di := getDiscardInfo(tf)
dfi.discard = func(src pointer) {
sps := src.getPointerSlice()
for _, sp := range sps {
if !sp.isNil() {
di.discard(sp)
}
}
}
default: // E.g., *pb.T
di := getDiscardInfo(tf)
dfi.discard = func(src pointer) {
sp := src.getPointer()
if !sp.isNil() {
di.discard(sp)
}
// Handle list of messages.
case fd.IsList():
if fd.Message() != nil {
ls := m.Get(fd).List()
for i := 0; i < ls.Len(); i++ {
discardUnknown(ls.Get(i).Message())
}
}
case reflect.Map:
switch {
case isPointer || isSlice:
panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
default: // E.g., map[K]V
if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
dfi.discard = func(src pointer) {
sm := src.asPointerTo(tf).Elem()
if sm.Len() == 0 {
return
}
for _, key := range sm.MapKeys() {
val := sm.MapIndex(key)
DiscardUnknown(val.Interface().(Message))
}
}
} else {
dfi.discard = func(pointer) {} // Noop
}
}
case reflect.Interface:
// Must be oneof field.
switch {
case isPointer || isSlice:
panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
default: // E.g., interface{}
// TODO: Make this faster?
dfi.discard = func(src pointer) {
su := src.asPointerTo(tf).Elem()
if !su.IsNil() {
sv := su.Elem().Elem().Field(0)
if sv.Kind() == reflect.Ptr && sv.IsNil() {
return
}
switch sv.Type().Kind() {
case reflect.Ptr: // Proto struct (e.g., *T)
DiscardUnknown(sv.Interface().(Message))
}
}
}
}
default:
continue
}
di.fields = append(di.fields, dfi)
}
di.unrecognized = invalidField
if f, ok := t.FieldByName("XXX_unrecognized"); ok {
if f.Type != reflect.TypeOf([]byte{}) {
panic("expected XXX_unrecognized to be of type []byte")
}
di.unrecognized = toField(&f)
}
atomic.StoreInt32(&di.initialized, 1)
}
func discardLegacy(m Message) {
v := reflect.ValueOf(m)
if v.Kind() != reflect.Ptr || v.IsNil() {
return
}
v = v.Elem()
if v.Kind() != reflect.Struct {
return
}
t := v.Type()
for i := 0; i < v.NumField(); i++ {
f := t.Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
vf := v.Field(i)
tf := f.Type
// Unwrap tf to get its most basic type.
var isPointer, isSlice bool
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
isSlice = true
tf = tf.Elem()
}
if tf.Kind() == reflect.Ptr {
isPointer = true
tf = tf.Elem()
}
if isPointer && isSlice && tf.Kind() != reflect.Struct {
panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
}
switch tf.Kind() {
case reflect.Struct:
switch {
case !isPointer:
panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
case isSlice: // E.g., []*pb.T
for j := 0; j < vf.Len(); j++ {
discardLegacy(vf.Index(j).Interface().(Message))
}
default: // E.g., *pb.T
discardLegacy(vf.Interface().(Message))
}
case reflect.Map:
switch {
case isPointer || isSlice:
panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
default: // E.g., map[K]V
tv := vf.Type().Elem()
if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
for _, key := range vf.MapKeys() {
val := vf.MapIndex(key)
discardLegacy(val.Interface().(Message))
}
}
}
case reflect.Interface:
// Must be oneof field.
switch {
case isPointer || isSlice:
panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
default: // E.g., test_proto.isCommunique_Union interface
if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
vf = vf.Elem() // E.g., *test_proto.Communique_Msg
if !vf.IsNil() {
vf = vf.Elem() // E.g., test_proto.Communique_Msg
vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
if vf.Kind() == reflect.Ptr {
discardLegacy(vf.Interface().(Message))
}
}
}
// Handle map of messages.
case fd.IsMap():
if fd.MapValue().Message() != nil {
ms := m.Get(fd).Map()
ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
discardUnknown(v.Message())
return true
})
}
}
}
return true
})
if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
if vf.Type() != reflect.TypeOf([]byte{}) {
panic("expected XXX_unrecognized to be of type []byte")
}
vf.Set(reflect.ValueOf([]byte(nil)))
}
// For proto2 messages, only discard unknown fields in message extensions
// that have been accessed via GetExtension.
if em, err := extendable(m); err == nil {
// Ignore lock since discardLegacy is not concurrency safe.
emm, _ := em.extensionsRead()
for _, mx := range emm {
if m, ok := mx.value.(Message); ok {
discardLegacy(m)
}
}
// Discard unknown fields.
if len(m.GetUnknown()) > 0 {
m.SetUnknown(nil)
}
}
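// Illustrative sketch, not part of the vendored file: DiscardUnknown is
// normally called right after Unmarshal when a later Marshal must not carry
// forward fields that the current schema does not know about (msg is a
// placeholder for any generated message value):
//
//	if err := proto.Unmarshal(data, msg); err != nil {
//		return err
//	}
//	proto.DiscardUnknown(msg)
//	out, err := proto.Marshal(msg)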


@@ -1,203 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Routines for encoding data into the wire format for protocol buffers.
*/
import (
"errors"
"reflect"
)
var (
// errRepeatedHasNil is the error returned if Marshal is called with
// a struct with a repeated field containing a nil element.
errRepeatedHasNil = errors.New("proto: repeated field has nil element")
// errOneofHasNil is the error returned if Marshal is called with
// a struct with a oneof field containing a nil element.
errOneofHasNil = errors.New("proto: oneof field has nil value")
// ErrNil is the error returned if Marshal is called with nil.
ErrNil = errors.New("proto: Marshal called with nil")
// ErrTooLarge is the error returned if Marshal is called with a
// message that encodes to >2GB.
ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
)
// The fundamental encoders that put bytes on the wire.
// Those that take integer types all accept uint64 and are
// therefore of type valueEncoder.
const maxVarintBytes = 10 // maximum length of a varint
// EncodeVarint returns the varint encoding of x.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
// Not used by the package itself, but helpful to clients
// wishing to use the same encoding.
func EncodeVarint(x uint64) []byte {
var buf [maxVarintBytes]byte
var n int
for n = 0; x > 127; n++ {
buf[n] = 0x80 | uint8(x&0x7F)
x >>= 7
}
buf[n] = uint8(x)
n++
return buf[0:n]
}
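// Illustrative sketch, not part of the vendored file: 300 is 0b1_0010_1100,
// so EncodeVarint emits the low 7 bits first with the continuation bit set,
// then the remaining bits: []byte{0xac, 0x02}.
func exampleEncodeVarint() []byte {
	return EncodeVarint(300) // 0xac, 0x02
}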
// EncodeVarint writes a varint-encoded integer to the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) EncodeVarint(x uint64) error {
for x >= 1<<7 {
p.buf = append(p.buf, uint8(x&0x7f|0x80))
x >>= 7
}
p.buf = append(p.buf, uint8(x))
return nil
}
// SizeVarint returns the varint encoding size of an integer.
func SizeVarint(x uint64) int {
switch {
case x < 1<<7:
return 1
case x < 1<<14:
return 2
case x < 1<<21:
return 3
case x < 1<<28:
return 4
case x < 1<<35:
return 5
case x < 1<<42:
return 6
case x < 1<<49:
return 7
case x < 1<<56:
return 8
case x < 1<<63:
return 9
}
return 10
}
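// Illustrative sketch, not part of the vendored file: SizeVarint(x) always
// equals len(EncodeVarint(x)), which lets callers reserve space before
// encoding length-prefixed data.
func exampleSizeVarint() {
	_ = SizeVarint(127)     // 1 byte: fits in 7 bits
	_ = SizeVarint(128)     // 2 bytes: needs a continuation byte
	_ = SizeVarint(1 << 63) // 10 bytes: the maximum for a uint64
}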
// EncodeFixed64 writes a 64-bit integer to the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
func (p *Buffer) EncodeFixed64(x uint64) error {
p.buf = append(p.buf,
uint8(x),
uint8(x>>8),
uint8(x>>16),
uint8(x>>24),
uint8(x>>32),
uint8(x>>40),
uint8(x>>48),
uint8(x>>56))
return nil
}
// EncodeFixed32 writes a 32-bit integer to the Buffer.
// This is the format for the
// fixed32, sfixed32, and float protocol buffer types.
func (p *Buffer) EncodeFixed32(x uint64) error {
p.buf = append(p.buf,
uint8(x),
uint8(x>>8),
uint8(x>>16),
uint8(x>>24))
return nil
}
// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
// to the Buffer.
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) EncodeZigzag64(x uint64) error {
// use signed number to get arithmetic right shift.
return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
// to the Buffer.
// This is the format used for the sint32 protocol buffer type.
func (p *Buffer) EncodeZigzag32(x uint64) error {
// use signed number to get arithmetic right shift.
return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
}
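// Illustrative sketch, not part of the vendored file: zigzag encoding maps
// small-magnitude signed values to small unsigned varints
// (0->0, -1->1, 1->2, -2->3, ...), so sint32/sint64 fields stay short for
// negative numbers, unlike plain int64 varints, which always take 10 bytes
// for negative values.
func exampleZigzag64(v int64) uint64 {
	// The arithmetic right shift replicates the sign bit across the word.
	return uint64(v<<1) ^ uint64(v>>63)
}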
// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
func (p *Buffer) EncodeRawBytes(b []byte) error {
p.EncodeVarint(uint64(len(b)))
p.buf = append(p.buf, b...)
return nil
}
// EncodeStringBytes writes an encoded string to the Buffer.
// This is the format used for the proto2 string type.
func (p *Buffer) EncodeStringBytes(s string) error {
p.EncodeVarint(uint64(len(s)))
p.buf = append(p.buf, s...)
return nil
}
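// Illustrative sketch, not part of the vendored file: EncodeRawBytes and
// EncodeStringBytes share the length-delimited layout used for bytes, string
// and embedded message fields, i.e. a varint length followed by the payload.
func exampleLengthDelimited() []byte {
	var b Buffer
	_ = b.EncodeStringBytes("hi") // appends 0x02, 'h', 'i'
	return b.Bytes()
}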
// Marshaler is the interface representing objects that can marshal themselves.
type Marshaler interface {
Marshal() ([]byte, error)
}
// EncodeMessage writes the protocol buffer to the Buffer,
// prefixed by a varint-encoded length.
func (p *Buffer) EncodeMessage(pb Message) error {
siz := Size(pb)
p.EncodeVarint(uint64(siz))
return p.Marshal(pb)
}
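// Illustrative sketch, not part of the vendored file: DecodeMessage (defined
// elsewhere in this package) is the read-side counterpart, so the pair gives
// a simple length-prefixed framing for streams of messages.
func exampleRoundTripLengthPrefixed(in, out Message) error {
	var b Buffer
	if err := b.EncodeMessage(in); err != nil {
		return err
	}
	return NewBuffer(b.Bytes()).DecodeMessage(out)
}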
// All protocol buffer fields are nillable, but be careful.
func isNil(v reflect.Value) bool {
switch v.Kind() {
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
return v.IsNil()
}
return false
}


@@ -1,301 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Protocol buffer comparison.
package proto
import (
"bytes"
"log"
"reflect"
"strings"
)
/*
Equal returns true iff protocol buffers a and b are equal.
The arguments must both be pointers to protocol buffer structs.
Equality is defined in this way:
- Two messages are equal iff they are the same type,
corresponding fields are equal, unknown field sets
are equal, and extensions sets are equal.
- Two set scalar fields are equal iff their values are equal.
If the fields are of a floating-point type, remember that
NaN != x for all x, including NaN. If the message is defined
in a proto3 .proto file, fields are not "set"; specifically,
zero length proto3 "bytes" fields are equal (nil == {}).
- Two repeated fields are equal iff their lengths are the same,
and their corresponding elements are equal. Note a "bytes" field,
although represented by []byte, is not a repeated field and the
rule for the scalar fields described above applies.
- Two unset fields are equal.
- Two unknown field sets are equal if their current
encoded state is equal.
- Two extension sets are equal iff they have corresponding
elements that are pairwise equal.
- Two map fields are equal iff their lengths are the same,
and they contain the same set of elements. Zero-length map
fields are equal.
- Every other combination of things is not equal.
The return value is undefined if a and b are not protocol buffers.
*/
func Equal(a, b Message) bool {
if a == nil || b == nil {
return a == b
}
v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
if v1.Type() != v2.Type() {
return false
}
if v1.Kind() == reflect.Ptr {
if v1.IsNil() {
return v2.IsNil()
}
if v2.IsNil() {
return false
}
v1, v2 = v1.Elem(), v2.Elem()
}
if v1.Kind() != reflect.Struct {
return false
}
return equalStruct(v1, v2)
}
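// Illustrative sketch, not part of the vendored file: Equal compares messages
// semantically, so it is the right tool when == would only compare pointers
// and comparing marshaled bytes would be sensitive to encoding details.
func exampleSameMessage(a, b Message) bool {
	return a == b || Equal(a, b)
}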
// v1 and v2 are known to have the same type.
func equalStruct(v1, v2 reflect.Value) bool {
sprop := GetProperties(v1.Type())
for i := 0; i < v1.NumField(); i++ {
f := v1.Type().Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
f1, f2 := v1.Field(i), v2.Field(i)
if f.Type.Kind() == reflect.Ptr {
if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
// both unset
continue
} else if n1 != n2 {
// set/unset mismatch
return false
}
f1, f2 = f1.Elem(), f2.Elem()
}
if !equalAny(f1, f2, sprop.Prop[i]) {
return false
}
}
if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
em2 := v2.FieldByName("XXX_InternalExtensions")
if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
return false
}
}
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
em2 := v2.FieldByName("XXX_extensions")
if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
return false
}
}
uf := v1.FieldByName("XXX_unrecognized")
if !uf.IsValid() {
return true
}
u1 := uf.Bytes()
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
return bytes.Equal(u1, u2)
}
// v1 and v2 are known to have the same type.
// prop may be nil.
func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
if v1.Type() == protoMessageType {
m1, _ := v1.Interface().(Message)
m2, _ := v2.Interface().(Message)
return Equal(m1, m2)
}
switch v1.Kind() {
case reflect.Bool:
return v1.Bool() == v2.Bool()
case reflect.Float32, reflect.Float64:
return v1.Float() == v2.Float()
case reflect.Int32, reflect.Int64:
return v1.Int() == v2.Int()
case reflect.Interface:
// Probably a oneof field; compare the inner values.
n1, n2 := v1.IsNil(), v2.IsNil()
if n1 || n2 {
return n1 == n2
}
e1, e2 := v1.Elem(), v2.Elem()
if e1.Type() != e2.Type() {
return false
}
return equalAny(e1, e2, nil)
case reflect.Map:
if v1.Len() != v2.Len() {
return false
}
for _, key := range v1.MapKeys() {
val2 := v2.MapIndex(key)
if !val2.IsValid() {
// This key was not found in the second map.
return false
}
if !equalAny(v1.MapIndex(key), val2, nil) {
return false
}
}
return true
case reflect.Ptr:
// Maps may have nil values in them, so check for nil.
if v1.IsNil() && v2.IsNil() {
return true
}
if v1.IsNil() != v2.IsNil() {
return false
}
return equalAny(v1.Elem(), v2.Elem(), prop)
case reflect.Slice:
if v1.Type().Elem().Kind() == reflect.Uint8 {
// short circuit: []byte
// Edge case: if this is in a proto3 message, a zero length
// bytes field is considered the zero value.
if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
return true
}
if v1.IsNil() != v2.IsNil() {
return false
}
return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
}
if v1.Len() != v2.Len() {
return false
}
for i := 0; i < v1.Len(); i++ {
if !equalAny(v1.Index(i), v2.Index(i), prop) {
return false
}
}
return true
case reflect.String:
return v1.Interface().(string) == v2.Interface().(string)
case reflect.Struct:
return equalStruct(v1, v2)
case reflect.Uint32, reflect.Uint64:
return v1.Uint() == v2.Uint()
}
// unknown type, so not a protocol buffer
log.Printf("proto: don't know how to compare %v", v1)
return false
}
// base is the struct type that the extensions are based on.
// x1 and x2 are InternalExtensions.
func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
em1, _ := x1.extensionsRead()
em2, _ := x2.extensionsRead()
return equalExtMap(base, em1, em2)
}
func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
if len(em1) != len(em2) {
return false
}
for extNum, e1 := range em1 {
e2, ok := em2[extNum]
if !ok {
return false
}
m1 := extensionAsLegacyType(e1.value)
m2 := extensionAsLegacyType(e2.value)
if m1 == nil && m2 == nil {
// Both have only encoded form.
if bytes.Equal(e1.enc, e2.enc) {
continue
}
// The bytes are different, but the extensions might still be
// equal. We need to decode them to compare.
}
if m1 != nil && m2 != nil {
// Both are unencoded.
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
return false
}
continue
}
// At least one is encoded. To do a semantically correct comparison
// we need to unmarshal them first.
var desc *ExtensionDesc
if m := extensionMaps[base]; m != nil {
desc = m[extNum]
}
if desc == nil {
// If both have only encoded form and the bytes are the same,
// it is handled above. We get here when the bytes are different.
// We don't know how to decode it, so just compare them as byte
// slices.
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
return false
}
var err error
if m1 == nil {
m1, err = decodeExtension(e1.enc, desc)
}
if m2 == nil && err == nil {
m2, err = decodeExtension(e2.enc, desc)
}
if err != nil {
// The encoded form is invalid.
log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
return false
}
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
return false
}
}
return true
}


@@ -1,310 +1,111 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
/*
* Types and routines for supporting protocol buffer extensions.
*/
import (
"errors"
"fmt"
"io"
"reflect"
"strconv"
"sync"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/runtime/protoiface"
"google.golang.org/protobuf/runtime/protoimpl"
)
// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
type (
// ExtensionDesc represents an extension descriptor and
// is used to interact with an extension field in a message.
//
// Variables of this type are generated in code by protoc-gen-go.
ExtensionDesc = protoimpl.ExtensionInfo
// ExtensionRange represents a range of message extensions.
// Used in code generated by protoc-gen-go.
ExtensionRange = protoiface.ExtensionRangeV1
// Deprecated: Do not use; this is an internal type.
Extension = protoimpl.ExtensionFieldV1
// Deprecated: Do not use; this is an internal type.
XXX_InternalExtensions = protoimpl.ExtensionFields
)
// ErrMissingExtension is the error returned when an extension is not present in a message.
var ErrMissingExtension = errors.New("proto: missing extension")
// ExtensionRange represents a range of message extensions for a protocol buffer.
// Used in code generated by the protocol compiler.
type ExtensionRange struct {
Start, End int32 // both inclusive
}
// extendableProto is an interface implemented by any protocol buffer generated by the current
// proto compiler that may be extended.
type extendableProto interface {
Message
ExtensionRangeArray() []ExtensionRange
extensionsWrite() map[int32]Extension
extensionsRead() (map[int32]Extension, sync.Locker)
}
// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
// version of the proto compiler that may be extended.
type extendableProtoV1 interface {
Message
ExtensionRangeArray() []ExtensionRange
ExtensionMap() map[int32]Extension
}
// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
type extensionAdapter struct {
extendableProtoV1
}
func (e extensionAdapter) extensionsWrite() map[int32]Extension {
return e.ExtensionMap()
}
func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
return e.ExtensionMap(), notLocker{}
}
// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
type notLocker struct{}
func (n notLocker) Lock() {}
func (n notLocker) Unlock() {}
// extendable returns the extendableProto interface for the given generated proto message.
// If the proto message has the old extension format, it returns a wrapper that implements
// the extendableProto interface.
func extendable(p interface{}) (extendableProto, error) {
switch p := p.(type) {
case extendableProto:
if isNilPtr(p) {
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
}
return p, nil
case extendableProtoV1:
if isNilPtr(p) {
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
}
return extensionAdapter{p}, nil
}
// Don't allocate a specific error containing %T:
// this is the hot path for Clone and MarshalText.
return nil, errNotExtendable
}
var errNotExtendable = errors.New("proto: not an extendable proto.Message")
func isNilPtr(x interface{}) bool {
v := reflect.ValueOf(x)
return v.Kind() == reflect.Ptr && v.IsNil()
}
// XXX_InternalExtensions is an internal representation of proto extensions.
//
// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
//
// The methods of XXX_InternalExtensions are not concurrency safe in general,
// but calls to logically read-only methods such as has and get may be executed concurrently.
type XXX_InternalExtensions struct {
// The struct must be indirect so that if a user inadvertently copies a
// generated message and its embedded XXX_InternalExtensions, they
// avoid the mayhem of a copied mutex.
//
// The mutex serializes all logically read-only operations to p.extensionMap.
// It is up to the client to ensure that write operations to p.extensionMap are
// mutually exclusive with other accesses.
p *struct {
mu sync.Mutex
extensionMap map[int32]Extension
// HasExtension reports whether the extension field is present in m
// either as an explicitly populated field or as an unknown field.
func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return false
}
}
// extensionsWrite returns the extension map, creating it on first use.
func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
if e.p == nil {
e.p = new(struct {
mu sync.Mutex
extensionMap map[int32]Extension
// Check whether any populated known field matches the field number.
xtd := xt.TypeDescriptor()
if isValidExtension(mr.Descriptor(), xtd) {
has = mr.Has(xtd)
} else {
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
has = int32(fd.Number()) == xt.Field
return !has
})
e.p.extensionMap = make(map[int32]Extension)
}
return e.p.extensionMap
}
// extensionsRead returns the extensions map for read-only use. It may be nil.
// The caller must hold the returned mutex's lock when accessing elements within the map.
func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
if e.p == nil {
return nil, nil
// Check whether any unknown field matches the field number.
for b := mr.GetUnknown(); !has && len(b) > 0; {
num, _, n := protowire.ConsumeField(b)
has = int32(num) == xt.Field
b = b[n:]
}
return e.p.extensionMap, &e.p.mu
return has
}
// ExtensionDesc represents an extension specification.
// Used in generated code from the protocol compiler.
type ExtensionDesc struct {
ExtendedType Message // nil pointer to the type that is being extended
ExtensionType interface{} // nil pointer to the extension type
Field int32 // field number
Name string // fully-qualified name of extension, for text formatting
Tag string // protobuf tag style
Filename string // name of the file in which the extension is defined
}
func (ed *ExtensionDesc) repeated() bool {
t := reflect.TypeOf(ed.ExtensionType)
return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
}
// Extension represents an extension in a message.
type Extension struct {
// When an extension is stored in a message using SetExtension
// only desc and value are set. When the message is marshaled
// enc will be set to the encoded form of the message.
//
// When a message is unmarshaled and contains extensions, each
// extension will have only enc set. When such an extension is
// accessed using GetExtension (or GetExtensions) desc and value
// will be set.
desc *ExtensionDesc
// value is a concrete value for the extension field. Let the type of
// desc.ExtensionType be the "API type" and the type of Extension.value
// be the "storage type". The API type and storage type are the same except:
// * For scalars (except []byte), the API type uses *T,
// while the storage type uses T.
// * For repeated fields, the API type uses []T, while the storage type
// uses *[]T.
//
// The reason for the divergence is so that the storage type more naturally
// matches what is expected when retrieving the values through the
// protobuf reflection APIs.
//
// The value may only be populated if desc is also populated.
value interface{}
// enc is the raw bytes for the extension field.
enc []byte
}
// SetRawExtension is for testing only.
func SetRawExtension(base Message, id int32, b []byte) {
epb, err := extendable(base)
if err != nil {
// ClearExtension removes the extension field from m
// either as an explicitly populated field or as an unknown field.
func ClearExtension(m Message, xt *ExtensionDesc) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return
}
extmap := epb.extensionsWrite()
extmap[id] = Extension{enc: b}
}
// isExtensionField returns true iff the given field number is in an extension range.
func isExtensionField(pb extendableProto, field int32) bool {
for _, er := range pb.ExtensionRangeArray() {
if er.Start <= field && field <= er.End {
xtd := xt.TypeDescriptor()
if isValidExtension(mr.Descriptor(), xtd) {
mr.Clear(xtd)
} else {
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
if int32(fd.Number()) == xt.Field {
mr.Clear(fd)
return false
}
return true
}
})
}
return false
clearUnknown(mr, fieldNum(xt.Field))
}
// checkExtensionTypes checks that the given extension is valid for pb.
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
var pbi interface{} = pb
// Check the extended type.
if ea, ok := pbi.(extensionAdapter); ok {
pbi = ea.extendableProtoV1
}
if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
}
// Check the range.
if !isExtensionField(pb, extension.Field) {
return errors.New("proto: bad extension number; not in declared ranges")
}
return nil
}
// extPropKey is sufficient to uniquely identify an extension.
type extPropKey struct {
base reflect.Type
field int32
}
var extProp = struct {
sync.RWMutex
m map[extPropKey]*Properties
}{
m: make(map[extPropKey]*Properties),
}
func extensionProperties(ed *ExtensionDesc) *Properties {
key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
extProp.RLock()
if prop, ok := extProp.m[key]; ok {
extProp.RUnlock()
return prop
}
extProp.RUnlock()
extProp.Lock()
defer extProp.Unlock()
// Check again.
if prop, ok := extProp.m[key]; ok {
return prop
}
prop := new(Properties)
prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
extProp.m[key] = prop
return prop
}
// HasExtension returns whether the given extension is present in pb.
func HasExtension(pb Message, extension *ExtensionDesc) bool {
// TODO: Check types, field numbers, etc.?
epb, err := extendable(pb)
if err != nil {
return false
}
extmap, mu := epb.extensionsRead()
if extmap == nil {
return false
}
mu.Lock()
_, ok := extmap[extension.Field]
mu.Unlock()
return ok
}
// ClearExtension removes the given extension from pb.
func ClearExtension(pb Message, extension *ExtensionDesc) {
epb, err := extendable(pb)
if err != nil {
// ClearAllExtensions clears all extensions from m.
// This includes populated fields and unknown fields in the extension range.
func ClearAllExtensions(m Message) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return
}
// TODO: Check types, field numbers, etc.?
extmap := epb.extensionsWrite()
delete(extmap, extension.Field)
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
if fd.IsExtension() {
mr.Clear(fd)
}
return true
})
clearUnknown(mr, mr.Descriptor().ExtensionRanges())
}
// GetExtension retrieves a proto2 extended field from pb.
@@ -314,294 +115,242 @@ func ClearExtension(pb Message, extension *ExtensionDesc) {
// If the field is not present, then the default value is returned (if one is specified),
// otherwise ErrMissingExtension is reported.
//
// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
// then GetExtension returns the raw encoded bytes of the field extension.
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
epb, err := extendable(pb)
if err != nil {
return nil, err
// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
// then GetExtension returns the raw encoded bytes for the extension field.
func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
return nil, errNotExtendable
}
if extension.ExtendedType != nil {
// can only check type if this is a complete descriptor
if err := checkExtensionTypes(epb, extension); err != nil {
// Retrieve the unknown fields for this extension field.
var bo protoreflect.RawFields
for bi := mr.GetUnknown(); len(bi) > 0; {
num, _, n := protowire.ConsumeField(bi)
if int32(num) == xt.Field {
bo = append(bo, bi[:n]...)
}
bi = bi[n:]
}
// For type incomplete descriptors, only retrieve the unknown fields.
if xt.ExtensionType == nil {
return []byte(bo), nil
}
// If the extension field only exists as unknown fields, unmarshal it.
// This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
xtd := xt.TypeDescriptor()
if !isValidExtension(mr.Descriptor(), xtd) {
return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
}
if !mr.Has(xtd) && len(bo) > 0 {
m2 := mr.New()
if err := (proto.UnmarshalOptions{
Resolver: extensionResolver{xt},
}.Unmarshal(bo, m2.Interface())); err != nil {
return nil, err
}
}
emap, mu := epb.extensionsRead()
if emap == nil {
return defaultExtensionValue(extension)
}
mu.Lock()
defer mu.Unlock()
e, ok := emap[extension.Field]
if !ok {
// defaultExtensionValue returns the default value or
// ErrMissingExtension if there is no default.
return defaultExtensionValue(extension)
}
if e.value != nil {
// Already decoded. Check the descriptor, though.
if e.desc != extension {
// This shouldn't happen. If it does, it means that
// GetExtension was called twice with two different
// descriptors with the same field number.
return nil, errors.New("proto: descriptor conflict")
if m2.Has(xtd) {
mr.Set(xtd, m2.Get(xtd))
clearUnknown(mr, fieldNum(xt.Field))
}
return extensionAsLegacyType(e.value), nil
}
if extension.ExtensionType == nil {
// incomplete descriptor
return e.enc, nil
}
v, err := decodeExtension(e.enc, extension)
if err != nil {
return nil, err
}
// Remember the decoded version and drop the encoded version.
// That way it is safe to mutate what we return.
e.value = extensionAsStorageType(v)
e.desc = extension
e.enc = nil
emap[extension.Field] = e
return extensionAsLegacyType(e.value), nil
}
// defaultExtensionValue returns the default value for extension.
// If no default for an extension is defined ErrMissingExtension is returned.
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
if extension.ExtensionType == nil {
// incomplete descriptor, so no default
// Check whether the message has the extension field set or a default.
var pv protoreflect.Value
switch {
case mr.Has(xtd):
pv = mr.Get(xtd)
case xtd.HasDefault():
pv = xtd.Default()
default:
return nil, ErrMissingExtension
}
t := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
sf, _, err := fieldDefault(t, props)
if err != nil {
return nil, err
}
if sf == nil || sf.value == nil {
// There is no default value.
return nil, ErrMissingExtension
}
if t.Kind() != reflect.Ptr {
// We do not need to return a Ptr, we can directly return sf.value.
return sf.value, nil
}
// We need to return an interface{} that is a pointer to sf.value.
value := reflect.New(t).Elem()
value.Set(reflect.New(value.Type().Elem()))
if sf.kind == reflect.Int32 {
// We may have an int32 or an enum, but the underlying data is int32.
// Since we can't set an int32 into a non int32 reflect.value directly
// set it as a int32.
value.Elem().SetInt(int64(sf.value.(int32)))
} else {
value.Elem().Set(reflect.ValueOf(sf.value))
}
return value.Interface(), nil
}
// decodeExtension decodes an extension encoded in b.
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
t := reflect.TypeOf(extension.ExtensionType)
unmarshal := typeUnmarshaler(t, extension.Tag)
// t is a pointer to a struct, pointer to basic type or a slice.
// Allocate space to store the pointer/slice.
value := reflect.New(t).Elem()
var err error
for {
x, n := decodeVarint(b)
if n == 0 {
return nil, io.ErrUnexpectedEOF
}
b = b[n:]
wire := int(x) & 7
b, err = unmarshal(b, valToPointer(value.Addr()), wire)
if err != nil {
return nil, err
}
if len(b) == 0 {
break
}
}
return value.Interface(), nil
}
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
// The returned slice has the same length as es; missing extensions will appear as nil elements.
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
epb, err := extendable(pb)
if err != nil {
return nil, err
}
extensions = make([]interface{}, len(es))
for i, e := range es {
extensions[i], err = GetExtension(epb, e)
if err == ErrMissingExtension {
err = nil
}
if err != nil {
return
}
}
return
}
// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
// just the Field field, which defines the extension's field number.
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
epb, err := extendable(pb)
if err != nil {
return nil, err
}
registeredExtensions := RegisteredExtensions(pb)
emap, mu := epb.extensionsRead()
if emap == nil {
return nil, nil
}
mu.Lock()
defer mu.Unlock()
extensions := make([]*ExtensionDesc, 0, len(emap))
for extid, e := range emap {
desc := e.desc
if desc == nil {
desc = registeredExtensions[extid]
if desc == nil {
desc = &ExtensionDesc{Field: extid}
}
}
extensions = append(extensions, desc)
}
return extensions, nil
}
// SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
epb, err := extendable(pb)
if err != nil {
return err
}
if err := checkExtensionTypes(epb, extension); err != nil {
return err
}
typ := reflect.TypeOf(extension.ExtensionType)
if typ != reflect.TypeOf(value) {
return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
}
// nil extension values need to be caught early, because the
// encoder can't distinguish an ErrNil due to a nil extension
// from an ErrNil due to a missing field. Extensions are
// always optional, so the encoder would just swallow the error
// and drop all the extensions from the encoded message.
if reflect.ValueOf(value).IsNil() {
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
}
extmap := epb.extensionsWrite()
extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
return nil
}
// ClearAllExtensions clears all extensions from pb.
func ClearAllExtensions(pb Message) {
epb, err := extendable(pb)
if err != nil {
return
}
m := epb.extensionsWrite()
for k := range m {
delete(m, k)
}
}
// A global registry of extensions.
// The generated code will register the generated descriptors by calling RegisterExtension.
var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
// RegisterExtension is called from the generated code.
func RegisterExtension(desc *ExtensionDesc) {
st := reflect.TypeOf(desc.ExtendedType).Elem()
m := extensionMaps[st]
if m == nil {
m = make(map[int32]*ExtensionDesc)
extensionMaps[st] = m
}
if _, ok := m[desc.Field]; ok {
panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
}
m[desc.Field] = desc
}
// RegisteredExtensions returns a map of the registered extensions of a
// protocol buffer struct, indexed by the extension number.
// The argument pb should be a nil pointer to the struct type.
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
return extensionMaps[reflect.TypeOf(pb).Elem()]
}
// extensionAsLegacyType converts a value in the storage type to the API type.
// See Extension.value.
func extensionAsLegacyType(v interface{}) interface{} {
switch rv := reflect.ValueOf(v); rv.Kind() {
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
// Represent primitive types as a pointer to the value.
v := xt.InterfaceOf(pv)
rv := reflect.ValueOf(v)
if isScalarKind(rv.Kind()) {
rv2 := reflect.New(rv.Type())
rv2.Elem().Set(rv)
v = rv2.Interface()
case reflect.Ptr:
// Represent slice types as the value itself.
switch rv.Type().Elem().Kind() {
case reflect.Slice:
if rv.IsNil() {
v = reflect.Zero(rv.Type().Elem()).Interface()
} else {
v = rv.Elem().Interface()
}
}
}
return v
return v, nil
}
// extensionAsStorageType converts a value in the API type to the storage type.
// See Extension.value.
func extensionAsStorageType(v interface{}) interface{} {
switch rv := reflect.ValueOf(v); rv.Kind() {
case reflect.Ptr:
// Represent slice types as the value itself.
switch rv.Type().Elem().Kind() {
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
if rv.IsNil() {
v = reflect.Zero(rv.Type().Elem()).Interface()
} else {
v = rv.Elem().Interface()
// extensionResolver is a custom extension resolver that stores a single
// extension type that takes precedence over the global registry.
type extensionResolver struct{ xt protoreflect.ExtensionType }
func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
return r.xt, nil
}
return protoregistry.GlobalTypes.FindExtensionByName(field)
}
func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
return r.xt, nil
}
return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
}
// GetExtensions returns a list of the extensions values present in m,
// corresponding with the provided list of extension descriptors, xts.
// If an extension is missing in m, the corresponding value is nil.
func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return nil, errNotExtendable
}
vs := make([]interface{}, len(xts))
for i, xt := range xts {
v, err := GetExtension(m, xt)
if err != nil {
if err == ErrMissingExtension {
continue
}
return vs, err
}
case reflect.Slice:
// Represent slice types as a pointer to the value.
if rv.Type().Elem().Kind() != reflect.Uint8 {
rv2 := reflect.New(rv.Type())
rv2.Elem().Set(rv)
v = rv2.Interface()
vs[i] = v
}
return vs, nil
}
// SetExtension sets an extension field in m to the provided value.
func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
return errNotExtendable
}
rv := reflect.ValueOf(v)
if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
}
if rv.Kind() == reflect.Ptr {
if rv.IsNil() {
return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
}
if isScalarKind(rv.Elem().Kind()) {
v = rv.Elem().Interface()
}
}
return v
xtd := xt.TypeDescriptor()
if !isValidExtension(mr.Descriptor(), xtd) {
return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
}
mr.Set(xtd, xt.ValueOf(v))
clearUnknown(mr, fieldNum(xt.Field))
return nil
}
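// Illustrative sketch, not part of the vendored file: a typical caller pairs
// the accessors above, clearing any previous value (populated or unknown)
// before setting a new one. xt and v would come from generated code.
func exampleReplaceExtension(m Message, xt *ExtensionDesc, v interface{}) error {
	ClearExtension(m, xt)
	return SetExtension(m, xt, v)
}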
// SetRawExtension inserts b into the unknown fields of m.
//
// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
func SetRawExtension(m Message, fnum int32, b []byte) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return
}
// Verify that the raw field is valid.
for b0 := b; len(b0) > 0; {
num, _, n := protowire.ConsumeField(b0)
if int32(num) != fnum {
panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
}
b0 = b0[n:]
}
ClearExtension(m, &ExtensionDesc{Field: fnum})
mr.SetUnknown(append(mr.GetUnknown(), b...))
}
// ExtensionDescs returns a list of extension descriptors found in m,
// containing descriptors for both populated extension fields in m and
// also unknown fields of m that are in the extension range.
// For the latter case, a type-incomplete descriptor is provided where only
// the ExtensionDesc.Field field is populated.
// The order of the extension descriptors is undefined.
func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
return nil, errNotExtendable
}
// Collect a set of known extension descriptors.
extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
if fd.IsExtension() {
xt := fd.(protoreflect.ExtensionTypeDescriptor)
if xd, ok := xt.Type().(*ExtensionDesc); ok {
extDescs[fd.Number()] = xd
}
}
return true
})
// Collect a set of unknown extension descriptors.
extRanges := mr.Descriptor().ExtensionRanges()
for b := mr.GetUnknown(); len(b) > 0; {
num, _, n := protowire.ConsumeField(b)
if extRanges.Has(num) && extDescs[num] == nil {
extDescs[num] = nil
}
b = b[n:]
}
// Transpose the set of descriptors into a list.
var xts []*ExtensionDesc
for num, xt := range extDescs {
if xt == nil {
xt = &ExtensionDesc{Field: int32(num)}
}
xts = append(xts, xt)
}
return xts, nil
}
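// Illustrative sketch, not part of the vendored file: ExtensionDescs pairs
// naturally with GetExtension to enumerate every extension carried by a
// message, including ones known only as raw unknown bytes.
func exampleAllExtensionValues(m Message) (map[int32]interface{}, error) {
	xts, err := ExtensionDescs(m)
	if err != nil {
		return nil, err
	}
	vs := make(map[int32]interface{}, len(xts))
	for _, xt := range xts {
		v, err := GetExtension(m, xt)
		if err != nil && err != ErrMissingExtension {
			return nil, err
		}
		vs[xt.Field] = v
	}
	return vs, nil
}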
// isValidExtension reports whether xtd is a valid extension descriptor for md.
func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
}
// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
// This function exists for historical reasons since the representation of
// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
func isScalarKind(k reflect.Kind) bool {
switch k {
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
return true
default:
return false
}
}
// clearUnknown removes unknown fields from m where remover.Has reports true.
func clearUnknown(m protoreflect.Message, remover interface {
Has(protoreflect.FieldNumber) bool
}) {
var bo protoreflect.RawFields
for bi := m.GetUnknown(); len(bi) > 0; {
num, _, n := protowire.ConsumeField(bi)
if !remover.Has(num) {
bo = append(bo, bi[:n]...)
}
bi = bi[n:]
}
if bi := m.GetUnknown(); len(bi) != len(bo) {
m.SetUnknown(bo)
}
}
type fieldNum protoreflect.FieldNumber
func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
return protoreflect.FieldNumber(n1) == n2
}
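// Illustrative sketch, not part of the vendored file: any value with a
// Has(protoreflect.FieldNumber) bool method can drive clearUnknown, e.g.
// fieldNum above for a single field or an ExtensionRanges value for a range.
func exampleDropUnknownField(m protoreflect.Message, num int32) {
	clearUnknown(m, fieldNum(num))
}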


@@ -1,965 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
Package proto converts data structures to and from the wire format of
protocol buffers. It works in concert with the Go source code generated
for .proto files by the protocol compiler.
A summary of the properties of the protocol buffer interface
for a protocol buffer variable v:
- Names are turned from camel_case to CamelCase for export.
- There are no methods on v to set fields; just treat
them as structure fields.
- There are getters that return a field's value if set,
and return the field's default value if unset.
The getters work even if the receiver is a nil message.
- The zero value for a struct is its correct initialization state.
All desired fields must be set before marshaling.
- A Reset() method will restore a protobuf struct to its zero state.
- Non-repeated fields are pointers to the values; nil means unset.
That is, optional or required field int32 f becomes F *int32.
- Repeated fields are slices.
- Helper functions are available to aid the setting of fields.
msg.Foo = proto.String("hello") // set field
- Constants are defined to hold the default values of all fields that
have them. They have the form Default_StructName_FieldName.
Because the getter methods handle defaulted values,
direct use of these constants should be rare.
- Enums are given type names and maps from names to values.
Enum values are prefixed by the enclosing message's name, or by the
enum's type name if it is a top-level enum. Enum types have a String
method, and an Enum method to assist in message construction.
- Nested messages, groups and enums have type names prefixed with the name of
the surrounding message type.
- Extensions are given descriptor names that start with E_,
followed by an underscore-delimited list of the nested messages
that contain it (if any) followed by the CamelCased name of the
extension field itself. HasExtension, ClearExtension, GetExtension
and SetExtension are functions for manipulating extensions.
- Oneof field sets are given a single field in their message,
with distinguished wrapper types for each possible field value.
- Marshal and Unmarshal are functions to encode and decode the wire format.
When the .proto file specifies `syntax="proto3"`, there are some differences:
- Non-repeated fields of non-message type are values instead of pointers.
- Enum types do not get an Enum method.
The simplest way to describe this is to see an example.
Given file test.proto, containing
package example;
enum FOO { X = 17; }
message Test {
required string label = 1;
optional int32 type = 2 [default=77];
repeated int64 reps = 3;
optional group OptionalGroup = 4 {
required string RequiredField = 5;
}
oneof union {
int32 number = 6;
string name = 7;
}
}
The resulting file, test.pb.go, is:
package example
import proto "github.com/golang/protobuf/proto"
import math "math"
type FOO int32
const (
FOO_X FOO = 17
)
var FOO_name = map[int32]string{
17: "X",
}
var FOO_value = map[string]int32{
"X": 17,
}
func (x FOO) Enum() *FOO {
p := new(FOO)
*p = x
return p
}
func (x FOO) String() string {
return proto.EnumName(FOO_name, int32(x))
}
func (x *FOO) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(FOO_value, data)
if err != nil {
return err
}
*x = FOO(value)
return nil
}
type Test struct {
Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
// Types that are valid to be assigned to Union:
// *Test_Number
// *Test_Name
Union isTest_Union `protobuf_oneof:"union"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Test) Reset() { *m = Test{} }
func (m *Test) String() string { return proto.CompactTextString(m) }
func (*Test) ProtoMessage() {}
type isTest_Union interface {
isTest_Union()
}
type Test_Number struct {
Number int32 `protobuf:"varint,6,opt,name=number"`
}
type Test_Name struct {
Name string `protobuf:"bytes,7,opt,name=name"`
}
func (*Test_Number) isTest_Union() {}
func (*Test_Name) isTest_Union() {}
func (m *Test) GetUnion() isTest_Union {
if m != nil {
return m.Union
}
return nil
}
const Default_Test_Type int32 = 77
func (m *Test) GetLabel() string {
if m != nil && m.Label != nil {
return *m.Label
}
return ""
}
func (m *Test) GetType() int32 {
if m != nil && m.Type != nil {
return *m.Type
}
return Default_Test_Type
}
func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
if m != nil {
return m.Optionalgroup
}
return nil
}
type Test_OptionalGroup struct {
RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
}
func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
func (m *Test_OptionalGroup) GetRequiredField() string {
if m != nil && m.RequiredField != nil {
return *m.RequiredField
}
return ""
}
func (m *Test) GetNumber() int32 {
if x, ok := m.GetUnion().(*Test_Number); ok {
return x.Number
}
return 0
}
func (m *Test) GetName() string {
if x, ok := m.GetUnion().(*Test_Name); ok {
return x.Name
}
return ""
}
func init() {
proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
}
To create and play with a Test object:
package main
import (
"log"
"github.com/golang/protobuf/proto"
pb "./example.pb"
)
func main() {
test := &pb.Test{
Label: proto.String("hello"),
Type: proto.Int32(17),
Reps: []int64{1, 2, 3},
Optionalgroup: &pb.Test_OptionalGroup{
RequiredField: proto.String("good bye"),
},
Union: &pb.Test_Name{"fred"},
}
data, err := proto.Marshal(test)
if err != nil {
log.Fatal("marshaling error: ", err)
}
newTest := &pb.Test{}
err = proto.Unmarshal(data, newTest)
if err != nil {
log.Fatal("unmarshaling error: ", err)
}
// Now test and newTest contain the same data.
if test.GetLabel() != newTest.GetLabel() {
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
}
// Use a type switch to determine which oneof was set.
switch u := test.Union.(type) {
case *pb.Test_Number: // u.Number contains the number.
case *pb.Test_Name: // u.Name contains the string.
}
// etc.
}
*/
package proto
import (
"encoding/json"
"fmt"
"log"
"reflect"
"sort"
"strconv"
"sync"
)
// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
// Marshal reports this when a required field is not initialized.
// Unmarshal reports this when a required field is missing from the wire data.
type RequiredNotSetError struct{ field string }
func (e *RequiredNotSetError) Error() string {
if e.field == "" {
return fmt.Sprintf("proto: required field not set")
}
return fmt.Sprintf("proto: required field %q not set", e.field)
}
func (e *RequiredNotSetError) RequiredNotSet() bool {
return true
}
type invalidUTF8Error struct{ field string }
func (e *invalidUTF8Error) Error() string {
if e.field == "" {
return "proto: invalid UTF-8 detected"
}
return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
}
func (e *invalidUTF8Error) InvalidUTF8() bool {
return true
}
// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
// This error should not be exposed to the external API as such errors should
// be recreated with the field information.
var errInvalidUTF8 = &invalidUTF8Error{}
// isNonFatal reports whether the error is either a RequiredNotSet error
// or an InvalidUTF8 error.
func isNonFatal(err error) bool {
if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
return true
}
if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
return true
}
return false
}
type nonFatal struct{ E error }
// Merge merges err into nf and reports whether err could be absorbed:
// true for nil and non-fatal errors, false for fatal errors.
func (nf *nonFatal) Merge(err error) (ok bool) {
if err == nil {
return true // not an error
}
if !isNonFatal(err) {
return false // fatal error
}
if nf.E == nil {
nf.E = err // store first instance of non-fatal error
}
return true
}
// Message is implemented by generated protocol buffer messages.
type Message interface {
Reset()
String() string
ProtoMessage()
}
// A Buffer is a buffer manager for marshaling and unmarshaling
// protocol buffers. It may be reused between invocations to
// reduce memory usage. It is not necessary to use a Buffer;
// the global functions Marshal and Unmarshal create a
// temporary Buffer and are fine for most applications.
type Buffer struct {
buf []byte // encode/decode byte stream
index int // read point
deterministic bool
}
// NewBuffer allocates a new Buffer and initializes its internal data to
// the contents of the argument slice.
func NewBuffer(e []byte) *Buffer {
return &Buffer{buf: e}
}
// Reset resets the Buffer, ready for marshaling a new protocol buffer.
func (p *Buffer) Reset() {
p.buf = p.buf[0:0] // for reading/writing
p.index = 0 // for reading
}
// SetBuf replaces the internal buffer with the slice,
// ready for unmarshaling the contents of the slice.
func (p *Buffer) SetBuf(s []byte) {
p.buf = s
p.index = 0
}
// Bytes returns the contents of the Buffer.
func (p *Buffer) Bytes() []byte { return p.buf }
// SetDeterministic sets whether to use deterministic serialization.
//
// Deterministic serialization guarantees that for a given binary, equal
// messages will always be serialized to the same bytes. This implies:
//
// - Repeated serialization of a message will return the same bytes.
// - Different processes of the same binary (which may be executing on
// different machines) will serialize equal messages to the same bytes.
//
// Note that the deterministic serialization is NOT canonical across
// languages. It is not guaranteed to remain stable over time. It is unstable
// across different builds with schema changes due to unknown fields.
// Users who need canonical serialization (e.g., persistent storage in a
// canonical form, fingerprinting, etc.) should define their own
// canonicalization specification and implement their own serializer rather
// than relying on this API.
//
// If deterministic serialization is requested, map entries will be sorted
// by keys in lexicographical order. This is an implementation detail and
// subject to change.
func (p *Buffer) SetDeterministic(deterministic bool) {
p.deterministic = deterministic
}
/*
* Helper routines for simplifying the creation of optional fields of basic type.
*/
// Bool is a helper routine that allocates a new bool value
// to store v and returns a pointer to it.
func Bool(v bool) *bool {
return &v
}
// Int32 is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it.
func Int32(v int32) *int32 {
return &v
}
// Int is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it, but unlike Int32
// its argument value is an int.
func Int(v int) *int32 {
p := new(int32)
*p = int32(v)
return p
}
// Int64 is a helper routine that allocates a new int64 value
// to store v and returns a pointer to it.
func Int64(v int64) *int64 {
return &v
}
// Float32 is a helper routine that allocates a new float32 value
// to store v and returns a pointer to it.
func Float32(v float32) *float32 {
return &v
}
// Float64 is a helper routine that allocates a new float64 value
// to store v and returns a pointer to it.
func Float64(v float64) *float64 {
return &v
}
// Uint32 is a helper routine that allocates a new uint32 value
// to store v and returns a pointer to it.
func Uint32(v uint32) *uint32 {
return &v
}
// Uint64 is a helper routine that allocates a new uint64 value
// to store v and returns a pointer to it.
func Uint64(v uint64) *uint64 {
return &v
}
// String is a helper routine that allocates a new string value
// to store v and returns a pointer to it.
func String(v string) *string {
return &v
}
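// exampleOptionalScalars is a minimal sketch (using a hypothetical struct,
// not a generated message) of how the helper routines above populate
// optional proto2 fields, which are represented as pointers.
func exampleOptionalScalars() {
	type hypotheticalOptions struct {
		Label   *string
		Retries *int32
		Verbose *bool
	}
	opts := hypotheticalOptions{
		Label:   String("demo"),
		Retries: Int32(3),
		Verbose: Bool(true),
	}
	_ = opts // an unset field stays nil; getters on generated types return the default
}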
// EnumName is a helper function to simplify printing protocol buffer enums
// by name. Given an enum map and a value, it returns a useful string.
func EnumName(m map[int32]string, v int32) string {
s, ok := m[v]
if ok {
return s
}
return strconv.Itoa(int(v))
}
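// exampleEnumName is a minimal sketch (with a hypothetical name map) showing
// that EnumName falls back to the decimal representation for unknown values.
func exampleEnumName() {
	names := map[int32]string{17: "X"}
	fmt.Println(EnumName(names, 17)) // "X"
	fmt.Println(EnumName(names, 42)) // "42"
}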
// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
// from their JSON-encoded representation. Given a map from the enum's symbolic
// names to its int values, and a byte buffer containing the JSON-encoded
// value, it returns an int32 that can be cast to the enum type by the caller.
//
// The function can deal with both JSON representations, numeric and symbolic.
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
if data[0] == '"' {
// New style: enums are strings.
var repr string
if err := json.Unmarshal(data, &repr); err != nil {
return -1, err
}
val, ok := m[repr]
if !ok {
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
}
return val, nil
}
// Old style: enums are ints.
var val int32
if err := json.Unmarshal(data, &val); err != nil {
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
}
return val, nil
}
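// exampleUnmarshalJSONEnum is a minimal sketch (with a hypothetical value map)
// of the two accepted JSON encodings: the symbolic name and the raw number.
func exampleUnmarshalJSONEnum() {
	values := map[string]int32{"X": 17}
	symbolic, _ := UnmarshalJSONEnum(values, []byte(`"X"`), "FOO") // 17
	numeric, _ := UnmarshalJSONEnum(values, []byte(`17`), "FOO")   // 17
	fmt.Println(symbolic, numeric)
}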
// DebugPrint dumps the encoded data in b in a debugging format with a header
// including the string s. Used in testing but made available for general debugging.
func (p *Buffer) DebugPrint(s string, b []byte) {
var u uint64
obuf := p.buf
index := p.index
p.buf = b
p.index = 0
depth := 0
fmt.Printf("\n--- %s ---\n", s)
out:
for {
for i := 0; i < depth; i++ {
fmt.Print(" ")
}
index := p.index
if index == len(p.buf) {
break
}
op, err := p.DecodeVarint()
if err != nil {
fmt.Printf("%3d: fetching op err %v\n", index, err)
break out
}
tag := op >> 3
wire := op & 7
switch wire {
default:
fmt.Printf("%3d: t=%3d unknown wire=%d\n",
index, tag, wire)
break out
case WireBytes:
var r []byte
r, err = p.DecodeRawBytes(false)
if err != nil {
break out
}
fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
if len(r) <= 6 {
for i := 0; i < len(r); i++ {
fmt.Printf(" %.2x", r[i])
}
} else {
for i := 0; i < 3; i++ {
fmt.Printf(" %.2x", r[i])
}
fmt.Printf(" ..")
for i := len(r) - 3; i < len(r); i++ {
fmt.Printf(" %.2x", r[i])
}
}
fmt.Printf("\n")
case WireFixed32:
u, err = p.DecodeFixed32()
if err != nil {
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
case WireFixed64:
u, err = p.DecodeFixed64()
if err != nil {
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
case WireVarint:
u, err = p.DecodeVarint()
if err != nil {
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
case WireStartGroup:
fmt.Printf("%3d: t=%3d start\n", index, tag)
depth++
case WireEndGroup:
depth--
fmt.Printf("%3d: t=%3d end\n", index, tag)
}
}
if depth != 0 {
fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
}
fmt.Printf("\n")
p.buf = obuf
p.index = index
}
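// exampleDebugPrint is a minimal sketch of DebugPrint on a hand-encoded
// buffer: 0x08 is the key for field 1 with varint wire type, and 0x96 0x01
// is the varint encoding of 150.
func exampleDebugPrint() {
	var b Buffer
	b.DebugPrint("field 1 = 150", []byte{0x08, 0x96, 0x01})
}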
// SetDefaults sets unset protocol buffer fields to their default values.
// It only modifies fields that are both unset and have defined defaults.
// It recursively sets default values in any non-nil sub-messages.
func SetDefaults(pb Message) {
setDefaults(reflect.ValueOf(pb), true, false)
}
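// exampleDefaultsMsg and exampleSetDefaults are a minimal sketch, using a
// hand-written message in place of protoc-generated code, of how SetDefaults
// fills an unset field that declares def= in its protobuf tag.
type exampleDefaultsMsg struct {
	Port *int32 `protobuf:"varint,1,opt,name=port,def=8080"`
}

func (m *exampleDefaultsMsg) Reset()         { *m = exampleDefaultsMsg{} }
func (m *exampleDefaultsMsg) String() string { return CompactTextString(m) }
func (*exampleDefaultsMsg) ProtoMessage()    {}

func exampleSetDefaults() {
	m := new(exampleDefaultsMsg)
	SetDefaults(m)
	fmt.Println(*m.Port) // 8080
}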
// v is a pointer to a struct.
func setDefaults(v reflect.Value, recur, zeros bool) {
v = v.Elem()
defaultMu.RLock()
dm, ok := defaults[v.Type()]
defaultMu.RUnlock()
if !ok {
dm = buildDefaultMessage(v.Type())
defaultMu.Lock()
defaults[v.Type()] = dm
defaultMu.Unlock()
}
for _, sf := range dm.scalars {
f := v.Field(sf.index)
if !f.IsNil() {
// field already set
continue
}
dv := sf.value
if dv == nil && !zeros {
// no explicit default, and don't want to set zeros
continue
}
fptr := f.Addr().Interface() // **T
// TODO: Consider batching the allocations we do here.
switch sf.kind {
case reflect.Bool:
b := new(bool)
if dv != nil {
*b = dv.(bool)
}
*(fptr.(**bool)) = b
case reflect.Float32:
f := new(float32)
if dv != nil {
*f = dv.(float32)
}
*(fptr.(**float32)) = f
case reflect.Float64:
f := new(float64)
if dv != nil {
*f = dv.(float64)
}
*(fptr.(**float64)) = f
case reflect.Int32:
// might be an enum
if ft := f.Type(); ft != int32PtrType {
// enum
f.Set(reflect.New(ft.Elem()))
if dv != nil {
f.Elem().SetInt(int64(dv.(int32)))
}
} else {
// int32 field
i := new(int32)
if dv != nil {
*i = dv.(int32)
}
*(fptr.(**int32)) = i
}
case reflect.Int64:
i := new(int64)
if dv != nil {
*i = dv.(int64)
}
*(fptr.(**int64)) = i
case reflect.String:
s := new(string)
if dv != nil {
*s = dv.(string)
}
*(fptr.(**string)) = s
case reflect.Uint8:
// exceptional case: []byte
var b []byte
if dv != nil {
db := dv.([]byte)
b = make([]byte, len(db))
copy(b, db)
} else {
b = []byte{}
}
*(fptr.(*[]byte)) = b
case reflect.Uint32:
u := new(uint32)
if dv != nil {
*u = dv.(uint32)
}
*(fptr.(**uint32)) = u
case reflect.Uint64:
u := new(uint64)
if dv != nil {
*u = dv.(uint64)
}
*(fptr.(**uint64)) = u
default:
log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
}
}
for _, ni := range dm.nested {
f := v.Field(ni)
// f is *T or []*T or map[T]*T
switch f.Kind() {
case reflect.Ptr:
if f.IsNil() {
continue
}
setDefaults(f, recur, zeros)
case reflect.Slice:
for i := 0; i < f.Len(); i++ {
e := f.Index(i)
if e.IsNil() {
continue
}
setDefaults(e, recur, zeros)
}
case reflect.Map:
for _, k := range f.MapKeys() {
e := f.MapIndex(k)
if e.IsNil() {
continue
}
setDefaults(e, recur, zeros)
}
}
}
}
var (
// defaults maps a protocol buffer struct type to its defaultMessage, which
// records the scalar fields that have proto-declared default values and the
// indices of nested message fields. Access is guarded by defaultMu.
defaultMu sync.RWMutex
defaults = make(map[reflect.Type]defaultMessage)
int32PtrType = reflect.TypeOf((*int32)(nil))
)
// defaultMessage represents information about the default values of a message.
type defaultMessage struct {
scalars []scalarField
nested []int // struct field index of nested messages
}
type scalarField struct {
index int // struct field index
kind reflect.Kind // element type (the T in *T or []T)
value interface{} // the proto-declared default value, or nil
}
// t is a struct type.
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
sprop := GetProperties(t)
for _, prop := range sprop.Prop {
fi, ok := sprop.decoderTags.get(prop.Tag)
if !ok {
// XXX_unrecognized
continue
}
ft := t.Field(fi).Type
sf, nested, err := fieldDefault(ft, prop)
switch {
case err != nil:
log.Print(err)
case nested:
dm.nested = append(dm.nested, fi)
case sf != nil:
sf.index = fi
dm.scalars = append(dm.scalars, *sf)
}
}
return dm
}
// fieldDefault returns the scalarField for field type ft.
// sf will be nil if the field cannot have a default.
// nestedMessage will be true if this is a nested message.
// Note that sf.index is not set on return.
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
var canHaveDefault bool
switch ft.Kind() {
case reflect.Ptr:
if ft.Elem().Kind() == reflect.Struct {
nestedMessage = true
} else {
canHaveDefault = true // proto2 scalar field
}
case reflect.Slice:
switch ft.Elem().Kind() {
case reflect.Ptr:
nestedMessage = true // repeated message
case reflect.Uint8:
canHaveDefault = true // bytes field
}
case reflect.Map:
if ft.Elem().Kind() == reflect.Ptr {
nestedMessage = true // map with message values
}
}
if !canHaveDefault {
if nestedMessage {
return nil, true, nil
}
return nil, false, nil
}
// We now know that ft is a pointer or slice.
sf = &scalarField{kind: ft.Elem().Kind()}
// scalar fields without defaults
if !prop.HasDefault {
return sf, false, nil
}
// a scalar field: either *T or []byte
switch ft.Elem().Kind() {
case reflect.Bool:
x, err := strconv.ParseBool(prop.Default)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
}
sf.value = x
case reflect.Float32:
x, err := strconv.ParseFloat(prop.Default, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
}
sf.value = float32(x)
case reflect.Float64:
x, err := strconv.ParseFloat(prop.Default, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
}
sf.value = x
case reflect.Int32:
x, err := strconv.ParseInt(prop.Default, 10, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
}
sf.value = int32(x)
case reflect.Int64:
x, err := strconv.ParseInt(prop.Default, 10, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
}
sf.value = x
case reflect.String:
sf.value = prop.Default
case reflect.Uint8:
// []byte (not *uint8)
sf.value = []byte(prop.Default)
case reflect.Uint32:
x, err := strconv.ParseUint(prop.Default, 10, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
}
sf.value = uint32(x)
case reflect.Uint64:
x, err := strconv.ParseUint(prop.Default, 10, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
}
sf.value = x
default:
return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
}
return sf, false, nil
}
// mapKeys returns a sort.Interface to be used for sorting the map keys.
// Map fields may have key types of booleans, non-float integral scalars, and strings.
func mapKeys(vs []reflect.Value) sort.Interface {
s := mapKeySorter{vs: vs}
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
if len(vs) == 0 {
return s
}
switch vs[0].Kind() {
case reflect.Int32, reflect.Int64:
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
case reflect.Uint32, reflect.Uint64:
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
case reflect.Bool:
s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
case reflect.String:
s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
default:
panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
}
return s
}
type mapKeySorter struct {
vs []reflect.Value
less func(a, b reflect.Value) bool
}
func (s mapKeySorter) Len() int { return len(s.vs) }
func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
func (s mapKeySorter) Less(i, j int) bool {
return s.less(s.vs[i], s.vs[j])
}
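// exampleSortMapKeys is a minimal sketch of how a deterministic marshaler can
// use mapKeys: collect the keys via reflection, sort them, then visit the map
// entries in that fixed order.
func exampleSortMapKeys() {
	m := map[string]int32{"b": 2, "a": 1, "c": 3}
	keys := reflect.ValueOf(m).MapKeys()
	sort.Sort(mapKeys(keys))
	for _, k := range keys {
		fmt.Println(k.String(), m[k.String()]) // a 1, b 2, c 3
	}
}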
// isProto3Zero reports whether v is a zero proto3 value.
func isProto3Zero(v reflect.Value) bool {
switch v.Kind() {
case reflect.Bool:
return !v.Bool()
case reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint32, reflect.Uint64:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.String:
return v.String() == ""
}
return false
}
const (
// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
// to assert that the generated code is compatible with this version of the proto package.
ProtoPackageIsVersion3 = true
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
// to assert that the generated code is compatible with this version of the proto package.
ProtoPackageIsVersion2 = true
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
// to assert that the generated code is compatible with this version of the proto package.
ProtoPackageIsVersion1 = true
)
// InternalMessageInfo is a type used internally by generated .pb.go files.
// This type is not intended to be used by non-generated code.
// This type is not subject to any compatibility guarantee.
type InternalMessageInfo struct {
marshal *marshalInfo
unmarshal *unmarshalInfo
merge *mergeInfo
discard *discardInfo
}
@ -1,181 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Support for message sets.
*/
import (
"errors"
)
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
// A message type ID is required for storing a protocol buffer in a message set.
var errNoMessageTypeID = errors.New("proto does not have a message type ID")
// The first two types (_MessageSet_Item and messageSet)
// model what the protocol compiler produces for the following protocol message:
// message MessageSet {
// repeated group Item = 1 {
// required int32 type_id = 2;
// required string message = 3;
// };
// }
// That is the MessageSet wire format. We can't use a proto to generate these
// because that would introduce a circular dependency between it and this package.
type _MessageSet_Item struct {
TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
Message []byte `protobuf:"bytes,3,req,name=message"`
}
type messageSet struct {
Item []*_MessageSet_Item `protobuf:"group,1,rep"`
XXX_unrecognized []byte
// TODO: caching?
}
// Make sure messageSet is a Message.
var _ Message = (*messageSet)(nil)
// messageTypeIder is an interface satisfied by a protocol buffer type
// that may be stored in a MessageSet.
type messageTypeIder interface {
MessageTypeId() int32
}
func (ms *messageSet) find(pb Message) *_MessageSet_Item {
mti, ok := pb.(messageTypeIder)
if !ok {
return nil
}
id := mti.MessageTypeId()
for _, item := range ms.Item {
if *item.TypeId == id {
return item
}
}
return nil
}
func (ms *messageSet) Has(pb Message) bool {
return ms.find(pb) != nil
}
func (ms *messageSet) Unmarshal(pb Message) error {
if item := ms.find(pb); item != nil {
return Unmarshal(item.Message, pb)
}
if _, ok := pb.(messageTypeIder); !ok {
return errNoMessageTypeID
}
return nil // TODO: return error instead?
}
func (ms *messageSet) Marshal(pb Message) error {
msg, err := Marshal(pb)
if err != nil {
return err
}
if item := ms.find(pb); item != nil {
// reuse existing item
item.Message = msg
return nil
}
mti, ok := pb.(messageTypeIder)
if !ok {
return errNoMessageTypeID
}
mtid := mti.MessageTypeId()
ms.Item = append(ms.Item, &_MessageSet_Item{
TypeId: &mtid,
Message: msg,
})
return nil
}
func (ms *messageSet) Reset() { *ms = messageSet{} }
func (ms *messageSet) String() string { return CompactTextString(ms) }
func (*messageSet) ProtoMessage() {}
// Support for the message_set_wire_format message option.
func skipVarint(buf []byte) []byte {
i := 0
for ; buf[i]&0x80 != 0; i++ {
}
return buf[i+1:]
}
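// exampleSkipVarint is a minimal sketch (not part of the message set codec
// itself) of what skipVarint does: 0x96 0x01 is the varint encoding of 150,
// so only the trailing payload byte remains.
func exampleSkipVarint() []byte {
	return skipVarint([]byte{0x96, 0x01, 0xAA}) // []byte{0xAA}
}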
// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
func unmarshalMessageSet(buf []byte, exts interface{}) error {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
m = exts.extensionsWrite()
case map[int32]Extension:
m = exts
default:
return errors.New("proto: not an extension map")
}
ms := new(messageSet)
if err := Unmarshal(buf, ms); err != nil {
return err
}
for _, item := range ms.Item {
id := *item.TypeId
msg := item.Message
// Restore wire type and field number varint, plus length varint.
// Be careful to preserve duplicate items.
b := EncodeVarint(uint64(id)<<3 | WireBytes)
if ext, ok := m[id]; ok {
// Existing data; rip off the tag and length varint
// so we join the new data correctly.
// We can assume that ext.enc is set because we are unmarshaling.
o := ext.enc[len(b):] // skip wire type and field number
_, n := DecodeVarint(o) // calculate length of length varint
o = o[n:] // skip length varint
msg = append(o, msg...) // join old data and new data
}
b = append(b, EncodeVarint(uint64(len(msg)))...)
b = append(b, msg...)
m[id] = Extension{enc: b}
}
return nil
}
@ -1,360 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build purego appengine js
// This file contains an implementation of proto field accesses using package reflect.
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
// be used on App Engine.
package proto
import (
"reflect"
"sync"
)
const unsafeAllowed = false
// A field identifies a field in a struct, accessible from a pointer.
// In this implementation, a field is identified by the sequence of field indices
// passed to reflect's FieldByIndex.
type field []int
// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
return f.Index
}
// invalidField is an invalid field identifier.
var invalidField = field(nil)
// zeroField is a noop when calling pointer.offset.
var zeroField = field([]int{})
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool { return f != nil }
// The pointer type is for the table-driven decoder.
// The implementation here uses a reflect.Value of pointer type to
// create a generic pointer. In pointer_unsafe.go we use unsafe
// instead of reflect to implement the same (but faster) interface.
type pointer struct {
v reflect.Value
}
// toPointer converts an interface of pointer type to a pointer
// that points to the same target.
func toPointer(i *Message) pointer {
return pointer{v: reflect.ValueOf(*i)}
}
// toAddrPointer converts an interface to a pointer that points to
// the interface data.
func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
v := reflect.ValueOf(*i)
u := reflect.New(v.Type())
u.Elem().Set(v)
if deref {
u = u.Elem()
}
return pointer{v: u}
}
// valToPointer converts v to a pointer. v must be of pointer type.
func valToPointer(v reflect.Value) pointer {
return pointer{v: v}
}
// offset converts from a pointer to a structure to a pointer to
// one of its fields.
func (p pointer) offset(f field) pointer {
return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
}
func (p pointer) isNil() bool {
return p.v.IsNil()
}
// grow updates the slice s in place to make it one element longer.
// s must be addressable.
// Returns the (addressable) new element.
func grow(s reflect.Value) reflect.Value {
n, m := s.Len(), s.Cap()
if n < m {
s.SetLen(n + 1)
} else {
s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
}
return s.Index(n)
}
func (p pointer) toInt64() *int64 {
return p.v.Interface().(*int64)
}
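// examplePuregoOffset is a minimal sketch (with a hypothetical struct) of how
// this reflect-based implementation reaches a field: toField records the
// reflect index path and offset follows it with FieldByIndex.
type examplePuregoMsg struct{ Count int64 }

func examplePuregoOffset() *int64 {
	m := &examplePuregoMsg{Count: 7}
	sf, _ := reflect.TypeOf(m).Elem().FieldByName("Count")
	p := valToPointer(reflect.ValueOf(m))
	return p.offset(toField(&sf)).toInt64() // aliases m.Count
}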
func (p pointer) toInt64Ptr() **int64 {
return p.v.Interface().(**int64)
}
func (p pointer) toInt64Slice() *[]int64 {
return p.v.Interface().(*[]int64)
}
var int32ptr = reflect.TypeOf((*int32)(nil))
func (p pointer) toInt32() *int32 {
return p.v.Convert(int32ptr).Interface().(*int32)
}
// The toInt32Ptr/Slice methods don't work because of enums.
// Instead, we must use set/get methods for the int32ptr/slice case.
/*
func (p pointer) toInt32Ptr() **int32 {
return p.v.Interface().(**int32)
}
func (p pointer) toInt32Slice() *[]int32 {
return p.v.Interface().(*[]int32)
}
*/
func (p pointer) getInt32Ptr() *int32 {
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
// raw int32 type
return p.v.Elem().Interface().(*int32)
}
// an enum
return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
}
func (p pointer) setInt32Ptr(v int32) {
// Allocate value in a *int32. Possibly convert that to a *enum.
// Then assign it to a **int32 or **enum.
// Note: we can convert *int32 to *enum, but we can't convert
// **int32 to **enum!
p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
}
// getInt32Slice copies []int32 from p as a new slice.
// This behavior differs from the implementation in pointer_unsafe.go.
func (p pointer) getInt32Slice() []int32 {
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
// raw int32 type
return p.v.Elem().Interface().([]int32)
}
// an enum
// Allocate a []int32, then assign []enum's values into it.
// Note: we can't convert []enum to []int32.
slice := p.v.Elem()
s := make([]int32, slice.Len())
for i := 0; i < slice.Len(); i++ {
s[i] = int32(slice.Index(i).Int())
}
return s
}
// setInt32Slice copies []int32 into p as a new slice.
// This behavior differs from the implementation in pointer_unsafe.go.
func (p pointer) setInt32Slice(v []int32) {
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
// raw int32 type
p.v.Elem().Set(reflect.ValueOf(v))
return
}
// an enum
// Allocate a []enum, then assign []int32's values into it.
// Note: we can't convert []enum to []int32.
slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
for i, x := range v {
slice.Index(i).SetInt(int64(x))
}
p.v.Elem().Set(slice)
}
func (p pointer) appendInt32Slice(v int32) {
grow(p.v.Elem()).SetInt(int64(v))
}
func (p pointer) toUint64() *uint64 {
return p.v.Interface().(*uint64)
}
func (p pointer) toUint64Ptr() **uint64 {
return p.v.Interface().(**uint64)
}
func (p pointer) toUint64Slice() *[]uint64 {
return p.v.Interface().(*[]uint64)
}
func (p pointer) toUint32() *uint32 {
return p.v.Interface().(*uint32)
}
func (p pointer) toUint32Ptr() **uint32 {
return p.v.Interface().(**uint32)
}
func (p pointer) toUint32Slice() *[]uint32 {
return p.v.Interface().(*[]uint32)
}
func (p pointer) toBool() *bool {
return p.v.Interface().(*bool)
}
func (p pointer) toBoolPtr() **bool {
return p.v.Interface().(**bool)
}
func (p pointer) toBoolSlice() *[]bool {
return p.v.Interface().(*[]bool)
}
func (p pointer) toFloat64() *float64 {
return p.v.Interface().(*float64)
}
func (p pointer) toFloat64Ptr() **float64 {
return p.v.Interface().(**float64)
}
func (p pointer) toFloat64Slice() *[]float64 {
return p.v.Interface().(*[]float64)
}
func (p pointer) toFloat32() *float32 {
return p.v.Interface().(*float32)
}
func (p pointer) toFloat32Ptr() **float32 {
return p.v.Interface().(**float32)
}
func (p pointer) toFloat32Slice() *[]float32 {
return p.v.Interface().(*[]float32)
}
func (p pointer) toString() *string {
return p.v.Interface().(*string)
}
func (p pointer) toStringPtr() **string {
return p.v.Interface().(**string)
}
func (p pointer) toStringSlice() *[]string {
return p.v.Interface().(*[]string)
}
func (p pointer) toBytes() *[]byte {
return p.v.Interface().(*[]byte)
}
func (p pointer) toBytesSlice() *[][]byte {
return p.v.Interface().(*[][]byte)
}
func (p pointer) toExtensions() *XXX_InternalExtensions {
return p.v.Interface().(*XXX_InternalExtensions)
}
func (p pointer) toOldExtensions() *map[int32]Extension {
return p.v.Interface().(*map[int32]Extension)
}
func (p pointer) getPointer() pointer {
return pointer{v: p.v.Elem()}
}
func (p pointer) setPointer(q pointer) {
p.v.Elem().Set(q.v)
}
func (p pointer) appendPointer(q pointer) {
grow(p.v.Elem()).Set(q.v)
}
// getPointerSlice copies []*T from p as a new []pointer.
// This behavior differs from the implementation in pointer_unsafe.go.
func (p pointer) getPointerSlice() []pointer {
if p.v.IsNil() {
return nil
}
n := p.v.Elem().Len()
s := make([]pointer, n)
for i := 0; i < n; i++ {
s[i] = pointer{v: p.v.Elem().Index(i)}
}
return s
}
// setPointerSlice copies []pointer into p as a new []*T.
// This behavior differs from the implementation in pointer_unsafe.go.
func (p pointer) setPointerSlice(v []pointer) {
if v == nil {
p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
return
}
s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
for _, p := range v {
s = reflect.Append(s, p.v)
}
p.v.Elem().Set(s)
}
// getInterfacePointer returns a pointer that points to the
// interface data of the interface pointed by p.
func (p pointer) getInterfacePointer() pointer {
if p.v.Elem().IsNil() {
return pointer{v: p.v.Elem()}
}
return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
}
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
// TODO: check that p.v.Type().Elem() == t?
return p.v
}
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
atomicLock.Lock()
defer atomicLock.Unlock()
return *p
}
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
atomicLock.Lock()
defer atomicLock.Unlock()
*p = v
}
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
atomicLock.Lock()
defer atomicLock.Unlock()
return *p
}
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
atomicLock.Lock()
defer atomicLock.Unlock()
*p = v
}
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
atomicLock.Lock()
defer atomicLock.Unlock()
return *p
}
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
atomicLock.Lock()
defer atomicLock.Unlock()
*p = v
}
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
atomicLock.Lock()
defer atomicLock.Unlock()
return *p
}
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
atomicLock.Lock()
defer atomicLock.Unlock()
*p = v
}
var atomicLock sync.Mutex
@ -1,313 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build !purego,!appengine,!js
// This file contains the implementation of the proto field accesses using package unsafe.
package proto
import (
"reflect"
"sync/atomic"
"unsafe"
)
const unsafeAllowed = true
// A field identifies a field in a struct, accessible from a pointer.
// In this implementation, a field is identified by its byte offset from the start of the struct.
type field uintptr
// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
return field(f.Offset)
}
// invalidField is an invalid field identifier.
const invalidField = ^field(0)
// zeroField is a noop when calling pointer.offset.
const zeroField = field(0)
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool {
return f != invalidField
}
// The pointer type below is for the new table-driven encoder/decoder.
// The implementation here uses unsafe.Pointer to create a generic pointer.
// In pointer_reflect.go we use reflect instead of unsafe to implement
// the same (but slower) interface.
type pointer struct {
p unsafe.Pointer
}
// size of pointer
var ptrSize = unsafe.Sizeof(uintptr(0))
// toPointer converts an interface of pointer type to a pointer
// that points to the same target.
func toPointer(i *Message) pointer {
// Super-tricky - read pointer out of data word of interface value.
// Saves ~25ns over the equivalent:
// return valToPointer(reflect.ValueOf(*i))
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}
// toAddrPointer converts an interface to a pointer that points to
// the interface data.
func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
// Super-tricky - read or get the address of data word of interface value.
if isptr {
// The interface is of pointer type, thus it is a direct interface.
// The data word is the pointer data itself. We take its address.
p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
} else {
// The interface is not of pointer type. The data word is the pointer
// to the data.
p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}
if deref {
p.p = *(*unsafe.Pointer)(p.p)
}
return p
}
// valToPointer converts v to a pointer. v must be of pointer type.
func valToPointer(v reflect.Value) pointer {
return pointer{p: unsafe.Pointer(v.Pointer())}
}
// offset converts from a pointer to a structure to a pointer to
// one of its fields.
func (p pointer) offset(f field) pointer {
// For safety, we should panic if !f.IsValid, however calling panic causes
// this to no longer be inlineable, which is a serious performance cost.
/*
if !f.IsValid() {
panic("invalid field")
}
*/
return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
}
func (p pointer) isNil() bool {
return p.p == nil
}
func (p pointer) toInt64() *int64 {
return (*int64)(p.p)
}
func (p pointer) toInt64Ptr() **int64 {
return (**int64)(p.p)
}
func (p pointer) toInt64Slice() *[]int64 {
return (*[]int64)(p.p)
}
func (p pointer) toInt32() *int32 {
return (*int32)(p.p)
}
// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
/*
func (p pointer) toInt32Ptr() **int32 {
return (**int32)(p.p)
}
func (p pointer) toInt32Slice() *[]int32 {
return (*[]int32)(p.p)
}
*/
func (p pointer) getInt32Ptr() *int32 {
return *(**int32)(p.p)
}
func (p pointer) setInt32Ptr(v int32) {
*(**int32)(p.p) = &v
}
// getInt32Slice loads a []int32 from p.
// The value returned is aliased with the original slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) getInt32Slice() []int32 {
return *(*[]int32)(p.p)
}
// setInt32Slice stores a []int32 to p.
// The value set is aliased with the input slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) setInt32Slice(v []int32) {
*(*[]int32)(p.p) = v
}
// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
func (p pointer) appendInt32Slice(v int32) {
s := (*[]int32)(p.p)
*s = append(*s, v)
}
func (p pointer) toUint64() *uint64 {
return (*uint64)(p.p)
}
func (p pointer) toUint64Ptr() **uint64 {
return (**uint64)(p.p)
}
func (p pointer) toUint64Slice() *[]uint64 {
return (*[]uint64)(p.p)
}
func (p pointer) toUint32() *uint32 {
return (*uint32)(p.p)
}
func (p pointer) toUint32Ptr() **uint32 {
return (**uint32)(p.p)
}
func (p pointer) toUint32Slice() *[]uint32 {
return (*[]uint32)(p.p)
}
func (p pointer) toBool() *bool {
return (*bool)(p.p)
}
func (p pointer) toBoolPtr() **bool {
return (**bool)(p.p)
}
func (p pointer) toBoolSlice() *[]bool {
return (*[]bool)(p.p)
}
func (p pointer) toFloat64() *float64 {
return (*float64)(p.p)
}
func (p pointer) toFloat64Ptr() **float64 {
return (**float64)(p.p)
}
func (p pointer) toFloat64Slice() *[]float64 {
return (*[]float64)(p.p)
}
func (p pointer) toFloat32() *float32 {
return (*float32)(p.p)
}
func (p pointer) toFloat32Ptr() **float32 {
return (**float32)(p.p)
}
func (p pointer) toFloat32Slice() *[]float32 {
return (*[]float32)(p.p)
}
func (p pointer) toString() *string {
return (*string)(p.p)
}
func (p pointer) toStringPtr() **string {
return (**string)(p.p)
}
func (p pointer) toStringSlice() *[]string {
return (*[]string)(p.p)
}
func (p pointer) toBytes() *[]byte {
return (*[]byte)(p.p)
}
func (p pointer) toBytesSlice() *[][]byte {
return (*[][]byte)(p.p)
}
func (p pointer) toExtensions() *XXX_InternalExtensions {
return (*XXX_InternalExtensions)(p.p)
}
func (p pointer) toOldExtensions() *map[int32]Extension {
return (*map[int32]Extension)(p.p)
}
// getPointerSlice loads []*T from p as a []pointer.
// The value returned is aliased with the original slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) getPointerSlice() []pointer {
// Super-tricky - p should point to a []*T where T is a
// message type. We load it as []pointer.
return *(*[]pointer)(p.p)
}
// setPointerSlice stores []pointer into p as a []*T.
// The value set is aliased with the input slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) setPointerSlice(v []pointer) {
// Super-tricky - p should point to a []*T where T is a
// message type. We store it as []pointer.
*(*[]pointer)(p.p) = v
}
// getPointer loads the pointer at p and returns it.
func (p pointer) getPointer() pointer {
return pointer{p: *(*unsafe.Pointer)(p.p)}
}
// setPointer stores the pointer q at p.
func (p pointer) setPointer(q pointer) {
*(*unsafe.Pointer)(p.p) = q.p
}
// append q to the slice pointed to by p.
func (p pointer) appendPointer(q pointer) {
s := (*[]unsafe.Pointer)(p.p)
*s = append(*s, q.p)
}
// getInterfacePointer returns a pointer that points to the
// interface data of the interface pointed by p.
func (p pointer) getInterfacePointer() pointer {
// Super-tricky - read pointer out of data word of interface value.
return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
}
// asPointerTo returns a reflect.Value that is a pointer to an
// object of type t stored at p.
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
return reflect.NewAt(t, p.p)
}
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
@ -1,162 +1,104 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
/*
* Routines for encoding data into the wire format for protocol buffers.
*/
import (
"fmt"
"log"
"reflect"
"sort"
"strconv"
"strings"
"sync"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/runtime/protoimpl"
)
const debug bool = false
// Constants that identify the encoding of a value on the wire.
const (
WireVarint = 0
WireFixed64 = 1
WireBytes = 2
WireStartGroup = 3
WireEndGroup = 4
WireFixed32 = 5
)
// tagMap is an optimization over map[int]int for typical protocol buffer
// use-cases. Encoded protocol buffers are often in tag order with small tag
// numbers.
type tagMap struct {
fastTags []int
slowTags map[int]int
}
// tagMapFastLimit is the upper bound on the tag number that will be stored in
// the tagMap slice rather than its map.
const tagMapFastLimit = 1024
func (p *tagMap) get(t int) (int, bool) {
if t > 0 && t < tagMapFastLimit {
if t >= len(p.fastTags) {
return 0, false
}
fi := p.fastTags[t]
return fi, fi >= 0
}
fi, ok := p.slowTags[t]
return fi, ok
}
func (p *tagMap) put(t int, fi int) {
if t > 0 && t < tagMapFastLimit {
for len(p.fastTags) < t+1 {
p.fastTags = append(p.fastTags, -1)
}
p.fastTags[t] = fi
return
}
if p.slowTags == nil {
p.slowTags = make(map[int]int)
}
p.slowTags[t] = fi
}
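// exampleTagMap is a minimal sketch of the optimization above: tags below
// tagMapFastLimit live in the fastTags slice, larger tags spill into slowTags.
func exampleTagMap() (fastIdx, slowIdx int) {
	var tm tagMap
	tm.put(3, 7)    // fast path: stored in fastTags[3]
	tm.put(5000, 9) // slow path: stored in slowTags[5000]
	fastIdx, _ = tm.get(3)    // 7
	slowIdx, _ = tm.get(5000) // 9
	return fastIdx, slowIdx
}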
// StructProperties represents properties for all the fields of a struct.
// decoderTags and decoderOrigNames should only be used by the decoder.
// StructProperties represents protocol buffer type information for a
// generated protobuf message in the open-struct API.
//
// Deprecated: Do not use.
type StructProperties struct {
Prop []*Properties // properties for each field
reqCount int // required count
decoderTags tagMap // map from proto tag to struct field number
decoderOrigNames map[string]int // map from original name to struct field number
order []int // list of struct field numbers in tag order
// Prop are the properties for each field.
//
// Fields belonging to a oneof are stored in OneofTypes instead, with a
// single Properties representing the parent oneof held here.
//
// The order of Prop matches the order of fields in the Go struct.
// Struct fields that are not related to protobufs have a "XXX_" prefix
// in the Properties.Name and must be ignored by the user.
Prop []*Properties
// OneofTypes contains information about the oneof fields in this message.
// It is keyed by the original name of a field.
// It is keyed by the protobuf field name.
OneofTypes map[string]*OneofProperties
}
// OneofProperties represents information about a specific field in a oneof.
type OneofProperties struct {
Type reflect.Type // pointer to generated struct type for this oneof field
Field int // struct field number of the containing oneof in the message
Prop *Properties
}
// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
// See encode.go, (*Buffer).enc_struct.
func (sp *StructProperties) Len() int { return len(sp.order) }
func (sp *StructProperties) Less(i, j int) bool {
return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
}
func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
// Properties represents the protocol-specific behavior of a single struct field.
// Properties represents the type information for a protobuf message field.
//
// Deprecated: Do not use.
type Properties struct {
Name string // name of the field, for error messages
OrigName string // original name before protocol compiler (always set)
JSONName string // name to use for JSON; determined by protoc
Wire string
// Name is a placeholder name with little meaningful semantic value.
// If the name has an "XXX_" prefix, the entire Properties must be ignored.
Name string
// OrigName is the protobuf field name or oneof name.
OrigName string
// JSONName is the JSON name for the protobuf field.
JSONName string
// Enum is a placeholder name for enums.
// For historical reasons, this is neither the Go name for the enum,
// nor the protobuf name for the enum.
Enum string // Deprecated: Do not use.
// Weak contains the full name of the weakly referenced message.
Weak string
// Wire is a string representation of the wire type.
Wire string
// WireType is the protobuf wire type for the field.
WireType int
Tag int
// Tag is the protobuf field number.
Tag int
// Required reports whether this is a required field.
Required bool
// Optional reports whether this is an optional field.
Optional bool
// Repeated reports whether this is a repeated field.
Repeated bool
Packed bool // relevant for repeated primitives only
Enum string // set for enum types only
proto3 bool // whether this is known to be a proto3 field
oneof bool // whether this is a oneof field
// Packed reports whether this is a packed repeated field of scalars.
Packed bool
// Proto3 reports whether this field operates under the proto3 syntax.
Proto3 bool
// Oneof reports whether this field belongs within a oneof.
Oneof bool
Default string // default value
HasDefault bool // whether an explicit default was provided
// Default is the default value in string form.
Default string
// HasDefault reports whether the field has a default value.
HasDefault bool
stype reflect.Type // set for struct types only
sprop *StructProperties // set for struct types only
// MapKeyProp is the properties for the key field for a map field.
MapKeyProp *Properties
// MapValProp is the properties for the value field for a map field.
MapValProp *Properties
}
mtype reflect.Type // set for map types only
MapKeyProp *Properties // set for map types only
MapValProp *Properties // set for map types only
// OneofProperties represents the type information for a protobuf oneof.
//
// Deprecated: Do not use.
type OneofProperties struct {
// Type is a pointer to the generated wrapper type for the field value.
// This is nil for messages that are not in the open-struct API.
Type reflect.Type
// Field is the index into StructProperties.Prop for the containing oneof.
Field int
// Prop is the properties for the field.
Prop *Properties
}
// String formats the properties in the protobuf struct field tag style.
func (p *Properties) String() string {
s := p.Wire
s += ","
s += strconv.Itoa(p.Tag)
s += "," + strconv.Itoa(p.Tag)
if p.Required {
s += ",req"
}
@ -170,18 +112,21 @@ func (p *Properties) String() string {
s += ",packed"
}
s += ",name=" + p.OrigName
if p.JSONName != p.OrigName {
if p.JSONName != "" {
s += ",json=" + p.JSONName
}
if p.proto3 {
s += ",proto3"
}
if p.oneof {
s += ",oneof"
}
if len(p.Enum) > 0 {
s += ",enum=" + p.Enum
}
if len(p.Weak) > 0 {
s += ",weak=" + p.Weak
}
if p.Proto3 {
s += ",proto3"
}
if p.Oneof {
s += ",oneof"
}
if p.HasDefault {
s += ",def=" + p.Default
}
@ -189,356 +134,173 @@ func (p *Properties) String() string {
}
// Parse populates p by parsing a string in the protobuf struct field tag style.
func (p *Properties) Parse(s string) {
// "bytes,49,opt,name=foo,def=hello!"
fields := strings.Split(s, ",") // breaks def=, but handled below.
if len(fields) < 2 {
log.Printf("proto: tag has too few fields: %q", s)
return
}
p.Wire = fields[0]
switch p.Wire {
case "varint":
p.WireType = WireVarint
case "fixed32":
p.WireType = WireFixed32
case "fixed64":
p.WireType = WireFixed64
case "zigzag32":
p.WireType = WireVarint
case "zigzag64":
p.WireType = WireVarint
case "bytes", "group":
p.WireType = WireBytes
// no numeric converter for non-numeric types
default:
log.Printf("proto: tag has unknown wire type: %q", s)
return
}
var err error
p.Tag, err = strconv.Atoi(fields[1])
if err != nil {
return
}
outer:
for i := 2; i < len(fields); i++ {
f := fields[i]
switch {
case f == "req":
p.Required = true
case f == "opt":
func (p *Properties) Parse(tag string) {
// For example: "bytes,49,opt,name=foo,def=hello!"
for len(tag) > 0 {
i := strings.IndexByte(tag, ',')
if i < 0 {
i = len(tag)
}
switch s := tag[:i]; {
case strings.HasPrefix(s, "name="):
p.OrigName = s[len("name="):]
case strings.HasPrefix(s, "json="):
p.JSONName = s[len("json="):]
case strings.HasPrefix(s, "enum="):
p.Enum = s[len("enum="):]
case strings.HasPrefix(s, "weak="):
p.Weak = s[len("weak="):]
case strings.Trim(s, "0123456789") == "":
n, _ := strconv.ParseUint(s, 10, 32)
p.Tag = int(n)
case s == "opt":
p.Optional = true
case f == "rep":
case s == "req":
p.Required = true
case s == "rep":
p.Repeated = true
case f == "packed":
case s == "varint" || s == "zigzag32" || s == "zigzag64":
p.Wire = s
p.WireType = WireVarint
case s == "fixed32":
p.Wire = s
p.WireType = WireFixed32
case s == "fixed64":
p.Wire = s
p.WireType = WireFixed64
case s == "bytes":
p.Wire = s
p.WireType = WireBytes
case s == "group":
p.Wire = s
p.WireType = WireStartGroup
case s == "packed":
p.Packed = true
case strings.HasPrefix(f, "name="):
p.OrigName = f[5:]
case strings.HasPrefix(f, "json="):
p.JSONName = f[5:]
case strings.HasPrefix(f, "enum="):
p.Enum = f[5:]
case f == "proto3":
p.proto3 = true
case f == "oneof":
p.oneof = true
case strings.HasPrefix(f, "def="):
case s == "proto3":
p.Proto3 = true
case s == "oneof":
p.Oneof = true
case strings.HasPrefix(s, "def="):
// The default tag is special in that everything afterwards is the
// default regardless of the presence of commas.
p.HasDefault = true
p.Default = f[4:] // rest of string
if i+1 < len(fields) {
// Commas aren't escaped, and def is always last.
p.Default += "," + strings.Join(fields[i+1:], ",")
break outer
}
p.Default, i = tag[len("def="):], len(tag)
}
tag = strings.TrimPrefix(tag[i:], ",")
}
}
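// exampleParseTag is a minimal sketch of the tag grammar handled above: the
// wire type, the field number, the cardinality, and name/json/def options.
func exampleParseTag() Properties {
	var p Properties
	p.Parse("bytes,49,opt,name=foo,def=hello!")
	// p.WireType == WireBytes, p.Tag == 49, p.Optional == true,
	// p.OrigName == "foo", p.HasDefault == true, p.Default == "hello!"
	return p
}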
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
// setFieldProps initializes the field properties for submessages and maps.
func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
switch t1 := typ; t1.Kind() {
case reflect.Ptr:
if t1.Elem().Kind() == reflect.Struct {
p.stype = t1.Elem()
}
case reflect.Slice:
if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
p.stype = t2.Elem()
}
case reflect.Map:
p.mtype = t1
p.MapKeyProp = &Properties{}
p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
p.MapValProp = &Properties{}
vtype := p.mtype.Elem()
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
// The value type is not a message (*T) or bytes ([]byte),
// so we need encoders for the pointer to this type.
vtype = reflect.PtrTo(vtype)
}
p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
}
if p.stype != nil {
if lockGetProp {
p.sprop = GetProperties(p.stype)
} else {
p.sprop = getPropertiesLocked(p.stype)
}
}
}
var (
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
)
// Init populates the properties from a protocol buffer struct tag.
//
// Deprecated: Do not use.
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
	p.Name = name
	p.OrigName = name
	if tag == "" {
		return
	}
	p.Parse(tag)

	if typ != nil && typ.Kind() == reflect.Map {
		p.MapKeyProp = new(Properties)
		p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
		p.MapValProp = new(Properties)
		p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
	}
}
var (
propertiesMu sync.RWMutex
propertiesMap = make(map[reflect.Type]*StructProperties)
)
var propertiesCache sync.Map // map[reflect.Type]*StructProperties
// GetProperties returns the list of properties for the type represented by t,
// which must be a generated protocol buffer message in the open-struct API,
// where protobuf message fields are represented by exported Go struct fields.
//
// Deprecated: Use protobuf reflection instead.
func GetProperties(t reflect.Type) *StructProperties {
	if p, ok := propertiesCache.Load(t); ok {
		return p.(*StructProperties)
	}
	p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
	return p.(*StructProperties)
}
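The caching in GetProperties is a plain double-checked sync.Map lookup: try Load first, then LoadOrStore so that racing first callers still end up sharing one StructProperties. A self-contained sketch of the same pattern (illustrative only, hypothetical names):

package main

import (
	"fmt"
	"reflect"
	"sync"
)

// typeCache memoizes an expensive per-type computation, mirroring the
// Load-then-LoadOrStore pattern used by GetProperties.
var typeCache sync.Map // map[reflect.Type]string

func describe(t reflect.Type) string {
	if v, ok := typeCache.Load(t); ok {
		return v.(string) // fast path: already computed
	}
	// Slow path: compute and publish. Racing goroutines may both compute,
	// but LoadOrStore guarantees every caller observes the same value.
	v, _ := typeCache.LoadOrStore(t, fmt.Sprintf("%v with %d fields", t.Kind(), t.NumField()))
	return v.(string)
}

func main() {
	type point struct{ X, Y int }
	fmt.Println(describe(reflect.TypeOf(point{})))
}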
type (
oneofFuncsIface interface {
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
}
oneofWrappersIface interface {
XXX_OneofWrappers() []interface{}
}
)
func newProperties(t reflect.Type) *StructProperties {
	if t.Kind() != reflect.Struct {
		panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
	}

	var hasOneof bool
	prop := new(StructProperties)

	// Construct a list of properties for each field in the struct.
	for i := 0; i < t.NumField(); i++ {
		p := new(Properties)
		f := t.Field(i)
		tagField := f.Tag.Get("protobuf")
		p.Init(f.Type, f.Name, tagField, &f)

		tagOneof := f.Tag.Get("protobuf_oneof")
		if tagOneof != "" {
			hasOneof = true
			p.OrigName = tagOneof
		}

		// Rename unrelated struct fields with the "XXX_" prefix since so much
		// user code simply checks for this to exclude special fields.
		if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
			p.Name = "XXX_" + p.Name
			p.OrigName = "XXX_" + p.OrigName
		} else if p.Weak != "" {
			p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
		}

		prop.Prop = append(prop.Prop, p)
	}

	// Construct a mapping of oneof field names to properties.
	if hasOneof {
		var oneofWrappers []interface{}
		if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
			oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
		}
		if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
			oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
		}
		if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
			if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
				oneofWrappers = m.ProtoMessageInfo().OneofWrappers
			}
		}

		prop.OneofTypes = make(map[string]*OneofProperties)
		for _, wrapper := range oneofWrappers {
			p := &OneofProperties{
				Type: reflect.ValueOf(wrapper).Type(), // *T
				Prop: new(Properties),
			}
			f := p.Type.Elem().Field(0)
			p.Prop.Name = f.Name
			p.Prop.Parse(f.Tag.Get("protobuf"))

			// Determine the struct field that contains this oneof.
			// Each wrapper is assignable to exactly one parent field.
			var foundOneof bool
			for i := 0; i < t.NumField() && !foundOneof; i++ {
				if p.Type.AssignableTo(t.Field(i).Type) {
					p.Field = i
					foundOneof = true
				}
			}
			if !foundOneof {
				panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
			}
			prop.OneofTypes[p.Prop.OrigName] = p
		}
	}

	return prop
}
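For illustration only: GetProperties works from struct tags alone, so a hand-written struct can stand in for generated code (the demo type below is hypothetical, not part of this package).

package main

import (
	"fmt"
	"reflect"

	"github.com/golang/protobuf/proto"
)

// demo is a hypothetical stand-in for a generated message struct; only the
// protobuf struct tags matter to GetProperties.
type demo struct {
	Id   int32  `protobuf:"varint,1,opt,name=id"`
	Name string `protobuf:"bytes,2,opt,name=name"`
}

func main() {
	props := proto.GetProperties(reflect.TypeOf(demo{}))
	for _, p := range props.Prop {
		fmt.Println(p.OrigName, p.Tag, p.Wire) // id 1 varint / name 2 bytes
	}
}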
// A global registry of enum types.
// The generated code will register the generated maps by calling RegisterEnum.
var enumValueMaps = make(map[string]map[string]int32)
// RegisterEnum is called from the generated code to install the enum descriptor
// maps into the global table to aid parsing text format protocol buffers.
func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
if _, ok := enumValueMaps[typeName]; ok {
panic("proto: duplicate enum registered: " + typeName)
}
enumValueMaps[typeName] = valueMap
}
// EnumValueMap returns the mapping from names to integers of the
// enum type enumType, or a nil if not found.
func EnumValueMap(enumType string) map[string]int32 {
return enumValueMaps[enumType]
}
// A registry of all linked message types.
// The string is a fully-qualified proto name ("pkg.Message").
var (
protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
revProtoTypes = make(map[reflect.Type]string)
)
// RegisterType is called from generated code and maps from the fully qualified
// proto name to the type (pointer to struct) of the protocol buffer.
func RegisterType(x Message, name string) {
if _, ok := protoTypedNils[name]; ok {
// TODO: Some day, make this a panic.
log.Printf("proto: duplicate proto type registered: %s", name)
return
}
t := reflect.TypeOf(x)
if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
// Generated code always calls RegisterType with nil x.
// This check is just for extra safety.
protoTypedNils[name] = x
} else {
protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
}
revProtoTypes[t] = name
}
// RegisterMapType is called from generated code and maps from the fully qualified
// proto name to the native map type of the proto map definition.
func RegisterMapType(x interface{}, name string) {
if reflect.TypeOf(x).Kind() != reflect.Map {
panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
}
if _, ok := protoMapTypes[name]; ok {
log.Printf("proto: duplicate proto type registered: %s", name)
return
}
t := reflect.TypeOf(x)
protoMapTypes[name] = t
revProtoTypes[t] = name
}
// MessageName returns the fully-qualified proto name for the given message type.
func MessageName(x Message) string {
type xname interface {
XXX_MessageName() string
}
if m, ok := x.(xname); ok {
return m.XXX_MessageName()
}
return revProtoTypes[reflect.TypeOf(x)]
}
// MessageType returns the message type (pointer to struct) for a named message.
// The type is not guaranteed to implement proto.Message if the name refers to a
// map entry.
func MessageType(name string) reflect.Type {
if t, ok := protoTypedNils[name]; ok {
return reflect.TypeOf(t)
}
return protoMapTypes[name]
}
// A registry of all linked proto files.
var (
protoFiles = make(map[string][]byte) // file name => fileDescriptor
)
// RegisterFile is called from generated code and maps from the
// full file name of a .proto file to its compressed FileDescriptorProto.
func RegisterFile(filename string, fileDescriptor []byte) {
protoFiles[filename] = fileDescriptor
}
// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
func FileDescriptor(filename string) []byte { return protoFiles[filename] }
func (sp *StructProperties) Len() int { return len(sp.Prop) }
func (sp *StructProperties) Less(i, j int) bool { return false }
func (sp *StructProperties) Swap(i, j int) { return }

167
vendor/github.com/golang/protobuf/proto/proto.go generated vendored Normal file
View file

@ -0,0 +1,167 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package proto provides functionality for handling protocol buffer messages.
// In particular, it provides marshaling and unmarshaling between a protobuf
// message and the binary wire format.
//
// See https://developers.google.com/protocol-buffers/docs/gotutorial for
// more information.
//
// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
package proto
import (
protoV2 "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/runtime/protoiface"
"google.golang.org/protobuf/runtime/protoimpl"
)
const (
ProtoPackageIsVersion1 = true
ProtoPackageIsVersion2 = true
ProtoPackageIsVersion3 = true
ProtoPackageIsVersion4 = true
)
// GeneratedEnum is any enum type generated by protoc-gen-go
// which is a named int32 kind.
// This type exists for documentation purposes.
type GeneratedEnum interface{}
// GeneratedMessage is any message type generated by protoc-gen-go
// which is a pointer to a named struct kind.
// This type exists for documentation purposes.
type GeneratedMessage interface{}
// Message is a protocol buffer message.
//
// This is the v1 version of the message interface and is marginally better
// than an empty interface as it lacks any method to programmatically interact
// with the contents of the message.
//
// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
// exposes protobuf reflection as a first-class feature of the interface.
//
// To convert a v1 message to a v2 message, use the MessageV2 function.
// To convert a v2 message to a v1 message, use the MessageV1 function.
type Message = protoiface.MessageV1
// MessageV1 converts either a v1 or v2 message to a v1 message.
// It returns nil if m is nil.
func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
return protoimpl.X.ProtoMessageV1Of(m)
}
// MessageV2 converts either a v1 or v2 message to a v2 message.
// It returns nil if m is nil.
func MessageV2(m GeneratedMessage) protoV2.Message {
return protoimpl.X.ProtoMessageV2Of(m)
}
// MessageReflect returns a reflective view for a message.
// It returns nil if m is nil.
func MessageReflect(m Message) protoreflect.Message {
return protoimpl.X.MessageOf(m)
}
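For illustration only: converting between the v1 and v2 message interfaces is a pass-through for modern generated types. The sketch assumes the emptypb well-known-type package from google.golang.org/protobuf is available, as it is in this module.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	m2 := &emptypb.Empty{}      // a v2 message
	m1 := proto.MessageV1(m2)   // view it through the v1 interface
	back := proto.MessageV2(m1) // and convert back again
	_ = back

	// MessageReflect exposes protobuf reflection for a v1 message.
	fmt.Println(proto.MessageReflect(m1).Descriptor().FullName()) // google.protobuf.Empty
}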
// Marshaler is implemented by messages that can marshal themselves.
// This interface is used by the following functions: Size, Marshal,
// Buffer.Marshal, and Buffer.EncodeMessage.
//
// Deprecated: Do not implement.
type Marshaler interface {
// Marshal formats the encoded bytes of the message.
// It should be deterministic and emit valid protobuf wire data.
// The caller takes ownership of the returned buffer.
Marshal() ([]byte, error)
}
// Unmarshaler is implemented by messages that can unmarshal themselves.
// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
//
// Deprecated: Do not implement.
type Unmarshaler interface {
// Unmarshal parses the encoded bytes of the protobuf wire input.
// The provided buffer is only valid for the duration of the method call.
// It should not reset the receiver message.
Unmarshal([]byte) error
}
// Merger is implemented by messages that can merge themselves.
// This interface is used by the following functions: Clone and Merge.
//
// Deprecated: Do not implement.
type Merger interface {
// Merge merges the contents of src into the receiver message.
// It clones all data structures in src such that it aliases no mutable
// memory referenced by src.
Merge(src Message)
}
// RequiredNotSetError is an error type returned when
// marshaling or unmarshaling a message with missing required fields.
type RequiredNotSetError struct {
err error
}
func (e *RequiredNotSetError) Error() string {
if e.err != nil {
return e.err.Error()
}
return "proto: required field not set"
}
func (e *RequiredNotSetError) RequiredNotSet() bool {
return true
}
func checkRequiredNotSet(m protoV2.Message) error {
if err := protoV2.CheckInitialized(m); err != nil {
return &RequiredNotSetError{err: err}
}
return nil
}
// Clone returns a deep copy of src.
func Clone(src Message) Message {
return MessageV1(protoV2.Clone(MessageV2(src)))
}
// Merge merges src into dst, which must be messages of the same type.
//
// Populated scalar fields in src are copied to dst, while populated
// singular messages in src are merged into dst by recursively calling Merge.
// The elements of every list field in src are appended to the corresponding
// list fields in dst. The entries of every map field in src are copied into
// the corresponding map field in dst, possibly replacing existing entries.
// The unknown fields of src are appended to the unknown fields of dst.
func Merge(dst, src Message) {
protoV2.Merge(MessageV2(dst), MessageV2(src))
}
// Equal reports whether two messages are equal.
// If two messages marshal to the same bytes under deterministic serialization,
// then Equal is guaranteed to report true.
//
// Two messages are equal if they are the same protobuf message type,
// have the same set of populated known and extension field values,
// and the same set of unknown fields values.
//
// Scalar values are compared with the equivalent of the == operator in Go,
// except bytes values which are compared using bytes.Equal and
// floating point values which specially treat NaNs as equal.
// Message values are compared by recursively calling Equal.
// Lists are equal if each element value is also equal.
// Maps are equal if they have the same set of keys, where the pair of values
// for each key is also equal.
func Equal(x, y Message) bool {
return protoV2.Equal(MessageV2(x), MessageV2(y))
}
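For illustration only: Clone and Equal in action, using the wrappers well-known types from github.com/golang/protobuf/ptypes/wrappers (assumed available).

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	a := &wrappers.StringValue{Value: "hello"}
	b := proto.Clone(a).(*wrappers.StringValue) // deep copy of the same concrete type

	fmt.Println(proto.Equal(a, b)) // true: same type, same populated fields

	b.Value = "world"
	fmt.Println(proto.Equal(a, b)) // false: a field value differs
}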
func isMessageSet(md protoreflect.MessageDescriptor) bool {
ms, ok := md.(interface{ IsMessageSet() bool })
return ok && ms.IsMessageSet()
}

323
vendor/github.com/golang/protobuf/proto/registry.go generated vendored Normal file
View file

@ -0,0 +1,323 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"bytes"
"compress/gzip"
"fmt"
"io/ioutil"
"reflect"
"strings"
"sync"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/runtime/protoimpl"
)
// filePath is the path to the proto source file.
type filePath = string // e.g., "google/protobuf/descriptor.proto"
// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
type fileDescGZIP = []byte
var fileCache sync.Map // map[filePath]fileDescGZIP
// RegisterFile is called from generated code to register the compressed
// FileDescriptorProto with the file path for a proto source file.
//
// Deprecated: Use protoregistry.GlobalFiles.Register instead.
func RegisterFile(s filePath, d fileDescGZIP) {
// Decompress the descriptor.
zr, err := gzip.NewReader(bytes.NewReader(d))
if err != nil {
panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
}
b, err := ioutil.ReadAll(zr)
if err != nil {
panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
}
// Construct a protoreflect.FileDescriptor from the raw descriptor.
// Note that DescBuilder.Build automatically registers the constructed
// file descriptor with the v2 registry.
protoimpl.DescBuilder{RawDescriptor: b}.Build()
// Locally cache the raw descriptor form for the file.
fileCache.Store(s, d)
}
// FileDescriptor returns the compressed FileDescriptorProto given the file path
// for a proto source file. It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalFiles.RangeFilesByPath instead.
func FileDescriptor(s filePath) fileDescGZIP {
if v, ok := fileCache.Load(s); ok {
return v.(fileDescGZIP)
}
// Find the descriptor in the v2 registry.
var b []byte
if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok {
b = fd.ProtoLegacyRawDesc()
} else {
// TODO: Use protodesc.ToFileDescriptorProto to construct
// a descriptorpb.FileDescriptorProto and marshal it.
// However, doing so causes the proto package to have a dependency
// on descriptorpb, leading to cyclic dependency issues.
}
}
// Locally cache the raw descriptor form for the file.
if len(b) > 0 {
v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
return v.(fileDescGZIP)
}
return nil
}
// enumName is the name of an enum. For historical reasons, the enum name is
// neither the full Go name nor the full protobuf name of the enum.
// The name is the dot-separated combination of just the proto package that the
// enum is declared within followed by the Go type name of the generated enum.
type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"
// enumsByName maps enum values by name to their numeric counterpart.
type enumsByName = map[string]int32
// enumsByNumber maps enum values by number to their name counterpart.
type enumsByNumber = map[int32]string
var enumCache sync.Map // map[enumName]enumsByName
var numFilesCache sync.Map // map[protoreflect.FullName]int
// RegisterEnum is called from the generated code to register the mapping of
// enum value names to enum numbers for the enum identified by s.
//
// Deprecated: Use protoregistry.GlobalTypes.Register instead.
func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
if _, ok := enumCache.Load(s); ok {
panic("proto: duplicate enum registered: " + s)
}
enumCache.Store(s, m)
// This does not forward registration to the v2 registry since this API
// lacks sufficient information to construct a complete v2 enum descriptor.
}
// EnumValueMap returns the mapping from enum value names to enum numbers for
// the enum of the given name. It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
func EnumValueMap(s enumName) enumsByName {
if v, ok := enumCache.Load(s); ok {
return v.(enumsByName)
}
// Check whether the cache is stale. If the number of files in the current
// package differs, then it means that some enums may have been recently
// registered upstream that we do not know about.
var protoPkg protoreflect.FullName
if i := strings.LastIndexByte(s, '.'); i >= 0 {
protoPkg = protoreflect.FullName(s[:i])
}
v, _ := numFilesCache.Load(protoPkg)
numFiles, _ := v.(int)
if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
return nil // cache is up-to-date; was not found earlier
}
// Update the enum cache for all enums declared in the given proto package.
numFiles = 0
protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
name := protoimpl.X.LegacyEnumName(ed)
if _, ok := enumCache.Load(name); !ok {
m := make(enumsByName)
evs := ed.Values()
for i := evs.Len() - 1; i >= 0; i-- {
ev := evs.Get(i)
m[string(ev.Name())] = int32(ev.Number())
}
enumCache.LoadOrStore(name, m)
}
})
numFiles++
return true
})
numFilesCache.Store(protoPkg, numFiles)
// Check cache again for enum map.
if v, ok := enumCache.Load(s); ok {
return v.(enumsByName)
}
return nil
}
// walkEnums recursively walks all enums declared in d.
func walkEnums(d interface {
Enums() protoreflect.EnumDescriptors
Messages() protoreflect.MessageDescriptors
}, f func(protoreflect.EnumDescriptor)) {
eds := d.Enums()
for i := eds.Len() - 1; i >= 0; i-- {
f(eds.Get(i))
}
mds := d.Messages()
for i := mds.Len() - 1; i >= 0; i-- {
walkEnums(mds.Get(i), f)
}
}
// messageName is the full name of protobuf message.
type messageName = string
var messageTypeCache sync.Map // map[messageName]reflect.Type
// RegisterType is called from generated code to register the message Go type
// for a message of the given name.
//
// Deprecated: Use protoregistry.GlobalTypes.Register instead.
func RegisterType(m Message, s messageName) {
mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
panic(err)
}
messageTypeCache.Store(s, reflect.TypeOf(m))
}
// RegisterMapType is called from generated code to register the Go map type
// for a protobuf message representing a map entry.
//
// Deprecated: Do not use.
func RegisterMapType(m interface{}, s messageName) {
t := reflect.TypeOf(m)
if t.Kind() != reflect.Map {
panic(fmt.Sprintf("invalid map kind: %v", t))
}
if _, ok := messageTypeCache.Load(s); ok {
panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
}
messageTypeCache.Store(s, t)
}
// MessageType returns the message type for a named message.
// It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
func MessageType(s messageName) reflect.Type {
if v, ok := messageTypeCache.Load(s); ok {
return v.(reflect.Type)
}
// Derive the message type from the v2 registry.
var t reflect.Type
if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
t = messageGoType(mt)
}
// If we could not get a concrete type, it is possible that it is a
// pseudo-message for a map entry.
if t == nil {
d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
kt := goTypeForField(md.Fields().ByNumber(1))
vt := goTypeForField(md.Fields().ByNumber(2))
t = reflect.MapOf(kt, vt)
}
}
// Locally cache the message type for the given name.
if t != nil {
v, _ := messageTypeCache.LoadOrStore(s, t)
return v.(reflect.Type)
}
return nil
}
func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
switch k := fd.Kind(); k {
case protoreflect.EnumKind:
if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
return enumGoType(et)
}
return reflect.TypeOf(protoreflect.EnumNumber(0))
case protoreflect.MessageKind, protoreflect.GroupKind:
if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
return messageGoType(mt)
}
return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
default:
return reflect.TypeOf(fd.Default().Interface())
}
}
func enumGoType(et protoreflect.EnumType) reflect.Type {
return reflect.TypeOf(et.New(0))
}
func messageGoType(mt protoreflect.MessageType) reflect.Type {
return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
}
// MessageName returns the full protobuf name for the given message type.
//
// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
func MessageName(m Message) messageName {
if m == nil {
return ""
}
if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
return m.XXX_MessageName()
}
return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
}
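For illustration only: name-based lookups against the registry, assuming a linked well-known type such as emptypb.Empty.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	name := proto.MessageName(&emptypb.Empty{})
	fmt.Println(name) // google.protobuf.Empty

	// MessageType resolves the registered Go type back from the full name.
	fmt.Println(proto.MessageType(name)) // typically *emptypb.Empty
}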
// RegisterExtension is called from the generated code to register
// the extension descriptor.
//
// Deprecated: Use protoregistry.GlobalTypes.Register instead.
func RegisterExtension(d *ExtensionDesc) {
if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
panic(err)
}
}
type extensionsByNumber = map[int32]*ExtensionDesc
var extensionCache sync.Map // map[messageName]extensionsByNumber
// RegisteredExtensions returns a map of the registered extensions for the
// provided protobuf message, indexed by the extension field number.
//
// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
func RegisteredExtensions(m Message) extensionsByNumber {
// Check whether the cache is stale. If the number of extensions for
// the given message differs, then it means that some extensions were
// recently registered upstream that we do not know about.
s := MessageName(m)
v, _ := extensionCache.Load(s)
xs, _ := v.(extensionsByNumber)
if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
return xs // cache is up-to-date
}
// Cache is stale, re-compute the extensions map.
xs = make(extensionsByNumber)
protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
if xd, ok := xt.(*ExtensionDesc); ok {
xs[int32(xt.TypeDescriptor().Number())] = xd
} else {
// TODO: This implies that the protoreflect.ExtensionType is a
// custom type not generated by protoc-gen-go. We could try and
// convert the type to an ExtensionDesc.
}
return true
})
extensionCache.Store(s, xs)
return xs
}

File diff suppressed because it is too large Load diff

View file

@ -1,654 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
import (
"fmt"
"reflect"
"strings"
"sync"
"sync/atomic"
)
// Merge merges the src message into dst.
// This assumes that dst and src are of the same type and are non-nil.
func (a *InternalMessageInfo) Merge(dst, src Message) {
mi := atomicLoadMergeInfo(&a.merge)
if mi == nil {
mi = getMergeInfo(reflect.TypeOf(dst).Elem())
atomicStoreMergeInfo(&a.merge, mi)
}
mi.merge(toPointer(&dst), toPointer(&src))
}
type mergeInfo struct {
typ reflect.Type
initialized int32 // 0: only typ is valid, 1: everything is valid
lock sync.Mutex
fields []mergeFieldInfo
unrecognized field // Offset of XXX_unrecognized
}
type mergeFieldInfo struct {
field field // Offset of field, guaranteed to be valid
// isPointer reports whether the value in the field is a pointer.
// This is true for the following situations:
// * Pointer to struct
// * Pointer to basic type (proto2 only)
// * Slice (first value in slice header is a pointer)
// * String (first value in string header is a pointer)
isPointer bool
// basicWidth reports the width of the field assuming that it is directly
// embedded in the struct (as is the case for basic types in proto3).
// The possible values are:
// 0: invalid
// 1: bool
// 4: int32, uint32, float32
// 8: int64, uint64, float64
basicWidth int
// Where dst and src are pointers to the types being merged.
merge func(dst, src pointer)
}
var (
mergeInfoMap = map[reflect.Type]*mergeInfo{}
mergeInfoLock sync.Mutex
)
func getMergeInfo(t reflect.Type) *mergeInfo {
mergeInfoLock.Lock()
defer mergeInfoLock.Unlock()
mi := mergeInfoMap[t]
if mi == nil {
mi = &mergeInfo{typ: t}
mergeInfoMap[t] = mi
}
return mi
}
// merge merges src into dst assuming they are both of type *mi.typ.
func (mi *mergeInfo) merge(dst, src pointer) {
if dst.isNil() {
panic("proto: nil destination")
}
if src.isNil() {
return // Nothing to do.
}
if atomic.LoadInt32(&mi.initialized) == 0 {
mi.computeMergeInfo()
}
for _, fi := range mi.fields {
sfp := src.offset(fi.field)
// As an optimization, we can avoid the merge function call cost
// if we know for sure that the source will have no effect
// by checking if it is the zero value.
if unsafeAllowed {
if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
continue
}
if fi.basicWidth > 0 {
switch {
case fi.basicWidth == 1 && !*sfp.toBool():
continue
case fi.basicWidth == 4 && *sfp.toUint32() == 0:
continue
case fi.basicWidth == 8 && *sfp.toUint64() == 0:
continue
}
}
}
dfp := dst.offset(fi.field)
fi.merge(dfp, sfp)
}
// TODO: Make this faster?
out := dst.asPointerTo(mi.typ).Elem()
in := src.asPointerTo(mi.typ).Elem()
if emIn, err := extendable(in.Addr().Interface()); err == nil {
emOut, _ := extendable(out.Addr().Interface())
mIn, muIn := emIn.extensionsRead()
if mIn != nil {
mOut := emOut.extensionsWrite()
muIn.Lock()
mergeExtension(mOut, mIn)
muIn.Unlock()
}
}
if mi.unrecognized.IsValid() {
if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
}
}
}
func (mi *mergeInfo) computeMergeInfo() {
mi.lock.Lock()
defer mi.lock.Unlock()
if mi.initialized != 0 {
return
}
t := mi.typ
n := t.NumField()
props := GetProperties(t)
for i := 0; i < n; i++ {
f := t.Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
mfi := mergeFieldInfo{field: toField(&f)}
tf := f.Type
// As an optimization, we can avoid the merge function call cost
// if we know for sure that the source will have no effect
// by checking if it is the zero value.
if unsafeAllowed {
switch tf.Kind() {
case reflect.Ptr, reflect.Slice, reflect.String:
// As a special case, we assume slices and strings are pointers
// since we know that the first field in the SliceHeader or
// StringHeader is a data pointer.
mfi.isPointer = true
case reflect.Bool:
mfi.basicWidth = 1
case reflect.Int32, reflect.Uint32, reflect.Float32:
mfi.basicWidth = 4
case reflect.Int64, reflect.Uint64, reflect.Float64:
mfi.basicWidth = 8
}
}
// Unwrap tf to get at its most basic type.
var isPointer, isSlice bool
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
isSlice = true
tf = tf.Elem()
}
if tf.Kind() == reflect.Ptr {
isPointer = true
tf = tf.Elem()
}
if isPointer && isSlice && tf.Kind() != reflect.Struct {
panic("both pointer and slice for basic type in " + tf.Name())
}
switch tf.Kind() {
case reflect.Int32:
switch {
case isSlice: // E.g., []int32
mfi.merge = func(dst, src pointer) {
// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
/*
sfsp := src.toInt32Slice()
if *sfsp != nil {
dfsp := dst.toInt32Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []int64{}
}
}
*/
sfs := src.getInt32Slice()
if sfs != nil {
dfs := dst.getInt32Slice()
dfs = append(dfs, sfs...)
if dfs == nil {
dfs = []int32{}
}
dst.setInt32Slice(dfs)
}
}
case isPointer: // E.g., *int32
mfi.merge = func(dst, src pointer) {
// NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
/*
sfpp := src.toInt32Ptr()
if *sfpp != nil {
dfpp := dst.toInt32Ptr()
if *dfpp == nil {
*dfpp = Int32(**sfpp)
} else {
**dfpp = **sfpp
}
}
*/
sfp := src.getInt32Ptr()
if sfp != nil {
dfp := dst.getInt32Ptr()
if dfp == nil {
dst.setInt32Ptr(*sfp)
} else {
*dfp = *sfp
}
}
}
default: // E.g., int32
mfi.merge = func(dst, src pointer) {
if v := *src.toInt32(); v != 0 {
*dst.toInt32() = v
}
}
}
case reflect.Int64:
switch {
case isSlice: // E.g., []int64
mfi.merge = func(dst, src pointer) {
sfsp := src.toInt64Slice()
if *sfsp != nil {
dfsp := dst.toInt64Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []int64{}
}
}
}
case isPointer: // E.g., *int64
mfi.merge = func(dst, src pointer) {
sfpp := src.toInt64Ptr()
if *sfpp != nil {
dfpp := dst.toInt64Ptr()
if *dfpp == nil {
*dfpp = Int64(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., int64
mfi.merge = func(dst, src pointer) {
if v := *src.toInt64(); v != 0 {
*dst.toInt64() = v
}
}
}
case reflect.Uint32:
switch {
case isSlice: // E.g., []uint32
mfi.merge = func(dst, src pointer) {
sfsp := src.toUint32Slice()
if *sfsp != nil {
dfsp := dst.toUint32Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []uint32{}
}
}
}
case isPointer: // E.g., *uint32
mfi.merge = func(dst, src pointer) {
sfpp := src.toUint32Ptr()
if *sfpp != nil {
dfpp := dst.toUint32Ptr()
if *dfpp == nil {
*dfpp = Uint32(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., uint32
mfi.merge = func(dst, src pointer) {
if v := *src.toUint32(); v != 0 {
*dst.toUint32() = v
}
}
}
case reflect.Uint64:
switch {
case isSlice: // E.g., []uint64
mfi.merge = func(dst, src pointer) {
sfsp := src.toUint64Slice()
if *sfsp != nil {
dfsp := dst.toUint64Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []uint64{}
}
}
}
case isPointer: // E.g., *uint64
mfi.merge = func(dst, src pointer) {
sfpp := src.toUint64Ptr()
if *sfpp != nil {
dfpp := dst.toUint64Ptr()
if *dfpp == nil {
*dfpp = Uint64(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., uint64
mfi.merge = func(dst, src pointer) {
if v := *src.toUint64(); v != 0 {
*dst.toUint64() = v
}
}
}
case reflect.Float32:
switch {
case isSlice: // E.g., []float32
mfi.merge = func(dst, src pointer) {
sfsp := src.toFloat32Slice()
if *sfsp != nil {
dfsp := dst.toFloat32Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []float32{}
}
}
}
case isPointer: // E.g., *float32
mfi.merge = func(dst, src pointer) {
sfpp := src.toFloat32Ptr()
if *sfpp != nil {
dfpp := dst.toFloat32Ptr()
if *dfpp == nil {
*dfpp = Float32(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., float32
mfi.merge = func(dst, src pointer) {
if v := *src.toFloat32(); v != 0 {
*dst.toFloat32() = v
}
}
}
case reflect.Float64:
switch {
case isSlice: // E.g., []float64
mfi.merge = func(dst, src pointer) {
sfsp := src.toFloat64Slice()
if *sfsp != nil {
dfsp := dst.toFloat64Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []float64{}
}
}
}
case isPointer: // E.g., *float64
mfi.merge = func(dst, src pointer) {
sfpp := src.toFloat64Ptr()
if *sfpp != nil {
dfpp := dst.toFloat64Ptr()
if *dfpp == nil {
*dfpp = Float64(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., float64
mfi.merge = func(dst, src pointer) {
if v := *src.toFloat64(); v != 0 {
*dst.toFloat64() = v
}
}
}
case reflect.Bool:
switch {
case isSlice: // E.g., []bool
mfi.merge = func(dst, src pointer) {
sfsp := src.toBoolSlice()
if *sfsp != nil {
dfsp := dst.toBoolSlice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []bool{}
}
}
}
case isPointer: // E.g., *bool
mfi.merge = func(dst, src pointer) {
sfpp := src.toBoolPtr()
if *sfpp != nil {
dfpp := dst.toBoolPtr()
if *dfpp == nil {
*dfpp = Bool(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., bool
mfi.merge = func(dst, src pointer) {
if v := *src.toBool(); v {
*dst.toBool() = v
}
}
}
case reflect.String:
switch {
case isSlice: // E.g., []string
mfi.merge = func(dst, src pointer) {
sfsp := src.toStringSlice()
if *sfsp != nil {
dfsp := dst.toStringSlice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []string{}
}
}
}
case isPointer: // E.g., *string
mfi.merge = func(dst, src pointer) {
sfpp := src.toStringPtr()
if *sfpp != nil {
dfpp := dst.toStringPtr()
if *dfpp == nil {
*dfpp = String(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., string
mfi.merge = func(dst, src pointer) {
if v := *src.toString(); v != "" {
*dst.toString() = v
}
}
}
case reflect.Slice:
isProto3 := props.Prop[i].proto3
switch {
case isPointer:
panic("bad pointer in byte slice case in " + tf.Name())
case tf.Elem().Kind() != reflect.Uint8:
panic("bad element kind in byte slice case in " + tf.Name())
case isSlice: // E.g., [][]byte
mfi.merge = func(dst, src pointer) {
sbsp := src.toBytesSlice()
if *sbsp != nil {
dbsp := dst.toBytesSlice()
for _, sb := range *sbsp {
if sb == nil {
*dbsp = append(*dbsp, nil)
} else {
*dbsp = append(*dbsp, append([]byte{}, sb...))
}
}
if *dbsp == nil {
*dbsp = [][]byte{}
}
}
}
default: // E.g., []byte
mfi.merge = func(dst, src pointer) {
sbp := src.toBytes()
if *sbp != nil {
dbp := dst.toBytes()
if !isProto3 || len(*sbp) > 0 {
*dbp = append([]byte{}, *sbp...)
}
}
}
}
case reflect.Struct:
switch {
case !isPointer:
panic(fmt.Sprintf("message field %s without pointer", tf))
case isSlice: // E.g., []*pb.T
mi := getMergeInfo(tf)
mfi.merge = func(dst, src pointer) {
sps := src.getPointerSlice()
if sps != nil {
dps := dst.getPointerSlice()
for _, sp := range sps {
var dp pointer
if !sp.isNil() {
dp = valToPointer(reflect.New(tf))
mi.merge(dp, sp)
}
dps = append(dps, dp)
}
if dps == nil {
dps = []pointer{}
}
dst.setPointerSlice(dps)
}
}
default: // E.g., *pb.T
mi := getMergeInfo(tf)
mfi.merge = func(dst, src pointer) {
sp := src.getPointer()
if !sp.isNil() {
dp := dst.getPointer()
if dp.isNil() {
dp = valToPointer(reflect.New(tf))
dst.setPointer(dp)
}
mi.merge(dp, sp)
}
}
}
case reflect.Map:
switch {
case isPointer || isSlice:
panic("bad pointer or slice in map case in " + tf.Name())
default: // E.g., map[K]V
mfi.merge = func(dst, src pointer) {
sm := src.asPointerTo(tf).Elem()
if sm.Len() == 0 {
return
}
dm := dst.asPointerTo(tf).Elem()
if dm.IsNil() {
dm.Set(reflect.MakeMap(tf))
}
switch tf.Elem().Kind() {
case reflect.Ptr: // Proto struct (e.g., *T)
for _, key := range sm.MapKeys() {
val := sm.MapIndex(key)
val = reflect.ValueOf(Clone(val.Interface().(Message)))
dm.SetMapIndex(key, val)
}
case reflect.Slice: // E.g. Bytes type (e.g., []byte)
for _, key := range sm.MapKeys() {
val := sm.MapIndex(key)
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
dm.SetMapIndex(key, val)
}
default: // Basic type (e.g., string)
for _, key := range sm.MapKeys() {
val := sm.MapIndex(key)
dm.SetMapIndex(key, val)
}
}
}
}
case reflect.Interface:
// Must be oneof field.
switch {
case isPointer || isSlice:
panic("bad pointer or slice in interface case in " + tf.Name())
default: // E.g., interface{}
// TODO: Make this faster?
mfi.merge = func(dst, src pointer) {
su := src.asPointerTo(tf).Elem()
if !su.IsNil() {
du := dst.asPointerTo(tf).Elem()
typ := su.Elem().Type()
if du.IsNil() || du.Elem().Type() != typ {
du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
}
sv := su.Elem().Elem().Field(0)
if sv.Kind() == reflect.Ptr && sv.IsNil() {
return
}
dv := du.Elem().Elem().Field(0)
if dv.Kind() == reflect.Ptr && dv.IsNil() {
dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
}
switch sv.Type().Kind() {
case reflect.Ptr: // Proto struct (e.g., *T)
Merge(dv.Interface().(Message), sv.Interface().(Message))
case reflect.Slice: // E.g. Bytes type (e.g., []byte)
dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
default: // Basic type (e.g., string)
dv.Set(sv)
}
}
}
}
default:
panic(fmt.Sprintf("merger not found for type:%s", tf))
}
mi.fields = append(mi.fields, mfi)
}
mi.unrecognized = invalidField
if f, ok := t.FieldByName("XXX_unrecognized"); ok {
if f.Type != reflect.TypeOf([]byte{}) {
panic("expected XXX_unrecognized to be of type []byte")
}
mi.unrecognized = toField(&f)
}
atomic.StoreInt32(&mi.initialized, 1)
}
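The reflection-based merger above (now removed) implements the same semantics that the remaining proto.Merge wrapper delegates to protoV2.Merge: populated scalars overwrite, lists append, map entries are copied. A minimal usage sketch, assuming the wrappers well-known types are available:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	dst := &wrappers.StringValue{}
	src := &wrappers.StringValue{Value: "merged"}

	proto.Merge(dst, src) // populated scalar fields in src overwrite dst
	fmt.Println(dst.Value)
}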

File diff suppressed because it is too large Load diff

View file

@ -1,845 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
// Functions for writing the text protocol buffer format.
import (
"bufio"
"bytes"
"encoding"
"errors"
"fmt"
"io"
"log"
"math"
"reflect"
"sort"
"strings"
)
var (
newline = []byte("\n")
spaces = []byte(" ")
endBraceNewline = []byte("}\n")
backslashN = []byte{'\\', 'n'}
backslashR = []byte{'\\', 'r'}
backslashT = []byte{'\\', 't'}
backslashDQ = []byte{'\\', '"'}
backslashBS = []byte{'\\', '\\'}
posInf = []byte("inf")
negInf = []byte("-inf")
nan = []byte("nan")
)
type writer interface {
io.Writer
WriteByte(byte) error
}
// textWriter is an io.Writer that tracks its indentation level.
type textWriter struct {
ind int
complete bool // if the current position is a complete line
compact bool // whether to write out as a one-liner
w writer
}
func (w *textWriter) WriteString(s string) (n int, err error) {
if !strings.Contains(s, "\n") {
if !w.compact && w.complete {
w.writeIndent()
}
w.complete = false
return io.WriteString(w.w, s)
}
// WriteString is typically called without newlines, so this
// codepath and its copy are rare. We copy to avoid
// duplicating all of Write's logic here.
return w.Write([]byte(s))
}
func (w *textWriter) Write(p []byte) (n int, err error) {
newlines := bytes.Count(p, newline)
if newlines == 0 {
if !w.compact && w.complete {
w.writeIndent()
}
n, err = w.w.Write(p)
w.complete = false
return n, err
}
frags := bytes.SplitN(p, newline, newlines+1)
if w.compact {
for i, frag := range frags {
if i > 0 {
if err := w.w.WriteByte(' '); err != nil {
return n, err
}
n++
}
nn, err := w.w.Write(frag)
n += nn
if err != nil {
return n, err
}
}
return n, nil
}
for i, frag := range frags {
if w.complete {
w.writeIndent()
}
nn, err := w.w.Write(frag)
n += nn
if err != nil {
return n, err
}
if i+1 < len(frags) {
if err := w.w.WriteByte('\n'); err != nil {
return n, err
}
n++
}
}
w.complete = len(frags[len(frags)-1]) == 0
return n, nil
}
func (w *textWriter) WriteByte(c byte) error {
if w.compact && c == '\n' {
c = ' '
}
if !w.compact && w.complete {
w.writeIndent()
}
err := w.w.WriteByte(c)
w.complete = c == '\n'
return err
}
func (w *textWriter) indent() { w.ind++ }
func (w *textWriter) unindent() {
if w.ind == 0 {
log.Print("proto: textWriter unindented too far")
return
}
w.ind--
}
func writeName(w *textWriter, props *Properties) error {
if _, err := w.WriteString(props.OrigName); err != nil {
return err
}
if props.Wire != "group" {
return w.WriteByte(':')
}
return nil
}
func requiresQuotes(u string) bool {
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
for _, ch := range u {
switch {
case ch == '.' || ch == '/' || ch == '_':
continue
case '0' <= ch && ch <= '9':
continue
case 'A' <= ch && ch <= 'Z':
continue
case 'a' <= ch && ch <= 'z':
continue
default:
return true
}
}
return false
}
// isAny reports whether sv is a google.protobuf.Any message
func isAny(sv reflect.Value) bool {
type wkt interface {
XXX_WellKnownType() string
}
t, ok := sv.Addr().Interface().(wkt)
return ok && t.XXX_WellKnownType() == "Any"
}
// writeProto3Any writes an expanded google.protobuf.Any message.
//
// It returns (false, nil) if sv's value can't be unmarshaled (e.g. because
// required messages are not linked in).
//
// It returns (true, error) when sv was written in expanded format or an error
// was encountered.
func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
turl := sv.FieldByName("TypeUrl")
val := sv.FieldByName("Value")
if !turl.IsValid() || !val.IsValid() {
return true, errors.New("proto: invalid google.protobuf.Any message")
}
b, ok := val.Interface().([]byte)
if !ok {
return true, errors.New("proto: invalid google.protobuf.Any message")
}
parts := strings.Split(turl.String(), "/")
mt := MessageType(parts[len(parts)-1])
if mt == nil {
return false, nil
}
m := reflect.New(mt.Elem())
if err := Unmarshal(b, m.Interface().(Message)); err != nil {
return false, nil
}
w.Write([]byte("["))
u := turl.String()
if requiresQuotes(u) {
writeString(w, u)
} else {
w.Write([]byte(u))
}
if w.compact {
w.Write([]byte("]:<"))
} else {
w.Write([]byte("]: <\n"))
w.ind++
}
if err := tm.writeStruct(w, m.Elem()); err != nil {
return true, err
}
if w.compact {
w.Write([]byte("> "))
} else {
w.ind--
w.Write([]byte(">\n"))
}
return true, nil
}
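For illustration only: ExpandAny in practice. The sketch packs a message into a google.protobuf.Any via ptypes.MarshalAny (assumed available in this module) and prints it expanded; the payload type must be linked in for expansion to succeed.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	anyMsg, err := ptypes.MarshalAny(&wrappers.StringValue{Value: "hi"})
	if err != nil {
		panic(err)
	}

	tm := proto.TextMarshaler{ExpandAny: true}
	fmt.Println(tm.Text(anyMsg)) // type URL plus the decoded payload
}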
func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
if tm.ExpandAny && isAny(sv) {
if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
return err
}
}
st := sv.Type()
sprops := GetProperties(st)
for i := 0; i < sv.NumField(); i++ {
fv := sv.Field(i)
props := sprops.Prop[i]
name := st.Field(i).Name
if name == "XXX_NoUnkeyedLiteral" {
continue
}
if strings.HasPrefix(name, "XXX_") {
// There are two XXX_ fields:
// XXX_unrecognized []byte
// XXX_extensions map[int32]proto.Extension
// The first is handled here;
// the second is handled at the bottom of this function.
if name == "XXX_unrecognized" && !fv.IsNil() {
if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
return err
}
}
continue
}
if fv.Kind() == reflect.Ptr && fv.IsNil() {
// Field not filled in. This could be an optional field or
// a required field that wasn't filled in. Either way, there
// isn't anything we can show for it.
continue
}
if fv.Kind() == reflect.Slice && fv.IsNil() {
// Repeated field that is empty, or a bytes field that is unused.
continue
}
if props.Repeated && fv.Kind() == reflect.Slice {
// Repeated field.
for j := 0; j < fv.Len(); j++ {
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
v := fv.Index(j)
if v.Kind() == reflect.Ptr && v.IsNil() {
// A nil message in a repeated field is not valid,
// but we can handle that more gracefully than panicking.
if _, err := w.Write([]byte("<nil>\n")); err != nil {
return err
}
continue
}
if err := tm.writeAny(w, v, props); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
continue
}
if fv.Kind() == reflect.Map {
// Map fields are rendered as a repeated struct with key/value fields.
keys := fv.MapKeys()
sort.Sort(mapKeys(keys))
for _, key := range keys {
val := fv.MapIndex(key)
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
// open struct
if err := w.WriteByte('<'); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
// key
if _, err := w.WriteString("key:"); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
// nil values aren't legal, but we can avoid panicking because of them.
if val.Kind() != reflect.Ptr || !val.IsNil() {
// value
if _, err := w.WriteString("value:"); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := tm.writeAny(w, val, props.MapValProp); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
// close struct
w.unindent()
if err := w.WriteByte('>'); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
continue
}
if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
// empty bytes field
continue
}
if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
// proto3 non-repeated scalar field; skip if zero value
if isProto3Zero(fv) {
continue
}
}
if fv.Kind() == reflect.Interface {
// Check if it is a oneof.
if st.Field(i).Tag.Get("protobuf_oneof") != "" {
// fv is nil, or holds a pointer to generated struct.
// That generated struct has exactly one field,
// which has a protobuf struct tag.
if fv.IsNil() {
continue
}
inner := fv.Elem().Elem() // interface -> *T -> T
tag := inner.Type().Field(0).Tag.Get("protobuf")
props = new(Properties) // Overwrite the outer props var, but not its pointee.
props.Parse(tag)
// Write the value in the oneof, not the oneof itself.
fv = inner.Field(0)
// Special case to cope with malformed messages gracefully:
// If the value in the oneof is a nil pointer, don't panic
// in writeAny.
if fv.Kind() == reflect.Ptr && fv.IsNil() {
// Use errors.New so writeAny won't render quotes.
msg := errors.New("/* nil */")
fv = reflect.ValueOf(&msg).Elem()
}
}
}
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
// Enums have a String method, so writeAny will work fine.
if err := tm.writeAny(w, fv, props); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
// Extensions (the XXX_extensions field).
pv := sv.Addr()
if _, err := extendable(pv.Interface()); err == nil {
if err := tm.writeExtensions(w, pv); err != nil {
return err
}
}
return nil
}
var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
// writeAny writes an arbitrary field.
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
v = reflect.Indirect(v)
// Floats have special cases.
if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
x := v.Float()
var b []byte
switch {
case math.IsInf(x, 1):
b = posInf
case math.IsInf(x, -1):
b = negInf
case math.IsNaN(x):
b = nan
}
if b != nil {
_, err := w.Write(b)
return err
}
// Other values are handled below.
}
// We don't attempt to serialise every possible value type; only those
// that can occur in protocol buffers.
switch v.Kind() {
case reflect.Slice:
// Should only be a []byte; repeated fields are handled in writeStruct.
if err := writeString(w, string(v.Bytes())); err != nil {
return err
}
case reflect.String:
if err := writeString(w, v.String()); err != nil {
return err
}
case reflect.Struct:
// Required/optional group/message.
var bra, ket byte = '<', '>'
if props != nil && props.Wire == "group" {
bra, ket = '{', '}'
}
if err := w.WriteByte(bra); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
if v.CanAddr() {
// Calling v.Interface on a struct causes the reflect package to
// copy the entire struct. This is racy with the new Marshaler
// since we atomically update the XXX_sizecache.
//
// Thus, we retrieve a pointer to the struct if possible to avoid
// a race since v.Interface on the pointer doesn't copy the struct.
//
// If v is not addressable, then we are not worried about a race
// since it implies that the binary Marshaler cannot possibly be
// mutating this value.
v = v.Addr()
}
if v.Type().Implements(textMarshalerType) {
text, err := v.Interface().(encoding.TextMarshaler).MarshalText()
if err != nil {
return err
}
if _, err = w.Write(text); err != nil {
return err
}
} else {
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
if err := tm.writeStruct(w, v); err != nil {
return err
}
}
w.unindent()
if err := w.WriteByte(ket); err != nil {
return err
}
default:
_, err := fmt.Fprint(w, v.Interface())
return err
}
return nil
}
// equivalent to C's isprint.
func isprint(c byte) bool {
return c >= 0x20 && c < 0x7f
}
// writeString writes a string in the protocol buffer text format.
// It is similar to strconv.Quote except we don't use Go escape sequences,
// we treat the string as a byte sequence, and we use octal escapes.
// These differences are to maintain interoperability with the other
// languages' implementations of the text format.
func writeString(w *textWriter, s string) error {
// use WriteByte here to get any needed indent
if err := w.WriteByte('"'); err != nil {
return err
}
// Loop over the bytes, not the runes.
for i := 0; i < len(s); i++ {
var err error
// Divergence from C++: we don't escape apostrophes.
// There's no need to escape them, and the C++ parser
// copes with a naked apostrophe.
switch c := s[i]; c {
case '\n':
_, err = w.w.Write(backslashN)
case '\r':
_, err = w.w.Write(backslashR)
case '\t':
_, err = w.w.Write(backslashT)
case '"':
_, err = w.w.Write(backslashDQ)
case '\\':
_, err = w.w.Write(backslashBS)
default:
if isprint(c) {
err = w.w.WriteByte(c)
} else {
_, err = fmt.Fprintf(w.w, "\\%03o", c)
}
}
if err != nil {
return err
}
}
return w.WriteByte('"')
}
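A standalone sketch of the escaping rules described above (illustrative, hypothetical helper name): a handful of C-style escapes, escaped quote and backslash, and three-digit octal escapes for every other non-printable byte, working byte by byte rather than rune by rune.

package main

import "fmt"

// quoteTextString mimics the rules of writeString above: bytes, not runes,
// and octal escapes instead of Go escape sequences.
func quoteTextString(s string) string {
	out := []byte{'"'}
	for i := 0; i < len(s); i++ {
		switch c := s[i]; c {
		case '\n':
			out = append(out, '\\', 'n')
		case '\r':
			out = append(out, '\\', 'r')
		case '\t':
			out = append(out, '\\', 't')
		case '"':
			out = append(out, '\\', '"')
		case '\\':
			out = append(out, '\\', '\\')
		default:
			if c >= 0x20 && c < 0x7f { // printable ASCII
				out = append(out, c)
			} else {
				out = append(out, []byte(fmt.Sprintf("\\%03o", c))...)
			}
		}
	}
	return string(append(out, '"'))
}

func main() {
	fmt.Println(quoteTextString("héllo\n")) // non-ASCII bytes become octal escapes
}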
func writeUnknownStruct(w *textWriter, data []byte) (err error) {
if !w.compact {
if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
return err
}
}
b := NewBuffer(data)
for b.index < len(b.buf) {
x, err := b.DecodeVarint()
if err != nil {
_, err := fmt.Fprintf(w, "/* %v */\n", err)
return err
}
wire, tag := x&7, x>>3
if wire == WireEndGroup {
w.unindent()
if _, err := w.Write(endBraceNewline); err != nil {
return err
}
continue
}
if _, err := fmt.Fprint(w, tag); err != nil {
return err
}
if wire != WireStartGroup {
if err := w.WriteByte(':'); err != nil {
return err
}
}
if !w.compact || wire == WireStartGroup {
if err := w.WriteByte(' '); err != nil {
return err
}
}
switch wire {
case WireBytes:
buf, e := b.DecodeRawBytes(false)
if e == nil {
_, err = fmt.Fprintf(w, "%q", buf)
} else {
_, err = fmt.Fprintf(w, "/* %v */", e)
}
case WireFixed32:
x, err = b.DecodeFixed32()
err = writeUnknownInt(w, x, err)
case WireFixed64:
x, err = b.DecodeFixed64()
err = writeUnknownInt(w, x, err)
case WireStartGroup:
err = w.WriteByte('{')
w.indent()
case WireVarint:
x, err = b.DecodeVarint()
err = writeUnknownInt(w, x, err)
default:
_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
}
if err != nil {
return err
}
if err = w.WriteByte('\n'); err != nil {
return err
}
}
return nil
}
func writeUnknownInt(w *textWriter, x uint64, err error) error {
if err == nil {
_, err = fmt.Fprint(w, x)
} else {
_, err = fmt.Fprintf(w, "/* %v */", err)
}
return err
}
type int32Slice []int32
func (s int32Slice) Len() int { return len(s) }
func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// writeExtensions writes all the extensions in pv.
// pv is assumed to be a pointer to a protocol message struct that is extendable.
func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
emap := extensionMaps[pv.Type().Elem()]
ep, _ := extendable(pv.Interface())
// Order the extensions by ID.
// This isn't strictly necessary, but it will give us
// canonical output, which will also make testing easier.
m, mu := ep.extensionsRead()
if m == nil {
return nil
}
mu.Lock()
ids := make([]int32, 0, len(m))
for id := range m {
ids = append(ids, id)
}
sort.Sort(int32Slice(ids))
mu.Unlock()
for _, extNum := range ids {
ext := m[extNum]
var desc *ExtensionDesc
if emap != nil {
desc = emap[extNum]
}
if desc == nil {
// Unknown extension.
if err := writeUnknownStruct(w, ext.enc); err != nil {
return err
}
continue
}
pb, err := GetExtension(ep, desc)
if err != nil {
return fmt.Errorf("failed getting extension: %v", err)
}
// Repeated extensions will appear as a slice.
if !desc.repeated() {
if err := tm.writeExtension(w, desc.Name, pb); err != nil {
return err
}
} else {
v := reflect.ValueOf(pb)
for i := 0; i < v.Len(); i++ {
if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
return err
}
}
}
}
return nil
}
func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
return nil
}
func (w *textWriter) writeIndent() {
if !w.complete {
return
}
remain := w.ind * 2
for remain > 0 {
n := remain
if n > len(spaces) {
n = len(spaces)
}
w.w.Write(spaces[:n])
remain -= n
}
w.complete = false
}
// TextMarshaler is a configurable text format marshaler.
type TextMarshaler struct {
Compact bool // use compact text format (one line).
ExpandAny bool // expand google.protobuf.Any messages of known types
}
// Marshal writes a given protocol buffer in text format.
// The only errors returned are from w.
func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
val := reflect.ValueOf(pb)
if pb == nil || val.IsNil() {
w.Write([]byte("<nil>"))
return nil
}
var bw *bufio.Writer
ww, ok := w.(writer)
if !ok {
bw = bufio.NewWriter(w)
ww = bw
}
aw := &textWriter{
w: ww,
complete: true,
compact: tm.Compact,
}
if etm, ok := pb.(encoding.TextMarshaler); ok {
text, err := etm.MarshalText()
if err != nil {
return err
}
if _, err = aw.Write(text); err != nil {
return err
}
if bw != nil {
return bw.Flush()
}
return nil
}
// Dereference the received pointer so we don't have outer < and >.
v := reflect.Indirect(val)
if err := tm.writeStruct(aw, v); err != nil {
return err
}
if bw != nil {
return bw.Flush()
}
return nil
}
// Text is the same as Marshal, but returns the string directly.
func (tm *TextMarshaler) Text(pb Message) string {
var buf bytes.Buffer
tm.Marshal(&buf, pb)
return buf.String()
}
var (
defaultTextMarshaler = TextMarshaler{}
compactTextMarshaler = TextMarshaler{Compact: true}
)
// TODO: consider removing some of the Marshal functions below.
// MarshalText writes a given protocol buffer in text format.
// The only errors returned are from w.
func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
// MarshalTextString is the same as MarshalText, but returns the string directly.
func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
// CompactText writes a given protocol buffer in compact text format (one line).
func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
// CompactTextString is the same as CompactText, but returns the string directly.
func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
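// textMarshalExample is a minimal illustrative sketch: it renders the same
// message in the default multi-line text form and in the compact single-line
// form. The argument m is assumed to be any generated type implementing Message.
func textMarshalExample(m Message) (expanded, compact string) {
	expanded = MarshalTextString(m) // fields on separate, indented lines
	compact = CompactTextString(m)  // the same fields on a single line
	return expanded, compact
}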

801
vendor/github.com/golang/protobuf/proto/text_decode.go generated vendored Normal file
View file

@ -0,0 +1,801 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"encoding"
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"unicode/utf8"
"google.golang.org/protobuf/encoding/prototext"
protoV2 "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
const wrapTextUnmarshalV2 = false
// ParseError is returned by UnmarshalText.
type ParseError struct {
Message string
// Deprecated: Do not use.
Line, Offset int
}
func (e *ParseError) Error() string {
if wrapTextUnmarshalV2 {
return e.Message
}
if e.Line == 1 {
return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
}
return fmt.Sprintf("line %d: %v", e.Line, e.Message)
}
// UnmarshalText parses a proto text formatted string into m.
func UnmarshalText(s string, m Message) error {
if u, ok := m.(encoding.TextUnmarshaler); ok {
return u.UnmarshalText([]byte(s))
}
m.Reset()
mi := MessageV2(m)
if wrapTextUnmarshalV2 {
err := prototext.UnmarshalOptions{
AllowPartial: true,
}.Unmarshal([]byte(s), mi)
if err != nil {
return &ParseError{Message: err.Error()}
}
return checkRequiredNotSet(mi)
} else {
if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
return err
}
return checkRequiredNotSet(mi)
}
}
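// unmarshalTextExample is a minimal illustrative sketch: it parses a
// text-format literal into m and distinguishes text-format parse errors from
// other failures. The field name used in the literal is an assumption and must
// exist on the concrete message type behind m.
func unmarshalTextExample(m Message) error {
	err := UnmarshalText(`name: "x"`, m)
	if pe, ok := err.(*ParseError); ok {
		return fmt.Errorf("text parse failed: %s", pe.Message)
	}
	return err
}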
type textParser struct {
s string // remaining input
done bool // whether the parsing is finished (success or error)
backed bool // whether back() was called
offset, line int
cur token
}
type token struct {
value string
err *ParseError
line int // line number
offset int // byte number from start of input, not start of line
unquoted string // the unquoted version of value, if it was a quoted string
}
func newTextParser(s string) *textParser {
p := new(textParser)
p.s = s
p.line = 1
p.cur.line = 1
return p
}
func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
md := m.Descriptor()
fds := md.Fields()
// A struct is a sequence of "name: value", terminated by one of
// '>' or '}', or the end of the input. A name may also be
// "[extension]" or "[type/url]".
//
// The whole struct can also be an expanded Any message, like:
// [type/url] < ... struct contents ... >
seen := make(map[protoreflect.FieldNumber]bool)
for {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == terminator {
break
}
if tok.value == "[" {
if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
return err
}
continue
}
// This is a normal, non-extension field.
name := protoreflect.Name(tok.value)
fd := fds.ByName(name)
switch {
case fd == nil:
gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
fd = gd
}
case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
fd = nil
case fd.IsWeak() && fd.Message().IsPlaceholder():
fd = nil
}
if fd == nil {
typeName := string(md.FullName())
if m, ok := m.Interface().(Message); ok {
t := reflect.TypeOf(m)
if t.Kind() == reflect.Ptr {
typeName = t.Elem().String()
}
}
return p.errorf("unknown field name %q in %v", name, typeName)
}
if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
}
if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
return p.errorf("non-repeated field %q was repeated", fd.Name())
}
seen[fd.Number()] = true
// Consume any colon.
if err := p.checkForColon(fd); err != nil {
return err
}
// Parse into the field.
v := m.Get(fd)
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
v = m.Mutable(fd)
}
if v, err = p.unmarshalValue(v, fd); err != nil {
return err
}
m.Set(fd, v)
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
}
return nil
}
func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
name, err := p.consumeExtensionOrAnyName()
if err != nil {
return err
}
// If it contains a slash, it's an Any type URL.
if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
tok := p.next()
if tok.err != nil {
return tok.err
}
// consume an optional colon
if tok.value == ":" {
tok = p.next()
if tok.err != nil {
return tok.err
}
}
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
if err != nil {
return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
}
m2 := mt.New()
if err := p.unmarshalMessage(m2, terminator); err != nil {
return err
}
b, err := protoV2.Marshal(m2.Interface())
if err != nil {
return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
}
urlFD := m.Descriptor().Fields().ByName("type_url")
valFD := m.Descriptor().Fields().ByName("value")
if seen[urlFD.Number()] {
return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
}
if seen[valFD.Number()] {
return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
}
m.Set(urlFD, protoreflect.ValueOfString(name))
m.Set(valFD, protoreflect.ValueOfBytes(b))
seen[urlFD.Number()] = true
seen[valFD.Number()] = true
return nil
}
xname := protoreflect.FullName(name)
xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
if xt == nil && isMessageSet(m.Descriptor()) {
xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
}
if xt == nil {
return p.errorf("unrecognized extension %q", name)
}
fd := xt.TypeDescriptor()
if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
}
if err := p.checkForColon(fd); err != nil {
return err
}
v := m.Get(fd)
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
v = m.Mutable(fd)
}
v, err = p.unmarshalValue(v, fd)
if err != nil {
return err
}
m.Set(fd, v)
return p.consumeOptionalSeparator()
}
func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
tok := p.next()
if tok.err != nil {
return v, tok.err
}
if tok.value == "" {
return v, p.errorf("unexpected EOF")
}
switch {
case fd.IsList():
lv := v.List()
var err error
if tok.value == "[" {
// Repeated field with list notation, like [1,2,3].
for {
vv := lv.NewElement()
vv, err = p.unmarshalSingularValue(vv, fd)
if err != nil {
return v, err
}
lv.Append(vv)
tok := p.next()
if tok.err != nil {
return v, tok.err
}
if tok.value == "]" {
break
}
if tok.value != "," {
return v, p.errorf("Expected ']' or ',' found %q", tok.value)
}
}
return v, nil
}
// One value of the repeated field.
p.back()
vv := lv.NewElement()
vv, err = p.unmarshalSingularValue(vv, fd)
if err != nil {
return v, err
}
lv.Append(vv)
return v, nil
case fd.IsMap():
// The map entry should be this sequence of tokens:
// < key : KEY value : VALUE >
// However, implementations may omit key or value, and technically
// we should support them in any order.
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return v, p.errorf("expected '{' or '<', found %q", tok.value)
}
keyFD := fd.MapKey()
valFD := fd.MapValue()
mv := v.Map()
kv := keyFD.Default()
vv := mv.NewValue()
for {
tok := p.next()
if tok.err != nil {
return v, tok.err
}
if tok.value == terminator {
break
}
var err error
switch tok.value {
case "key":
if err := p.consumeToken(":"); err != nil {
return v, err
}
if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
return v, err
}
if err := p.consumeOptionalSeparator(); err != nil {
return v, err
}
case "value":
if err := p.checkForColon(valFD); err != nil {
return v, err
}
if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
return v, err
}
if err := p.consumeOptionalSeparator(); err != nil {
return v, err
}
default:
p.back()
return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
}
}
mv.Set(kv.MapKey(), vv)
return v, nil
default:
p.back()
return p.unmarshalSingularValue(v, fd)
}
}
func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
tok := p.next()
if tok.err != nil {
return v, tok.err
}
if tok.value == "" {
return v, p.errorf("unexpected EOF")
}
switch fd.Kind() {
case protoreflect.BoolKind:
switch tok.value {
case "true", "1", "t", "True":
return protoreflect.ValueOfBool(true), nil
case "false", "0", "f", "False":
return protoreflect.ValueOfBool(false), nil
}
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
return protoreflect.ValueOfInt32(int32(x)), nil
}
// The C++ parser accepts large positive hex numbers that use
// two's complement arithmetic to represent negative numbers.
// This feature is here for backwards compatibility with C++.
if strings.HasPrefix(tok.value, "0x") {
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
}
}
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
return protoreflect.ValueOfInt64(int64(x)), nil
}
// The C++ parser accepts large positive hex numbers that use
// two's complement arithmetic to represent negative numbers.
// This feature is here for backwards compatibility with C++.
if strings.HasPrefix(tok.value, "0x") {
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
}
}
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
return protoreflect.ValueOfUint32(uint32(x)), nil
}
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
return protoreflect.ValueOfUint64(uint64(x)), nil
}
case protoreflect.FloatKind:
// Ignore 'f' for compatibility with output generated by C++,
// but don't remove 'f' when the value is "-inf" or "inf".
v := tok.value
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
v = v[:len(v)-len("f")]
}
if x, err := strconv.ParseFloat(v, 32); err == nil {
return protoreflect.ValueOfFloat32(float32(x)), nil
}
case protoreflect.DoubleKind:
// Ignore 'f' for compatibility with output generated by C++,
// but don't remove 'f' when the value is "-inf" or "inf".
v := tok.value
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
v = v[:len(v)-len("f")]
}
if x, err := strconv.ParseFloat(v, 64); err == nil {
return protoreflect.ValueOfFloat64(float64(x)), nil
}
case protoreflect.StringKind:
if isQuote(tok.value[0]) {
return protoreflect.ValueOfString(tok.unquoted), nil
}
case protoreflect.BytesKind:
if isQuote(tok.value[0]) {
return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
}
case protoreflect.EnumKind:
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
}
vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
if vd != nil {
return protoreflect.ValueOfEnum(vd.Number()), nil
}
case protoreflect.MessageKind, protoreflect.GroupKind:
var terminator string
switch tok.value {
case "{":
terminator = "}"
case "<":
terminator = ">"
default:
return v, p.errorf("expected '{' or '<', found %q", tok.value)
}
err := p.unmarshalMessage(v.Message(), terminator)
return v, err
default:
panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
}
return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
}
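// hexTwosComplementExample is a minimal illustrative sketch of the conversion
// used above for int32 fields: a large positive hex literal such as 0xffffffff
// is reinterpreted via two's complement arithmetic as a negative value.
func hexTwosComplementExample() int32 {
	x, _ := strconv.ParseUint("0xffffffff", 0, 32)
	return int32(-(int64(^x) + 1)) // -1
}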
// Consume a ':' from the input stream (if the next token is a colon),
// returning an error if a colon is needed but not present.
func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ":" {
if fd.Message() == nil {
return p.errorf("expected ':', found %q", tok.value)
}
p.back()
}
return nil
}
// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
// the following ']'. It returns the name or URL consumed.
func (p *textParser) consumeExtensionOrAnyName() (string, error) {
tok := p.next()
if tok.err != nil {
return "", tok.err
}
// If the extension name or type URL is quoted, it's a single token.
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
if err != nil {
return "", err
}
return name, p.consumeToken("]")
}
// Consume everything up to "]"
var parts []string
for tok.value != "]" {
parts = append(parts, tok.value)
tok = p.next()
if tok.err != nil {
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
}
if p.done && tok.value != "]" {
return "", p.errorf("unclosed type_url or extension name")
}
}
return strings.Join(parts, ""), nil
}
// consumeOptionalSeparator consumes an optional semicolon or comma.
// It is used in unmarshalMessage to provide backward compatibility.
func (p *textParser) consumeOptionalSeparator() error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ";" && tok.value != "," {
p.back()
}
return nil
}
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
p.cur.err = pe
p.done = true
return pe
}
func (p *textParser) skipWhitespace() {
i := 0
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
if p.s[i] == '#' {
// comment; skip to end of line or input
for i < len(p.s) && p.s[i] != '\n' {
i++
}
if i == len(p.s) {
break
}
}
if p.s[i] == '\n' {
p.line++
}
i++
}
p.offset += i
p.s = p.s[i:len(p.s)]
if len(p.s) == 0 {
p.done = true
}
}
func (p *textParser) advance() {
// Skip whitespace
p.skipWhitespace()
if p.done {
return
}
// Start of non-whitespace
p.cur.err = nil
p.cur.offset, p.cur.line = p.offset, p.line
p.cur.unquoted = ""
switch p.s[0] {
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
// Single symbol
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
case '"', '\'':
// Quoted string
i := 1
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
if p.s[i] == '\\' && i+1 < len(p.s) {
// skip escaped char
i++
}
i++
}
if i >= len(p.s) || p.s[i] != p.s[0] {
p.errorf("unmatched quote")
return
}
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
if err != nil {
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
return
}
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
p.cur.unquoted = unq
default:
i := 0
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
i++
}
if i == 0 {
p.errorf("unexpected byte %#x", p.s[0])
return
}
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
}
p.offset += len(p.cur.value)
}
// Back off the parser by one token. Can only be done between calls to next().
// It makes the next advance() a no-op.
func (p *textParser) back() { p.backed = true }
// Advances the parser and returns the new current token.
func (p *textParser) next() *token {
if p.backed || p.done {
p.backed = false
return &p.cur
}
p.advance()
if p.done {
p.cur.value = ""
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
// Look for multiple quoted strings separated by whitespace,
// and concatenate them.
cat := p.cur
for {
p.skipWhitespace()
if p.done || !isQuote(p.s[0]) {
break
}
p.advance()
if p.cur.err != nil {
return &p.cur
}
cat.value += " " + p.cur.value
cat.unquoted += p.cur.unquoted
}
p.done = false // parser may have seen EOF, but we want to return cat
p.cur = cat
}
return &p.cur
}
func (p *textParser) consumeToken(s string) error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != s {
p.back()
return p.errorf("expected %q, found %q", s, tok.value)
}
return nil
}
var errBadUTF8 = errors.New("proto: bad UTF-8")
func unquoteC(s string, quote rune) (string, error) {
// This is based on C++'s tokenizer.cc.
// Despite its name, this is *not* parsing C syntax.
// For instance, "\0" is an invalid quoted string.
// Avoid allocation in trivial cases.
simple := true
for _, r := range s {
if r == '\\' || r == quote {
simple = false
break
}
}
if simple {
return s, nil
}
buf := make([]byte, 0, 3*len(s)/2)
for len(s) > 0 {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", errBadUTF8
}
s = s[n:]
if r != '\\' {
if r < utf8.RuneSelf {
buf = append(buf, byte(r))
} else {
buf = append(buf, string(r)...)
}
continue
}
ch, tail, err := unescape(s)
if err != nil {
return "", err
}
buf = append(buf, ch...)
s = tail
}
return string(buf), nil
}
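// unquoteCExample is a minimal illustrative sketch: unquoteC resolves
// text-format escapes, so octal (\101 = 'A'), hex (\x42 = 'B') and
// conventional (\n) escapes all decode to raw bytes.
func unquoteCExample() (string, error) {
	return unquoteC(`\101\x42\n`, '"') // "AB\n", nil
}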
func unescape(s string) (ch string, tail string, err error) {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", "", errBadUTF8
}
s = s[n:]
switch r {
case 'a':
return "\a", s, nil
case 'b':
return "\b", s, nil
case 'f':
return "\f", s, nil
case 'n':
return "\n", s, nil
case 'r':
return "\r", s, nil
case 't':
return "\t", s, nil
case 'v':
return "\v", s, nil
case '?':
return "?", s, nil // trigraph workaround
case '\'', '"', '\\':
return string(r), s, nil
case '0', '1', '2', '3', '4', '5', '6', '7':
if len(s) < 2 {
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
}
ss := string(r) + s[:2]
s = s[2:]
i, err := strconv.ParseUint(ss, 8, 8)
if err != nil {
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
}
return string([]byte{byte(i)}), s, nil
case 'x', 'X', 'u', 'U':
var n int
switch r {
case 'x', 'X':
n = 2
case 'u':
n = 4
case 'U':
n = 8
}
if len(s) < n {
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
}
ss := s[:n]
s = s[n:]
i, err := strconv.ParseUint(ss, 16, 64)
if err != nil {
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
}
if r == 'x' || r == 'X' {
return string([]byte{byte(i)}), s, nil
}
if i > utf8.MaxRune {
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
}
return string(i), s, nil
}
return "", "", fmt.Errorf(`unknown escape \%c`, r)
}
func isIdentOrNumberChar(c byte) bool {
switch {
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
return true
case '0' <= c && c <= '9':
return true
}
switch c {
case '-', '+', '.', '_':
return true
}
return false
}
func isWhitespace(c byte) bool {
switch c {
case ' ', '\t', '\n', '\r':
return true
}
return false
}
func isQuote(c byte) bool {
switch c {
case '"', '\'':
return true
}
return false
}

560
vendor/github.com/golang/protobuf/proto/text_encode.go generated vendored Normal file
View file

@ -0,0 +1,560 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"bytes"
"encoding"
"fmt"
"io"
"math"
"sort"
"strings"
"google.golang.org/protobuf/encoding/prototext"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
const wrapTextMarshalV2 = false
// TextMarshaler is a configurable text format marshaler.
type TextMarshaler struct {
Compact bool // use compact text format (one line)
ExpandAny bool // expand google.protobuf.Any messages of known types
}
// Marshal writes the proto text format of m to w.
func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
b, err := tm.marshal(m)
if len(b) > 0 {
if _, err := w.Write(b); err != nil {
return err
}
}
return err
}
// Text returns a proto text formatted string of m.
func (tm *TextMarshaler) Text(m Message) string {
b, _ := tm.marshal(m)
return string(b)
}
func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return []byte("<nil>"), nil
}
if wrapTextMarshalV2 {
if m, ok := m.(encoding.TextMarshaler); ok {
return m.MarshalText()
}
opts := prototext.MarshalOptions{
AllowPartial: true,
EmitUnknown: true,
}
if !tm.Compact {
opts.Indent = " "
}
if !tm.ExpandAny {
opts.Resolver = (*protoregistry.Types)(nil)
}
return opts.Marshal(mr.Interface())
} else {
w := &textWriter{
compact: tm.Compact,
expandAny: tm.ExpandAny,
complete: true,
}
if m, ok := m.(encoding.TextMarshaler); ok {
b, err := m.MarshalText()
if err != nil {
return nil, err
}
w.Write(b)
return w.buf, nil
}
err := w.writeMessage(mr)
return w.buf, err
}
}
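// textMarshalerExample is a minimal illustrative sketch: it configures a
// TextMarshaler that expands google.protobuf.Any messages of known types and
// writes m into an in-memory buffer. The argument m is assumed to be any
// generated type implementing Message.
func textMarshalerExample(m Message) (string, error) {
	var buf bytes.Buffer
	tm := &TextMarshaler{ExpandAny: true}
	if err := tm.Marshal(&buf, m); err != nil {
		return "", err
	}
	return buf.String(), nil
}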
var (
defaultTextMarshaler = TextMarshaler{}
compactTextMarshaler = TextMarshaler{Compact: true}
)
// MarshalText writes the proto text format of m to w.
func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
// MarshalTextString returns a proto text formatted string of m.
func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
// CompactText writes the compact proto text format of m to w.
func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
// CompactTextString returns a compact proto text formatted string of m.
func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
var (
newline = []byte("\n")
endBraceNewline = []byte("}\n")
posInf = []byte("inf")
negInf = []byte("-inf")
nan = []byte("nan")
)
// textWriter is an io.Writer that tracks its indentation level.
type textWriter struct {
compact bool // same as TextMarshaler.Compact
expandAny bool // same as TextMarshaler.ExpandAny
complete bool // whether the current position is a complete line
indent int // indentation level; never negative
buf []byte
}
func (w *textWriter) Write(p []byte) (n int, _ error) {
newlines := bytes.Count(p, newline)
if newlines == 0 {
if !w.compact && w.complete {
w.writeIndent()
}
w.buf = append(w.buf, p...)
w.complete = false
return len(p), nil
}
frags := bytes.SplitN(p, newline, newlines+1)
if w.compact {
for i, frag := range frags {
if i > 0 {
w.buf = append(w.buf, ' ')
n++
}
w.buf = append(w.buf, frag...)
n += len(frag)
}
return n, nil
}
for i, frag := range frags {
if w.complete {
w.writeIndent()
}
w.buf = append(w.buf, frag...)
n += len(frag)
if i+1 < len(frags) {
w.buf = append(w.buf, '\n')
n++
}
}
w.complete = len(frags[len(frags)-1]) == 0
return n, nil
}
func (w *textWriter) WriteByte(c byte) error {
if w.compact && c == '\n' {
c = ' '
}
if !w.compact && w.complete {
w.writeIndent()
}
w.buf = append(w.buf, c)
w.complete = c == '\n'
return nil
}
func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
if !w.compact && w.complete {
w.writeIndent()
}
w.complete = false
if fd.Kind() != protoreflect.GroupKind {
w.buf = append(w.buf, fd.Name()...)
w.WriteByte(':')
} else {
// Use message type name for group field name.
w.buf = append(w.buf, fd.Message().Name()...)
}
if !w.compact {
w.WriteByte(' ')
}
}
func requiresQuotes(u string) bool {
// When the type URL contains any character outside of [0-9A-Za-z./_], it must be quoted.
for _, ch := range u {
switch {
case ch == '.' || ch == '/' || ch == '_':
continue
case '0' <= ch && ch <= '9':
continue
case 'A' <= ch && ch <= 'Z':
continue
case 'a' <= ch && ch <= 'z':
continue
default:
return true
}
}
return false
}
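// requiresQuotesExample is a minimal illustrative sketch: a type URL built
// only from [0-9A-Za-z./_] is written bare, while any other character forces
// the URL to be quoted.
func requiresQuotesExample() (plain, withSpace bool) {
	plain = requiresQuotes("type.googleapis.com/google.protobuf.Duration") // false
	withSpace = requiresQuotes("type.googleapis.com/x y")                  // true: the space forces quoting
	return plain, withSpace
}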
// writeProto3Any writes an expanded google.protobuf.Any message.
//
// It returns (false, nil) if the value in m can't be unmarshaled (e.g. because
// the required message type is not linked in).
//
// It returns (true, error) when m was written in expanded format or an error
// was encountered while writing it.
func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
md := m.Descriptor()
fdURL := md.Fields().ByName("type_url")
fdVal := md.Fields().ByName("value")
url := m.Get(fdURL).String()
mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
if err != nil {
return false, nil
}
b := m.Get(fdVal).Bytes()
m2 := mt.New()
if err := proto.Unmarshal(b, m2.Interface()); err != nil {
return false, nil
}
w.Write([]byte("["))
if requiresQuotes(url) {
w.writeQuotedString(url)
} else {
w.Write([]byte(url))
}
if w.compact {
w.Write([]byte("]:<"))
} else {
w.Write([]byte("]: <\n"))
w.indent++
}
if err := w.writeMessage(m2); err != nil {
return true, err
}
if w.compact {
w.Write([]byte("> "))
} else {
w.indent--
w.Write([]byte(">\n"))
}
return true, nil
}
func (w *textWriter) writeMessage(m protoreflect.Message) error {
md := m.Descriptor()
if w.expandAny && md.FullName() == "google.protobuf.Any" {
if canExpand, err := w.writeProto3Any(m); canExpand {
return err
}
}
fds := md.Fields()
for i := 0; i < fds.Len(); {
fd := fds.Get(i)
if od := fd.ContainingOneof(); od != nil {
fd = m.WhichOneof(od)
i += od.Fields().Len()
} else {
i++
}
if fd == nil || !m.Has(fd) {
continue
}
switch {
case fd.IsList():
lv := m.Get(fd).List()
for j := 0; j < lv.Len(); j++ {
w.writeName(fd)
v := lv.Get(j)
if err := w.writeSingularValue(v, fd); err != nil {
return err
}
w.WriteByte('\n')
}
case fd.IsMap():
kfd := fd.MapKey()
vfd := fd.MapValue()
mv := m.Get(fd).Map()
type entry struct{ key, val protoreflect.Value }
var entries []entry
mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
entries = append(entries, entry{k.Value(), v})
return true
})
sort.Slice(entries, func(i, j int) bool {
switch kfd.Kind() {
case protoreflect.BoolKind:
return !entries[i].key.Bool() && entries[j].key.Bool()
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
return entries[i].key.Int() < entries[j].key.Int()
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
return entries[i].key.Uint() < entries[j].key.Uint()
case protoreflect.StringKind:
return entries[i].key.String() < entries[j].key.String()
default:
panic("invalid kind")
}
})
for _, entry := range entries {
w.writeName(fd)
w.WriteByte('<')
if !w.compact {
w.WriteByte('\n')
}
w.indent++
w.writeName(kfd)
if err := w.writeSingularValue(entry.key, kfd); err != nil {
return err
}
w.WriteByte('\n')
w.writeName(vfd)
if err := w.writeSingularValue(entry.val, vfd); err != nil {
return err
}
w.WriteByte('\n')
w.indent--
w.WriteByte('>')
w.WriteByte('\n')
}
default:
w.writeName(fd)
if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
return err
}
w.WriteByte('\n')
}
}
if b := m.GetUnknown(); len(b) > 0 {
w.writeUnknownFields(b)
}
return w.writeExtensions(m)
}
func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
switch fd.Kind() {
case protoreflect.FloatKind, protoreflect.DoubleKind:
switch vf := v.Float(); {
case math.IsInf(vf, +1):
w.Write(posInf)
case math.IsInf(vf, -1):
w.Write(negInf)
case math.IsNaN(vf):
w.Write(nan)
default:
fmt.Fprint(w, v.Interface())
}
case protoreflect.StringKind:
// NOTE: This does not validate UTF-8 for historical reasons.
w.writeQuotedString(string(v.String()))
case protoreflect.BytesKind:
w.writeQuotedString(string(v.Bytes()))
case protoreflect.MessageKind, protoreflect.GroupKind:
var bra, ket byte = '<', '>'
if fd.Kind() == protoreflect.GroupKind {
bra, ket = '{', '}'
}
w.WriteByte(bra)
if !w.compact {
w.WriteByte('\n')
}
w.indent++
m := v.Message()
if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
b, err := m2.MarshalText()
if err != nil {
return err
}
w.Write(b)
} else {
w.writeMessage(m)
}
w.indent--
w.WriteByte(ket)
case protoreflect.EnumKind:
if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
fmt.Fprint(w, ev.Name())
} else {
fmt.Fprint(w, v.Enum())
}
default:
fmt.Fprint(w, v.Interface())
}
return nil
}
// writeQuotedString writes a quoted string in the protocol buffer text format.
func (w *textWriter) writeQuotedString(s string) {
w.WriteByte('"')
for i := 0; i < len(s); i++ {
switch c := s[i]; c {
case '\n':
w.buf = append(w.buf, `\n`...)
case '\r':
w.buf = append(w.buf, `\r`...)
case '\t':
w.buf = append(w.buf, `\t`...)
case '"':
w.buf = append(w.buf, `\"`...)
case '\\':
w.buf = append(w.buf, `\\`...)
default:
if isPrint := c >= 0x20 && c < 0x7f; isPrint {
w.buf = append(w.buf, c)
} else {
w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
}
}
}
w.WriteByte('"')
}
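// writeQuotedStringExample is a minimal illustrative sketch: printable ASCII
// is copied through, while control bytes come out as three-digit octal
// escapes and the quote and backslash characters are escaped.
func writeQuotedStringExample() string {
	w := &textWriter{compact: true, complete: true}
	w.writeQuotedString("a\x01\"b")
	return string(w.buf) // `"a\001\"b"`
}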
func (w *textWriter) writeUnknownFields(b []byte) {
if !w.compact {
fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
}
for len(b) > 0 {
num, wtyp, n := protowire.ConsumeTag(b)
if n < 0 {
return
}
b = b[n:]
if wtyp == protowire.EndGroupType {
w.indent--
w.Write(endBraceNewline)
continue
}
fmt.Fprint(w, num)
if wtyp != protowire.StartGroupType {
w.WriteByte(':')
}
if !w.compact || wtyp == protowire.StartGroupType {
w.WriteByte(' ')
}
switch wtyp {
case protowire.VarintType:
v, n := protowire.ConsumeVarint(b)
if n < 0 {
return
}
b = b[n:]
fmt.Fprint(w, v)
case protowire.Fixed32Type:
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
return
}
b = b[n:]
fmt.Fprint(w, v)
case protowire.Fixed64Type:
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
return
}
b = b[n:]
fmt.Fprint(w, v)
case protowire.BytesType:
v, n := protowire.ConsumeBytes(b)
if n < 0 {
return
}
b = b[n:]
fmt.Fprintf(w, "%q", v)
case protowire.StartGroupType:
w.WriteByte('{')
w.indent++
default:
fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
}
w.WriteByte('\n')
}
}
// writeExtensions writes all the extensions in m.
func (w *textWriter) writeExtensions(m protoreflect.Message) error {
md := m.Descriptor()
if md.ExtensionRanges().Len() == 0 {
return nil
}
type ext struct {
desc protoreflect.FieldDescriptor
val protoreflect.Value
}
var exts []ext
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
if fd.IsExtension() {
exts = append(exts, ext{fd, v})
}
return true
})
sort.Slice(exts, func(i, j int) bool {
return exts[i].desc.Number() < exts[j].desc.Number()
})
for _, ext := range exts {
// For message set, use the name of the message as the extension name.
name := string(ext.desc.FullName())
if isMessageSet(ext.desc.ContainingMessage()) {
name = strings.TrimSuffix(name, ".message_set_extension")
}
if !ext.desc.IsList() {
if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
return err
}
} else {
lv := ext.val.List()
for i := 0; i < lv.Len(); i++ {
if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
return err
}
}
}
}
return nil
}
func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
fmt.Fprintf(w, "[%s]:", name)
if !w.compact {
w.WriteByte(' ')
}
if err := w.writeSingularValue(v, fd); err != nil {
return err
}
w.WriteByte('\n')
return nil
}
func (w *textWriter) writeIndent() {
if !w.complete {
return
}
for i := 0; i < w.indent*2; i++ {
w.buf = append(w.buf, ' ')
}
w.complete = false
}

View file

@ -1,880 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
// Functions for parsing the Text protocol buffer format.
// TODO: message sets.
import (
"encoding"
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"unicode/utf8"
)
// Error string emitted when deserializing Any and fields are already set
const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
type ParseError struct {
Message string
Line int // 1-based line number
Offset int // 0-based byte offset from start of input
}
func (p *ParseError) Error() string {
if p.Line == 1 {
// show offset only for first line
return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
}
return fmt.Sprintf("line %d: %v", p.Line, p.Message)
}
type token struct {
value string
err *ParseError
line int // line number
offset int // byte number from start of input, not start of line
unquoted string // the unquoted version of value, if it was a quoted string
}
func (t *token) String() string {
if t.err == nil {
return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
}
return fmt.Sprintf("parse error: %v", t.err)
}
type textParser struct {
s string // remaining input
done bool // whether the parsing is finished (success or error)
backed bool // whether back() was called
offset, line int
cur token
}
func newTextParser(s string) *textParser {
p := new(textParser)
p.s = s
p.line = 1
p.cur.line = 1
return p
}
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
p.cur.err = pe
p.done = true
return pe
}
// Numbers and identifiers are matched by [-+._A-Za-z0-9]
func isIdentOrNumberChar(c byte) bool {
switch {
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
return true
case '0' <= c && c <= '9':
return true
}
switch c {
case '-', '+', '.', '_':
return true
}
return false
}
func isWhitespace(c byte) bool {
switch c {
case ' ', '\t', '\n', '\r':
return true
}
return false
}
func isQuote(c byte) bool {
switch c {
case '"', '\'':
return true
}
return false
}
func (p *textParser) skipWhitespace() {
i := 0
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
if p.s[i] == '#' {
// comment; skip to end of line or input
for i < len(p.s) && p.s[i] != '\n' {
i++
}
if i == len(p.s) {
break
}
}
if p.s[i] == '\n' {
p.line++
}
i++
}
p.offset += i
p.s = p.s[i:len(p.s)]
if len(p.s) == 0 {
p.done = true
}
}
func (p *textParser) advance() {
// Skip whitespace
p.skipWhitespace()
if p.done {
return
}
// Start of non-whitespace
p.cur.err = nil
p.cur.offset, p.cur.line = p.offset, p.line
p.cur.unquoted = ""
switch p.s[0] {
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
// Single symbol
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
case '"', '\'':
// Quoted string
i := 1
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
if p.s[i] == '\\' && i+1 < len(p.s) {
// skip escaped char
i++
}
i++
}
if i >= len(p.s) || p.s[i] != p.s[0] {
p.errorf("unmatched quote")
return
}
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
if err != nil {
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
return
}
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
p.cur.unquoted = unq
default:
i := 0
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
i++
}
if i == 0 {
p.errorf("unexpected byte %#x", p.s[0])
return
}
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
}
p.offset += len(p.cur.value)
}
var (
errBadUTF8 = errors.New("proto: bad UTF-8")
)
func unquoteC(s string, quote rune) (string, error) {
// This is based on C++'s tokenizer.cc.
// Despite its name, this is *not* parsing C syntax.
// For instance, "\0" is an invalid quoted string.
// Avoid allocation in trivial cases.
simple := true
for _, r := range s {
if r == '\\' || r == quote {
simple = false
break
}
}
if simple {
return s, nil
}
buf := make([]byte, 0, 3*len(s)/2)
for len(s) > 0 {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", errBadUTF8
}
s = s[n:]
if r != '\\' {
if r < utf8.RuneSelf {
buf = append(buf, byte(r))
} else {
buf = append(buf, string(r)...)
}
continue
}
ch, tail, err := unescape(s)
if err != nil {
return "", err
}
buf = append(buf, ch...)
s = tail
}
return string(buf), nil
}
func unescape(s string) (ch string, tail string, err error) {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", "", errBadUTF8
}
s = s[n:]
switch r {
case 'a':
return "\a", s, nil
case 'b':
return "\b", s, nil
case 'f':
return "\f", s, nil
case 'n':
return "\n", s, nil
case 'r':
return "\r", s, nil
case 't':
return "\t", s, nil
case 'v':
return "\v", s, nil
case '?':
return "?", s, nil // trigraph workaround
case '\'', '"', '\\':
return string(r), s, nil
case '0', '1', '2', '3', '4', '5', '6', '7':
if len(s) < 2 {
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
}
ss := string(r) + s[:2]
s = s[2:]
i, err := strconv.ParseUint(ss, 8, 8)
if err != nil {
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
}
return string([]byte{byte(i)}), s, nil
case 'x', 'X', 'u', 'U':
var n int
switch r {
case 'x', 'X':
n = 2
case 'u':
n = 4
case 'U':
n = 8
}
if len(s) < n {
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
}
ss := s[:n]
s = s[n:]
i, err := strconv.ParseUint(ss, 16, 64)
if err != nil {
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
}
if r == 'x' || r == 'X' {
return string([]byte{byte(i)}), s, nil
}
if i > utf8.MaxRune {
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
}
return string(i), s, nil
}
return "", "", fmt.Errorf(`unknown escape \%c`, r)
}
// Back off the parser by one token. Can only be done between calls to next().
// It makes the next advance() a no-op.
func (p *textParser) back() { p.backed = true }
// Advances the parser and returns the new current token.
func (p *textParser) next() *token {
if p.backed || p.done {
p.backed = false
return &p.cur
}
p.advance()
if p.done {
p.cur.value = ""
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
// Look for multiple quoted strings separated by whitespace,
// and concatenate them.
cat := p.cur
for {
p.skipWhitespace()
if p.done || !isQuote(p.s[0]) {
break
}
p.advance()
if p.cur.err != nil {
return &p.cur
}
cat.value += " " + p.cur.value
cat.unquoted += p.cur.unquoted
}
p.done = false // parser may have seen EOF, but we want to return cat
p.cur = cat
}
return &p.cur
}
func (p *textParser) consumeToken(s string) error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != s {
p.back()
return p.errorf("expected %q, found %q", s, tok.value)
}
return nil
}
// Return a RequiredNotSetError indicating which required field was not set.
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
st := sv.Type()
sprops := GetProperties(st)
for i := 0; i < st.NumField(); i++ {
if !isNil(sv.Field(i)) {
continue
}
props := sprops.Prop[i]
if props.Required {
return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
}
}
return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
}
// Returns the index in the struct for the named field, as well as the parsed tag properties.
func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
i, ok := sprops.decoderOrigNames[name]
if ok {
return i, sprops.Prop[i], true
}
return -1, nil, false
}
// Consume a ':' from the input stream (if the next token is a colon),
// returning an error if a colon is needed but not present.
func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ":" {
// Colon is optional when the field is a group or message.
needColon := true
switch props.Wire {
case "group":
needColon = false
case "bytes":
// A "bytes" field is either a message, a string, or a repeated field;
// those three become *T, *string and []T respectively, so we can check for
// this field being a pointer to a non-string.
if typ.Kind() == reflect.Ptr {
// *T or *string
if typ.Elem().Kind() == reflect.String {
break
}
} else if typ.Kind() == reflect.Slice {
// []T or []*T
if typ.Elem().Kind() != reflect.Ptr {
break
}
} else if typ.Kind() == reflect.String {
// The proto3 exception is for a string field,
// which requires a colon.
break
}
needColon = false
}
if needColon {
return p.errorf("expected ':', found %q", tok.value)
}
p.back()
}
return nil
}
func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
st := sv.Type()
sprops := GetProperties(st)
reqCount := sprops.reqCount
var reqFieldErr error
fieldSet := make(map[string]bool)
// A struct is a sequence of "name: value", terminated by one of
// '>' or '}', or the end of the input. A name may also be
// "[extension]" or "[type/url]".
//
// The whole struct can also be an expanded Any message, like:
// [type/url] < ... struct contents ... >
for {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == terminator {
break
}
if tok.value == "[" {
// Looks like an extension or an Any.
//
// TODO: Check whether we need to handle
// namespace rooted names (e.g. ".something.Foo").
extName, err := p.consumeExtName()
if err != nil {
return err
}
if s := strings.LastIndex(extName, "/"); s >= 0 {
// If it contains a slash, it's an Any type URL.
messageName := extName[s+1:]
mt := MessageType(messageName)
if mt == nil {
return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
}
tok = p.next()
if tok.err != nil {
return tok.err
}
// consume an optional colon
if tok.value == ":" {
tok = p.next()
if tok.err != nil {
return tok.err
}
}
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
v := reflect.New(mt.Elem())
if pe := p.readStruct(v.Elem(), terminator); pe != nil {
return pe
}
b, err := Marshal(v.Interface().(Message))
if err != nil {
return p.errorf("failed to marshal message of type %q: %v", messageName, err)
}
if fieldSet["type_url"] {
return p.errorf(anyRepeatedlyUnpacked, "type_url")
}
if fieldSet["value"] {
return p.errorf(anyRepeatedlyUnpacked, "value")
}
sv.FieldByName("TypeUrl").SetString(extName)
sv.FieldByName("Value").SetBytes(b)
fieldSet["type_url"] = true
fieldSet["value"] = true
continue
}
var desc *ExtensionDesc
// This could be faster, but it's functional.
// TODO: Do something smarter than a linear scan.
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
if d.Name == extName {
desc = d
break
}
}
if desc == nil {
return p.errorf("unrecognized extension %q", extName)
}
props := &Properties{}
props.Parse(desc.Tag)
typ := reflect.TypeOf(desc.ExtensionType)
if err := p.checkForColon(props, typ); err != nil {
return err
}
rep := desc.repeated()
// Read the extension structure, and set it in
// the value we're constructing.
var ext reflect.Value
if !rep {
ext = reflect.New(typ).Elem()
} else {
ext = reflect.New(typ.Elem()).Elem()
}
if err := p.readAny(ext, props); err != nil {
if _, ok := err.(*RequiredNotSetError); !ok {
return err
}
reqFieldErr = err
}
ep := sv.Addr().Interface().(Message)
if !rep {
SetExtension(ep, desc, ext.Interface())
} else {
old, err := GetExtension(ep, desc)
var sl reflect.Value
if err == nil {
sl = reflect.ValueOf(old) // existing slice
} else {
sl = reflect.MakeSlice(typ, 0, 1)
}
sl = reflect.Append(sl, ext)
SetExtension(ep, desc, sl.Interface())
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
continue
}
// This is a normal, non-extension field.
name := tok.value
var dst reflect.Value
fi, props, ok := structFieldByName(sprops, name)
if ok {
dst = sv.Field(fi)
} else if oop, ok := sprops.OneofTypes[name]; ok {
// It is a oneof.
props = oop.Prop
nv := reflect.New(oop.Type.Elem())
dst = nv.Elem().Field(0)
field := sv.Field(oop.Field)
if !field.IsNil() {
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
}
field.Set(nv)
}
if !dst.IsValid() {
return p.errorf("unknown field name %q in %v", name, st)
}
if dst.Kind() == reflect.Map {
// Consume any colon.
if err := p.checkForColon(props, dst.Type()); err != nil {
return err
}
// Construct the map if it doesn't already exist.
if dst.IsNil() {
dst.Set(reflect.MakeMap(dst.Type()))
}
key := reflect.New(dst.Type().Key()).Elem()
val := reflect.New(dst.Type().Elem()).Elem()
// The map entry should be this sequence of tokens:
// < key : KEY value : VALUE >
// However, implementations may omit key or value, and technically
// we should support them in any order. See b/28924776 for a time
// this went wrong.
tok := p.next()
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
for {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == terminator {
break
}
switch tok.value {
case "key":
if err := p.consumeToken(":"); err != nil {
return err
}
if err := p.readAny(key, props.MapKeyProp); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
case "value":
if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
return err
}
if err := p.readAny(val, props.MapValProp); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
default:
p.back()
return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
}
}
dst.SetMapIndex(key, val)
continue
}
// Check that it's not already set if it's not a repeated field.
if !props.Repeated && fieldSet[name] {
return p.errorf("non-repeated field %q was repeated", name)
}
if err := p.checkForColon(props, dst.Type()); err != nil {
return err
}
// Parse into the field.
fieldSet[name] = true
if err := p.readAny(dst, props); err != nil {
if _, ok := err.(*RequiredNotSetError); !ok {
return err
}
reqFieldErr = err
}
if props.Required {
reqCount--
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
}
if reqCount > 0 {
return p.missingRequiredFieldError(sv)
}
return reqFieldErr
}
// consumeExtName consumes extension name or expanded Any type URL and the
// following ']'. It returns the name or URL consumed.
func (p *textParser) consumeExtName() (string, error) {
tok := p.next()
if tok.err != nil {
return "", tok.err
}
// If the extension name or type URL is quoted, it's a single token.
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
if err != nil {
return "", err
}
return name, p.consumeToken("]")
}
// Consume everything up to "]"
var parts []string
for tok.value != "]" {
parts = append(parts, tok.value)
tok = p.next()
if tok.err != nil {
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
}
if p.done && tok.value != "]" {
return "", p.errorf("unclosed type_url or extension name")
}
}
return strings.Join(parts, ""), nil
}
// consumeOptionalSeparator consumes an optional semicolon or comma.
// It is used in readStruct to provide backward compatibility.
func (p *textParser) consumeOptionalSeparator() error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ";" && tok.value != "," {
p.back()
}
return nil
}
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == "" {
return p.errorf("unexpected EOF")
}
switch fv := v; fv.Kind() {
case reflect.Slice:
at := v.Type()
if at.Elem().Kind() == reflect.Uint8 {
// Special case for []byte
if tok.value[0] != '"' && tok.value[0] != '\'' {
// Deliberately written out here, as the error after
// this switch statement would write "invalid []byte: ...",
// which is not as user-friendly.
return p.errorf("invalid string: %v", tok.value)
}
bytes := []byte(tok.unquoted)
fv.Set(reflect.ValueOf(bytes))
return nil
}
// Repeated field.
if tok.value == "[" {
// Repeated field with list notation, like [1,2,3].
for {
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
err := p.readAny(fv.Index(fv.Len()-1), props)
if err != nil {
return err
}
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == "]" {
break
}
if tok.value != "," {
return p.errorf("Expected ']' or ',' found %q", tok.value)
}
}
return nil
}
// One value of the repeated field.
p.back()
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
return p.readAny(fv.Index(fv.Len()-1), props)
case reflect.Bool:
// true/1/t/True or false/f/0/False.
switch tok.value {
case "true", "1", "t", "True":
fv.SetBool(true)
return nil
case "false", "0", "f", "False":
fv.SetBool(false)
return nil
}
case reflect.Float32, reflect.Float64:
v := tok.value
// Ignore 'f' for compatibility with output generated by C++, but don't
// remove 'f' when the value is "-inf" or "inf".
if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
v = v[:len(v)-1]
}
if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
fv.SetFloat(f)
return nil
}
case reflect.Int32:
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
fv.SetInt(x)
return nil
}
if len(props.Enum) == 0 {
break
}
m, ok := enumValueMaps[props.Enum]
if !ok {
break
}
x, ok := m[tok.value]
if !ok {
break
}
fv.SetInt(int64(x))
return nil
case reflect.Int64:
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
fv.SetInt(x)
return nil
}
case reflect.Ptr:
// A basic field (indirected through pointer), or a repeated message/group
p.back()
fv.Set(reflect.New(fv.Type().Elem()))
return p.readAny(fv.Elem(), props)
case reflect.String:
if tok.value[0] == '"' || tok.value[0] == '\'' {
fv.SetString(tok.unquoted)
return nil
}
case reflect.Struct:
var terminator string
switch tok.value {
case "{":
terminator = "}"
case "<":
terminator = ">"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
return p.readStruct(fv, terminator)
case reflect.Uint32:
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
fv.SetUint(uint64(x))
return nil
}
case reflect.Uint64:
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
fv.SetUint(x)
return nil
}
}
return p.errorf("invalid %v: %v", v.Type(), tok.value)
}
// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
// before starting to unmarshal, so any existing data in pb is always removed.
// If a required field is not set and no other error occurs,
// UnmarshalText returns *RequiredNotSetError.
func UnmarshalText(s string, pb Message) error {
if um, ok := pb.(encoding.TextUnmarshaler); ok {
return um.UnmarshalText([]byte(s))
}
pb.Reset()
v := reflect.ValueOf(pb)
return newTextParser(s).readStruct(v.Elem(), "")
}
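// unmarshalTextSketch is a minimal illustrative sketch: it treats a missing
// required field as a soft failure while still reporting real parse errors.
// The argument pb is assumed to be any generated type implementing Message.
func unmarshalTextSketch(s string, pb Message) error {
	err := UnmarshalText(s, pb)
	if _, ok := err.(*RequiredNotSetError); ok {
		// The text parsed; only a required field was left unset.
		return nil
	}
	return err
}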

78
vendor/github.com/golang/protobuf/proto/wire.go generated vendored Normal file
View file

@ -0,0 +1,78 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
protoV2 "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/runtime/protoiface"
)
// Size returns the size in bytes of the wire-format encoding of m.
func Size(m Message) int {
if m == nil {
return 0
}
mi := MessageV2(m)
return protoV2.Size(mi)
}
// Marshal returns the wire-format encoding of m.
func Marshal(m Message) ([]byte, error) {
b, err := marshalAppend(nil, m, false)
if b == nil {
b = zeroBytes
}
return b, err
}
var zeroBytes = make([]byte, 0, 0)
func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
if m == nil {
return nil, ErrNil
}
mi := MessageV2(m)
nbuf, err := protoV2.MarshalOptions{
Deterministic: deterministic,
AllowPartial: true,
}.MarshalAppend(buf, mi)
if err != nil {
return buf, err
}
if len(buf) == len(nbuf) {
if !mi.ProtoReflect().IsValid() {
return buf, ErrNil
}
}
return nbuf, checkRequiredNotSet(mi)
}
// Unmarshal parses a wire-format message in b and places the decoded results in m.
//
// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
// removed. Use UnmarshalMerge to preserve and append to existing data.
func Unmarshal(b []byte, m Message) error {
m.Reset()
return UnmarshalMerge(b, m)
}
// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
func UnmarshalMerge(b []byte, m Message) error {
mi := MessageV2(m)
out, err := protoV2.UnmarshalOptions{
AllowPartial: true,
Merge: true,
}.UnmarshalState(protoiface.UnmarshalInput{
Buf: b,
Message: mi.ProtoReflect(),
})
if err != nil {
return err
}
if out.Flags&protoiface.UnmarshalInitialized > 0 {
return nil
}
return checkRequiredNotSet(mi)
}
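A rough usage sketch for the wire-format functions above, again using the vendored FileDescriptorProto type purely for illustration; it is not part of the diff:
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descriptorpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	orig := &descriptorpb.FileDescriptorProto{Name: proto.String("example.proto")}

	// Marshal delegates to the protobuf v2 runtime for the wire encoding.
	b, err := proto.Marshal(orig)
	if err != nil {
		panic(err)
	}
	fmt.Println("size:", proto.Size(orig), "encoded bytes:", len(b))

	// Unmarshal resets the destination first; UnmarshalMerge would instead
	// merge into whatever is already stored in it.
	var decoded descriptorpb.FileDescriptorProto
	if err := proto.Unmarshal(b, &decoded); err != nil {
		panic(err)
	}
	fmt.Println("round-tripped name:", decoded.GetName())
}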

34
vendor/github.com/golang/protobuf/proto/wrappers.go generated vendored Normal file

@ -0,0 +1,34 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
// Bool stores v in a new bool value and returns a pointer to it.
func Bool(v bool) *bool { return &v }
// Int stores v in a new int32 value and returns a pointer to it.
//
// Deprecated: Use Int32 instead.
func Int(v int) *int32 { return Int32(int32(v)) }
// Int32 stores v in a new int32 value and returns a pointer to it.
func Int32(v int32) *int32 { return &v }
// Int64 stores v in a new int64 value and returns a pointer to it.
func Int64(v int64) *int64 { return &v }
// Uint32 stores v in a new uint32 value and returns a pointer to it.
func Uint32(v uint32) *uint32 { return &v }
// Uint64 stores v in a new uint64 value and returns a pointer to it.
func Uint64(v uint64) *uint64 { return &v }
// Float32 stores v in a new float32 value and returns a pointer to it.
func Float32(v float32) *float32 { return &v }
// Float64 stores v in a new float64 value and returns a pointer to it.
func Float64(v float64) *float64 { return &v }
// String stores v in a new string value and returns a pointer to it.
func String(v string) *string { return &v }
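These helpers exist because proto2 optional scalars are generated as pointer fields. A small sketch, using the vendored descriptor types only as an example:
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descriptorpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	// Optional scalar fields are *string, *int32, etc., so literals cannot be
	// assigned directly; the wrapper helpers return the needed pointers.
	field := &descriptorpb.FieldDescriptorProto{
		Name:   proto.String("id"),
		Number: proto.Int32(1),
	}
	fmt.Println(field.GetName(), field.GetNumber())
}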

File diff suppressed because it is too large


@ -1,885 +0,0 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Author: kenton@google.com (Kenton Varda)
// Based on original Protocol Buffers design by
// Sanjay Ghemawat, Jeff Dean, and others.
//
// The messages in this file describe the definitions found in .proto files.
// A valid .proto file can be translated directly to a FileDescriptorProto
// without any other information (e.g. without reading its imports).
syntax = "proto2";
package google.protobuf;
option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";
option java_package = "com.google.protobuf";
option java_outer_classname = "DescriptorProtos";
option csharp_namespace = "Google.Protobuf.Reflection";
option objc_class_prefix = "GPB";
option cc_enable_arenas = true;
// descriptor.proto must be optimized for speed because reflection-based
// algorithms don't work during bootstrapping.
option optimize_for = SPEED;
// The protocol compiler can output a FileDescriptorSet containing the .proto
// files it parses.
message FileDescriptorSet {
repeated FileDescriptorProto file = 1;
}
// Describes a complete .proto file.
message FileDescriptorProto {
optional string name = 1; // file name, relative to root of source tree
optional string package = 2; // e.g. "foo", "foo.bar", etc.
// Names of files imported by this file.
repeated string dependency = 3;
// Indexes of the public imported files in the dependency list above.
repeated int32 public_dependency = 10;
// Indexes of the weak imported files in the dependency list.
// For Google-internal migration only. Do not use.
repeated int32 weak_dependency = 11;
// All top-level definitions in this file.
repeated DescriptorProto message_type = 4;
repeated EnumDescriptorProto enum_type = 5;
repeated ServiceDescriptorProto service = 6;
repeated FieldDescriptorProto extension = 7;
optional FileOptions options = 8;
// This field contains optional information about the original source code.
// You may safely remove this entire field without harming runtime
// functionality of the descriptors -- the information is needed only by
// development tools.
optional SourceCodeInfo source_code_info = 9;
// The syntax of the proto file.
// The supported values are "proto2" and "proto3".
optional string syntax = 12;
}
// Describes a message type.
message DescriptorProto {
optional string name = 1;
repeated FieldDescriptorProto field = 2;
repeated FieldDescriptorProto extension = 6;
repeated DescriptorProto nested_type = 3;
repeated EnumDescriptorProto enum_type = 4;
message ExtensionRange {
optional int32 start = 1; // Inclusive.
optional int32 end = 2; // Exclusive.
optional ExtensionRangeOptions options = 3;
}
repeated ExtensionRange extension_range = 5;
repeated OneofDescriptorProto oneof_decl = 8;
optional MessageOptions options = 7;
// Range of reserved tag numbers. Reserved tag numbers may not be used by
// fields or extension ranges in the same message. Reserved ranges may
// not overlap.
message ReservedRange {
optional int32 start = 1; // Inclusive.
optional int32 end = 2; // Exclusive.
}
repeated ReservedRange reserved_range = 9;
// Reserved field names, which may not be used by fields in the same message.
// A given name may only be reserved once.
repeated string reserved_name = 10;
}
message ExtensionRangeOptions {
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
}
// Describes a field within a message.
message FieldDescriptorProto {
enum Type {
// 0 is reserved for errors.
// Order is weird for historical reasons.
TYPE_DOUBLE = 1;
TYPE_FLOAT = 2;
// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
// negative values are likely.
TYPE_INT64 = 3;
TYPE_UINT64 = 4;
// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
// negative values are likely.
TYPE_INT32 = 5;
TYPE_FIXED64 = 6;
TYPE_FIXED32 = 7;
TYPE_BOOL = 8;
TYPE_STRING = 9;
// Tag-delimited aggregate.
// Group type is deprecated and not supported in proto3. However, Proto3
// implementations should still be able to parse the group wire format and
// treat group fields as unknown fields.
TYPE_GROUP = 10;
TYPE_MESSAGE = 11; // Length-delimited aggregate.
// New in version 2.
TYPE_BYTES = 12;
TYPE_UINT32 = 13;
TYPE_ENUM = 14;
TYPE_SFIXED32 = 15;
TYPE_SFIXED64 = 16;
TYPE_SINT32 = 17; // Uses ZigZag encoding.
TYPE_SINT64 = 18; // Uses ZigZag encoding.
}
enum Label {
// 0 is reserved for errors
LABEL_OPTIONAL = 1;
LABEL_REQUIRED = 2;
LABEL_REPEATED = 3;
}
optional string name = 1;
optional int32 number = 3;
optional Label label = 4;
// If type_name is set, this need not be set. If both this and type_name
// are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
optional Type type = 5;
// For message and enum types, this is the name of the type. If the name
// starts with a '.', it is fully-qualified. Otherwise, C++-like scoping
// rules are used to find the type (i.e. first the nested types within this
// message are searched, then within the parent, on up to the root
// namespace).
optional string type_name = 6;
// For extensions, this is the name of the type being extended. It is
// resolved in the same manner as type_name.
optional string extendee = 2;
// For numeric types, contains the original text representation of the value.
// For booleans, "true" or "false".
// For strings, contains the default text contents (not escaped in any way).
// For bytes, contains the C escaped value. All bytes >= 128 are escaped.
// TODO(kenton): Base-64 encode?
optional string default_value = 7;
// If set, gives the index of a oneof in the containing type's oneof_decl
// list. This field is a member of that oneof.
optional int32 oneof_index = 9;
// JSON name of this field. The value is set by protocol compiler. If the
// user has set a "json_name" option on this field, that option's value
// will be used. Otherwise, it's deduced from the field's name by converting
// it to camelCase.
optional string json_name = 10;
optional FieldOptions options = 8;
}
// Describes a oneof.
message OneofDescriptorProto {
optional string name = 1;
optional OneofOptions options = 2;
}
// Describes an enum type.
message EnumDescriptorProto {
optional string name = 1;
repeated EnumValueDescriptorProto value = 2;
optional EnumOptions options = 3;
// Range of reserved numeric values. Reserved values may not be used by
// entries in the same enum. Reserved ranges may not overlap.
//
// Note that this is distinct from DescriptorProto.ReservedRange in that it
// is inclusive such that it can appropriately represent the entire int32
// domain.
message EnumReservedRange {
optional int32 start = 1; // Inclusive.
optional int32 end = 2; // Inclusive.
}
// Range of reserved numeric values. Reserved numeric values may not be used
// by enum values in the same enum declaration. Reserved ranges may not
// overlap.
repeated EnumReservedRange reserved_range = 4;
// Reserved enum value names, which may not be reused. A given name may only
// be reserved once.
repeated string reserved_name = 5;
}
// Describes a value within an enum.
message EnumValueDescriptorProto {
optional string name = 1;
optional int32 number = 2;
optional EnumValueOptions options = 3;
}
// Describes a service.
message ServiceDescriptorProto {
optional string name = 1;
repeated MethodDescriptorProto method = 2;
optional ServiceOptions options = 3;
}
// Describes a method of a service.
message MethodDescriptorProto {
optional string name = 1;
// Input and output type names. These are resolved in the same way as
// FieldDescriptorProto.type_name, but must refer to a message type.
optional string input_type = 2;
optional string output_type = 3;
optional MethodOptions options = 4;
// Identifies if client streams multiple client messages
optional bool client_streaming = 5 [default = false];
// Identifies if server streams multiple server messages
optional bool server_streaming = 6 [default = false];
}
// ===================================================================
// Options
// Each of the definitions above may have "options" attached. These are
// just annotations which may cause code to be generated slightly differently
// or may contain hints for code that manipulates protocol messages.
//
// Clients may define custom options as extensions of the *Options messages.
// These extensions may not yet be known at parsing time, so the parser cannot
// store the values in them. Instead it stores them in a field in the *Options
// message called uninterpreted_option. This field must have the same name
// across all *Options messages. We then use this field to populate the
// extensions when we build a descriptor, at which point all protos have been
// parsed and so all extensions are known.
//
// Extension numbers for custom options may be chosen as follows:
// * For options which will only be used within a single application or
// organization, or for experimental options, use field numbers 50000
// through 99999. It is up to you to ensure that you do not use the
// same number for multiple options.
// * For options which will be published and used publicly by multiple
// independent entities, e-mail protobuf-global-extension-registry@google.com
// to reserve extension numbers. Simply provide your project name (e.g.
// Objective-C plugin) and your project website (if available) -- there's no
// need to explain how you intend to use them. Usually you only need one
// extension number. You can declare multiple options with only one extension
// number by putting them in a sub-message. See the Custom Options section of
// the docs for examples:
// https://developers.google.com/protocol-buffers/docs/proto#options
// If this turns out to be popular, a web service will be set up
// to automatically assign option numbers.
message FileOptions {
// Sets the Java package where classes generated from this .proto will be
// placed. By default, the proto package is used, but this is often
// inappropriate because proto packages do not normally start with backwards
// domain names.
optional string java_package = 1;
// If set, all the classes from the .proto file are wrapped in a single
// outer class with the given name. This applies to both Proto1
// (equivalent to the old "--one_java_file" option) and Proto2 (where
// a .proto always translates to a single class, but you may want to
// explicitly choose the class name).
optional string java_outer_classname = 8;
// If set true, then the Java code generator will generate a separate .java
// file for each top-level message, enum, and service defined in the .proto
// file. Thus, these types will *not* be nested inside the outer class
// named by java_outer_classname. However, the outer class will still be
// generated to contain the file's getDescriptor() method as well as any
// top-level extensions defined in the file.
optional bool java_multiple_files = 10 [default = false];
// This option does nothing.
optional bool java_generate_equals_and_hash = 20 [deprecated=true];
// If set true, then the Java2 code generator will generate code that
// throws an exception whenever an attempt is made to assign a non-UTF-8
// byte sequence to a string field.
// Message reflection will do the same.
// However, an extension field still accepts non-UTF-8 byte sequences.
// This option has no effect when used with the lite runtime.
optional bool java_string_check_utf8 = 27 [default = false];
// Generated classes can be optimized for speed or code size.
enum OptimizeMode {
SPEED = 1; // Generate complete code for parsing, serialization,
// etc.
CODE_SIZE = 2; // Use ReflectionOps to implement these methods.
LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
}
optional OptimizeMode optimize_for = 9 [default = SPEED];
// Sets the Go package where structs generated from this .proto will be
// placed. If omitted, the Go package will be derived from the following:
// - The basename of the package import path, if provided.
// - Otherwise, the package statement in the .proto file, if present.
// - Otherwise, the basename of the .proto file, without extension.
optional string go_package = 11;
// Should generic services be generated in each language? "Generic" services
// are not specific to any particular RPC system. They are generated by the
// main code generators in each language (without additional plugins).
// Generic services were the only kind of service generation supported by
// early versions of google.protobuf.
//
// Generic services are now considered deprecated in favor of using plugins
// that generate code specific to your particular RPC system. Therefore,
// these default to false. Old code which depends on generic services should
// explicitly set them to true.
optional bool cc_generic_services = 16 [default = false];
optional bool java_generic_services = 17 [default = false];
optional bool py_generic_services = 18 [default = false];
optional bool php_generic_services = 42 [default = false];
// Is this file deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for everything in the file, or it will be completely ignored; in the very
// least, this is a formalization for deprecating files.
optional bool deprecated = 23 [default = false];
// Enables the use of arenas for the proto messages in this file. This applies
// only to generated classes for C++.
optional bool cc_enable_arenas = 31 [default = false];
// Sets the objective c class prefix which is prepended to all objective c
// generated classes from this .proto. There is no default.
optional string objc_class_prefix = 36;
// Namespace for generated classes; defaults to the package.
optional string csharp_namespace = 37;
// By default Swift generators will take the proto package and CamelCase it
// replacing '.' with underscore and use that to prefix the types/symbols
// defined. When this option is provided, they will use this value instead
// to prefix the types/symbols defined.
optional string swift_prefix = 39;
// Sets the php class prefix which is prepended to all php generated classes
// from this .proto. Default is empty.
optional string php_class_prefix = 40;
// Use this option to change the namespace of php generated classes. Default
// is empty. When this option is empty, the package name will be used for
// determining the namespace.
optional string php_namespace = 41;
// Use this option to change the namespace of php generated metadata classes.
// Default is empty. When this option is empty, the proto file name will be
// used for determining the namespace.
optional string php_metadata_namespace = 44;
// Use this option to change the package of ruby generated classes. Default
// is empty. When this option is not set, the package name will be used for
// determining the ruby package.
optional string ruby_package = 45;
// The parser stores options it doesn't recognize here.
// See the documentation for the "Options" section above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message.
// See the documentation for the "Options" section above.
extensions 1000 to max;
reserved 38;
}
message MessageOptions {
// Set true to use the old proto1 MessageSet wire format for extensions.
// This is provided for backwards-compatibility with the MessageSet wire
// format. You should not use this for any other reason: It's less
// efficient, has fewer features, and is more complicated.
//
// The message must be defined exactly as follows:
// message Foo {
// option message_set_wire_format = true;
// extensions 4 to max;
// }
// Note that the message cannot have any defined fields; MessageSets only
// have extensions.
//
// All extensions of your type must be singular messages; e.g. they cannot
// be int32s, enums, or repeated messages.
//
// Because this is an option, the above two restrictions are not enforced by
// the protocol compiler.
optional bool message_set_wire_format = 1 [default = false];
// Disables the generation of the standard "descriptor()" accessor, which can
// conflict with a field of the same name. This is meant to make migration
// from proto1 easier; new code should avoid fields named "descriptor".
optional bool no_standard_descriptor_accessor = 2 [default = false];
// Is this message deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for the message, or it will be completely ignored; in the very least,
// this is a formalization for deprecating messages.
optional bool deprecated = 3 [default = false];
// Whether the message is an automatically generated map entry type for the
// maps field.
//
// For maps fields:
// map<KeyType, ValueType> map_field = 1;
// The parsed descriptor looks like:
// message MapFieldEntry {
// option map_entry = true;
// optional KeyType key = 1;
// optional ValueType value = 2;
// }
// repeated MapFieldEntry map_field = 1;
//
// Implementations may choose not to generate the map_entry=true message, but
// use a native map in the target language to hold the keys and values.
// The reflection APIs in such implementations still need to work as
// if the field is a repeated message field.
//
// NOTE: Do not set the option in .proto files. Always use the maps syntax
// instead. The option should only be implicitly set by the proto compiler
// parser.
optional bool map_entry = 7;
reserved 8; // javalite_serializable
reserved 9; // javanano_as_lite
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
}
message FieldOptions {
// The ctype option instructs the C++ code generator to use a different
// representation of the field than it normally would. See the specific
// options below. This option is not yet implemented in the open source
// release -- sorry, we'll try to include it in a future version!
optional CType ctype = 1 [default = STRING];
enum CType {
// Default mode.
STRING = 0;
CORD = 1;
STRING_PIECE = 2;
}
// The packed option can be enabled for repeated primitive fields to enable
// a more efficient representation on the wire. Rather than repeatedly
// writing the tag and type for each element, the entire array is encoded as
// a single length-delimited blob. In proto3, only explicitly setting it to
// false will avoid using packed encoding.
optional bool packed = 2;
// The jstype option determines the JavaScript type used for values of the
// field. The option is permitted only for 64 bit integral and fixed types
// (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
// is represented as JavaScript string, which avoids loss of precision that
// can happen when a large value is converted to a floating point JavaScript number.
// Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
// use the JavaScript "number" type. The behavior of the default option
// JS_NORMAL is implementation dependent.
//
// This option is an enum to permit additional types to be added, e.g.
// goog.math.Integer.
optional JSType jstype = 6 [default = JS_NORMAL];
enum JSType {
// Use the default type.
JS_NORMAL = 0;
// Use JavaScript strings.
JS_STRING = 1;
// Use JavaScript numbers.
JS_NUMBER = 2;
}
// Should this field be parsed lazily? Lazy applies only to message-type
// fields. It means that when the outer message is initially parsed, the
// inner message's contents will not be parsed but instead stored in encoded
// form. The inner message will actually be parsed when it is first accessed.
//
// This is only a hint. Implementations are free to choose whether to use
// eager or lazy parsing regardless of the value of this option. However,
// setting this option true suggests that the protocol author believes that
// using lazy parsing on this field is worth the additional bookkeeping
// overhead typically needed to implement it.
//
// This option does not affect the public interface of any generated code;
// all method signatures remain the same. Furthermore, thread-safety of the
// interface is not affected by this option; const methods remain safe to
// call from multiple threads concurrently, while non-const methods continue
// to require exclusive access.
//
//
// Note that implementations may choose not to check required fields within
// a lazy sub-message. That is, calling IsInitialized() on the outer message
// may return true even if the inner message has missing required fields.
// This is necessary because otherwise the inner message would have to be
// parsed in order to perform the check, defeating the purpose of lazy
// parsing. An implementation which chooses not to check required fields
// must be consistent about it. That is, for any particular sub-message, the
// implementation must either *always* check its required fields, or *never*
// check its required fields, regardless of whether or not the message has
// been parsed.
optional bool lazy = 5 [default = false];
// Is this field deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for accessors, or it will be completely ignored; in the very least, this
// is a formalization for deprecating fields.
optional bool deprecated = 3 [default = false];
// For Google-internal migration only. Do not use.
optional bool weak = 10 [default = false];
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
reserved 4; // removed jtype
}
message OneofOptions {
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
}
message EnumOptions {
// Set this option to true to allow mapping different tag names to the same
// value.
optional bool allow_alias = 2;
// Is this enum deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for the enum, or it will be completely ignored; in the very least, this
// is a formalization for deprecating enums.
optional bool deprecated = 3 [default = false];
reserved 5; // javanano_as_lite
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
}
message EnumValueOptions {
// Is this enum value deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for the enum value, or it will be completely ignored; in the very least,
// this is a formalization for deprecating enum values.
optional bool deprecated = 1 [default = false];
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
}
message ServiceOptions {
// Note: Field numbers 1 through 32 are reserved for Google's internal RPC
// framework. We apologize for hoarding these numbers to ourselves, but
// we were already using them long before we decided to release Protocol
// Buffers.
// Is this service deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for the service, or it will be completely ignored; in the very least,
// this is a formalization for deprecating services.
optional bool deprecated = 33 [default = false];
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
}
message MethodOptions {
// Note: Field numbers 1 through 32 are reserved for Google's internal RPC
// framework. We apologize for hoarding these numbers to ourselves, but
// we were already using them long before we decided to release Protocol
// Buffers.
// Is this method deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for the method, or it will be completely ignored; in the very least,
// this is a formalization for deprecating methods.
optional bool deprecated = 33 [default = false];
// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
// or neither? HTTP based RPC implementation may choose GET verb for safe
// methods, and PUT verb for idempotent methods instead of the default POST.
enum IdempotencyLevel {
IDEMPOTENCY_UNKNOWN = 0;
NO_SIDE_EFFECTS = 1; // implies idempotent
IDEMPOTENT = 2; // idempotent, but may have side effects
}
optional IdempotencyLevel idempotency_level = 34
[default = IDEMPOTENCY_UNKNOWN];
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
}
// A message representing an option the parser does not recognize. This only
// appears in options protos created by the compiler::Parser class.
// DescriptorPool resolves these when building Descriptor objects. Therefore,
// options protos in descriptor objects (e.g. returned by Descriptor::options(),
// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
// in them.
message UninterpretedOption {
// The name of the uninterpreted option. Each string represents a segment in
// a dot-separated name. is_extension is true iff a segment represents an
// extension (denoted with parentheses in options specs in .proto files).
// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
// "foo.(bar.baz).qux".
message NamePart {
required string name_part = 1;
required bool is_extension = 2;
}
repeated NamePart name = 2;
// The value of the uninterpreted option, in whatever type the tokenizer
// identified it as during parsing. Exactly one of these should be set.
optional string identifier_value = 3;
optional uint64 positive_int_value = 4;
optional int64 negative_int_value = 5;
optional double double_value = 6;
optional bytes string_value = 7;
optional string aggregate_value = 8;
}
// ===================================================================
// Optional source code info
// Encapsulates information about the original source file from which a
// FileDescriptorProto was generated.
message SourceCodeInfo {
// A Location identifies a piece of source code in a .proto file which
// corresponds to a particular definition. This information is intended
// to be useful to IDEs, code indexers, documentation generators, and similar
// tools.
//
// For example, say we have a file like:
// message Foo {
// optional string foo = 1;
// }
// Let's look at just the field definition:
// optional string foo = 1;
// ^ ^^ ^^ ^ ^^^
// a bc de f ghi
// We have the following locations:
// span path represents
// [a,i) [ 4, 0, 2, 0 ] The whole field definition.
// [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
// [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
// [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
// [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
//
// Notes:
// - A location may refer to a repeated field itself (i.e. not to any
// particular index within it). This is used whenever a set of elements are
// logically enclosed in a single code segment. For example, an entire
// extend block (possibly containing multiple extension definitions) will
// have an outer location whose path refers to the "extensions" repeated
// field without an index.
// - Multiple locations may have the same path. This happens when a single
// logical declaration is spread out across multiple places. The most
// obvious example is the "extend" block again -- there may be multiple
// extend blocks in the same scope, each of which will have the same path.
// - A location's span is not always a subset of its parent's span. For
// example, the "extendee" of an extension declaration appears at the
// beginning of the "extend" block and is shared by all extensions within
// the block.
// - Just because a location's span is a subset of some other location's span
// does not mean that it is a descendant. For example, a "group" defines
// both a type and a field in a single declaration. Thus, the locations
// corresponding to the type and field and their components will overlap.
// - Code which tries to interpret locations should probably be designed to
// ignore those that it doesn't understand, as more types of locations could
// be recorded in the future.
repeated Location location = 1;
message Location {
// Identifies which part of the FileDescriptorProto was defined at this
// location.
//
// Each element is a field number or an index. They form a path from
// the root FileDescriptorProto to the place where the definition appears. For
// example, this path:
// [ 4, 3, 2, 7, 1 ]
// refers to:
// file.message_type(3) // 4, 3
// .field(7) // 2, 7
// .name() // 1
// This is because FileDescriptorProto.message_type has field number 4:
// repeated DescriptorProto message_type = 4;
// and DescriptorProto.field has field number 2:
// repeated FieldDescriptorProto field = 2;
// and FieldDescriptorProto.name has field number 1:
// optional string name = 1;
//
// Thus, the above path gives the location of a field name. If we removed
// the last element:
// [ 4, 3, 2, 7 ]
// this path refers to the whole field declaration (from the beginning
// of the label to the terminating semicolon).
repeated int32 path = 1 [packed = true];
// Always has exactly three or four elements: start line, start column,
// end line (optional, otherwise assumed same as start line), end column.
// These are packed into a single field for efficiency. Note that line
// and column numbers are zero-based -- typically you will want to add
// 1 to each before displaying to a user.
repeated int32 span = 2 [packed = true];
// If this SourceCodeInfo represents a complete declaration, these are any
// comments appearing before and after the declaration which appear to be
// attached to the declaration.
//
// A series of line comments appearing on consecutive lines, with no other
// tokens appearing on those lines, will be treated as a single comment.
//
// leading_detached_comments will keep paragraphs of comments that appear
// before (but not connected to) the current element. Each paragraph,
// separated by empty lines, will be one comment element in the repeated
// field.
//
// Only the comment content is provided; comment markers (e.g. //) are
// stripped out. For block comments, leading whitespace and an asterisk
// will be stripped from the beginning of each line other than the first.
// Newlines are included in the output.
//
// Examples:
//
// optional int32 foo = 1; // Comment attached to foo.
// // Comment attached to bar.
// optional int32 bar = 2;
//
// optional string baz = 3;
// // Comment attached to baz.
// // Another line attached to baz.
//
// // Comment attached to qux.
// //
// // Another line attached to qux.
// optional double qux = 4;
//
// // Detached comment for corge. This is not leading or trailing comments
// // to qux or corge because there are blank lines separating it from
// // both.
//
// // Detached comment for corge paragraph 2.
//
// optional string corge = 5;
// /* Block comment attached
// * to corge. Leading asterisks
// * will be removed. */
// /* Block comment attached to
// * grault. */
// optional int32 grault = 6;
//
// // ignored detached comments.
optional string leading_comments = 3;
optional string trailing_comments = 4;
repeated string leading_detached_comments = 6;
}
}
// Describes the relationship between generated code and its original source
// file. A GeneratedCodeInfo message is associated with only one generated
// source file, but may contain references to different source .proto files.
message GeneratedCodeInfo {
// An Annotation connects some span of text in generated code to an element
// of its generating .proto file.
repeated Annotation annotation = 1;
message Annotation {
// Identifies the element in the original source .proto file. This field
// is formatted the same as SourceCodeInfo.Location.path.
repeated int32 path = 1 [packed = true];
// Identifies the filesystem path to the original source .proto.
optional string source_file = 2;
// Identifies the starting offset in bytes in the generated code
// that relates to the identified object.
optional int32 begin = 3;
// Identifies the ending offset in bytes in the generated code that
// relates to the identified offset. The end offset should be one past
// the last relevant byte (so the length of the text = end - begin).
optional int32 end = 4;
}
}
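To make the structure of these descriptor messages concrete, here is a short sketch that builds, in Go, the descriptor for a hypothetical "message Foo { optional string name = 1; }" using the types generated from this file; it is an illustration added here, not part of the vendored sources:
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descriptorpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	// Descriptor for: message Foo { optional string name = 1; }
	msg := &descriptorpb.DescriptorProto{
		Name: proto.String("Foo"),
		Field: []*descriptorpb.FieldDescriptorProto{{
			Name:   proto.String("name"),
			Number: proto.Int32(1),
			Label:  descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum(),
			Type:   descriptorpb.FieldDescriptorProto_TYPE_STRING.Enum(),
		}},
	}
	// Print the descriptor back out in text format.
	fmt.Println(proto.MarshalTextString(msg))
}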


@ -1,51 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
A plugin for the Google protocol buffer compiler to generate Go code.
Run it by building this program and putting it in your path with the name
protoc-gen-go
That word 'go' at the end becomes part of the option string set for the
protocol compiler, so once the protocol compiler (protoc) is installed
you can run
protoc --go_out=output_directory input_directory/file.proto
to generate Go bindings for the protocol defined by file.proto.
With that input, the output will be written to
output_directory/file.pb.go
The generated code is documented in the package comment for
the library.
See the README and documentation for protocol buffers to learn more:
https://developers.google.com/protocol-buffers/
*/
package documentation

File diff suppressed because it is too large


@ -1,117 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2017 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
Package remap handles tracking the locations of Go tokens in a source text
across a rewrite by the Go formatter.
*/
package remap
import (
"fmt"
"go/scanner"
"go/token"
)
// A Location represents a span of byte offsets in the source text.
type Location struct {
Pos, End int // End is exclusive
}
// A Map represents a mapping between token locations in an input source text
// and locations in the corresponding output text.
type Map map[Location]Location
// Find reports whether the specified span is recorded by m, and if so returns
// the new location it was mapped to. If the input span was not found, the
// returned location is the same as the input.
func (m Map) Find(pos, end int) (Location, bool) {
key := Location{
Pos: pos,
End: end,
}
if loc, ok := m[key]; ok {
return loc, true
}
return key, false
}
func (m Map) add(opos, oend, npos, nend int) {
m[Location{Pos: opos, End: oend}] = Location{Pos: npos, End: nend}
}
// Compute constructs a location mapping from input to output. An error is
// reported if any of the tokens of output cannot be mapped.
func Compute(input, output []byte) (Map, error) {
itok := tokenize(input)
otok := tokenize(output)
if len(itok) != len(otok) {
return nil, fmt.Errorf("wrong number of tokens, %d ≠ %d", len(itok), len(otok))
}
m := make(Map)
for i, ti := range itok {
to := otok[i]
if ti.Token != to.Token {
return nil, fmt.Errorf("token %d type mismatch: %s ≠ %s", i+1, ti, to)
}
m.add(ti.pos, ti.end, to.pos, to.end)
}
return m, nil
}
// tokinfo records the span and type of a source token.
type tokinfo struct {
pos, end int
token.Token
}
func tokenize(src []byte) []tokinfo {
fs := token.NewFileSet()
var s scanner.Scanner
s.Init(fs.AddFile("src", fs.Base(), len(src)), src, nil, scanner.ScanComments)
var info []tokinfo
for {
pos, next, lit := s.Scan()
switch next {
case token.SEMICOLON:
continue
}
info = append(info, tokinfo{
pos: int(pos - 1),
end: int(pos + token.Pos(len(lit)) - 1),
Token: next,
})
if next == token.EOF {
break
}
}
return info
}


@ -1,545 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2015 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Package grpc outputs gRPC service descriptions in Go code.
// It runs as a plugin for the Go protocol buffer compiler plugin.
// It is linked in to protoc-gen-go.
package grpc
import (
"fmt"
"strconv"
"strings"
pb "github.com/golang/protobuf/protoc-gen-go/descriptor"
"github.com/golang/protobuf/protoc-gen-go/generator"
)
// generatedCodeVersion indicates a version of the generated code.
// It is incremented whenever an incompatibility between the generated code and
// the grpc package is introduced; the generated code references
// a constant, grpc.SupportPackageIsVersionN (where N is generatedCodeVersion).
const generatedCodeVersion = 6
// Paths for packages used by code generated in this file,
// relative to the import_prefix of the generator.Generator.
const (
contextPkgPath = "context"
grpcPkgPath = "google.golang.org/grpc"
codePkgPath = "google.golang.org/grpc/codes"
statusPkgPath = "google.golang.org/grpc/status"
)
func init() {
generator.RegisterPlugin(new(grpc))
}
// grpc is an implementation of the Go protocol buffer compiler's
// plugin architecture. It generates bindings for gRPC support.
type grpc struct {
gen *generator.Generator
}
// Name returns the name of this plugin, "grpc".
func (g *grpc) Name() string {
return "grpc"
}
// The names for packages imported in the generated code.
// They may vary from the final path component of the import path
// if the name is used by other packages.
var (
contextPkg string
grpcPkg string
)
// Init initializes the plugin.
func (g *grpc) Init(gen *generator.Generator) {
g.gen = gen
}
// Given a type name defined in a .proto, return its object.
// Also record that we're using it, to guarantee the associated import.
func (g *grpc) objectNamed(name string) generator.Object {
g.gen.RecordTypeUse(name)
return g.gen.ObjectNamed(name)
}
// Given a type name defined in a .proto, return its name as we will print it.
func (g *grpc) typeName(str string) string {
return g.gen.TypeName(g.objectNamed(str))
}
// P forwards to g.gen.P.
func (g *grpc) P(args ...interface{}) { g.gen.P(args...) }
// Generate generates code for the services in the given file.
func (g *grpc) Generate(file *generator.FileDescriptor) {
if len(file.FileDescriptorProto.Service) == 0 {
return
}
contextPkg = string(g.gen.AddImport(contextPkgPath))
grpcPkg = string(g.gen.AddImport(grpcPkgPath))
g.P("// Reference imports to suppress errors if they are not otherwise used.")
g.P("var _ ", contextPkg, ".Context")
g.P("var _ ", grpcPkg, ".ClientConnInterface")
g.P()
// Assert version compatibility.
g.P("// This is a compile-time assertion to ensure that this generated file")
g.P("// is compatible with the grpc package it is being compiled against.")
g.P("const _ = ", grpcPkg, ".SupportPackageIsVersion", generatedCodeVersion)
g.P()
for i, service := range file.FileDescriptorProto.Service {
g.generateService(file, service, i)
}
}
// GenerateImports generates the import declaration for this file.
func (g *grpc) GenerateImports(file *generator.FileDescriptor) {
}
// reservedClientName records whether a client name is reserved on the client side.
var reservedClientName = map[string]bool{
// TODO: do we need any in gRPC?
}
func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] }
// deprecationComment is the standard comment added to deprecated
// messages, fields, enums, and enum values.
var deprecationComment = "// Deprecated: Do not use."
// generateService generates all the code for the named service.
func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.ServiceDescriptorProto, index int) {
path := fmt.Sprintf("6,%d", index) // 6 means service.
origServName := service.GetName()
fullServName := origServName
if pkg := file.GetPackage(); pkg != "" {
fullServName = pkg + "." + fullServName
}
servName := generator.CamelCase(origServName)
deprecated := service.GetOptions().GetDeprecated()
g.P()
g.P(fmt.Sprintf(`// %sClient is the client API for %s service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.`, servName, servName))
// Client interface.
if deprecated {
g.P("//")
g.P(deprecationComment)
}
g.P("type ", servName, "Client interface {")
for i, method := range service.Method {
g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service.
if method.GetOptions().GetDeprecated() {
g.P("//")
g.P(deprecationComment)
}
g.P(g.generateClientSignature(servName, method))
}
g.P("}")
g.P()
// Client structure.
g.P("type ", unexport(servName), "Client struct {")
g.P("cc ", grpcPkg, ".ClientConnInterface")
g.P("}")
g.P()
// NewClient factory.
if deprecated {
g.P(deprecationComment)
}
g.P("func New", servName, "Client (cc ", grpcPkg, ".ClientConnInterface) ", servName, "Client {")
g.P("return &", unexport(servName), "Client{cc}")
g.P("}")
g.P()
var methodIndex, streamIndex int
serviceDescVar := "_" + servName + "_serviceDesc"
// Client method implementations.
for _, method := range service.Method {
var descExpr string
if !method.GetServerStreaming() && !method.GetClientStreaming() {
// Unary RPC method
descExpr = fmt.Sprintf("&%s.Methods[%d]", serviceDescVar, methodIndex)
methodIndex++
} else {
// Streaming RPC method
descExpr = fmt.Sprintf("&%s.Streams[%d]", serviceDescVar, streamIndex)
streamIndex++
}
g.generateClientMethod(servName, fullServName, serviceDescVar, method, descExpr)
}
// Server interface.
serverType := servName + "Server"
g.P("// ", serverType, " is the server API for ", servName, " service.")
if deprecated {
g.P("//")
g.P(deprecationComment)
}
g.P("type ", serverType, " interface {")
for i, method := range service.Method {
g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service.
if method.GetOptions().GetDeprecated() {
g.P("//")
g.P(deprecationComment)
}
g.P(g.generateServerSignature(servName, method))
}
g.P("}")
g.P()
// Server Unimplemented struct for forward compatibility.
if deprecated {
g.P(deprecationComment)
}
g.generateUnimplementedServer(servName, service)
// Server registration.
if deprecated {
g.P(deprecationComment)
}
g.P("func Register", servName, "Server(s *", grpcPkg, ".Server, srv ", serverType, ") {")
g.P("s.RegisterService(&", serviceDescVar, `, srv)`)
g.P("}")
g.P()
// Server handler implementations.
var handlerNames []string
for _, method := range service.Method {
hname := g.generateServerMethod(servName, fullServName, method)
handlerNames = append(handlerNames, hname)
}
// Service descriptor.
g.P("var ", serviceDescVar, " = ", grpcPkg, ".ServiceDesc {")
g.P("ServiceName: ", strconv.Quote(fullServName), ",")
g.P("HandlerType: (*", serverType, ")(nil),")
g.P("Methods: []", grpcPkg, ".MethodDesc{")
for i, method := range service.Method {
if method.GetServerStreaming() || method.GetClientStreaming() {
continue
}
g.P("{")
g.P("MethodName: ", strconv.Quote(method.GetName()), ",")
g.P("Handler: ", handlerNames[i], ",")
g.P("},")
}
g.P("},")
g.P("Streams: []", grpcPkg, ".StreamDesc{")
for i, method := range service.Method {
if !method.GetServerStreaming() && !method.GetClientStreaming() {
continue
}
g.P("{")
g.P("StreamName: ", strconv.Quote(method.GetName()), ",")
g.P("Handler: ", handlerNames[i], ",")
if method.GetServerStreaming() {
g.P("ServerStreams: true,")
}
if method.GetClientStreaming() {
g.P("ClientStreams: true,")
}
g.P("},")
}
g.P("},")
g.P("Metadata: \"", file.GetName(), "\",")
g.P("}")
g.P()
}
// generateUnimplementedServer creates the unimplemented server struct
func (g *grpc) generateUnimplementedServer(servName string, service *pb.ServiceDescriptorProto) {
serverType := servName + "Server"
g.P("// Unimplemented", serverType, " can be embedded to have forward compatible implementations.")
g.P("type Unimplemented", serverType, " struct {")
g.P("}")
g.P()
// Unimplemented<service_name>Server's concrete methods
for _, method := range service.Method {
g.generateServerMethodConcrete(servName, method)
}
g.P()
}
// generateServerMethodConcrete returns unimplemented methods which ensure forward compatibility
func (g *grpc) generateServerMethodConcrete(servName string, method *pb.MethodDescriptorProto) {
header := g.generateServerSignatureWithParamNames(servName, method)
g.P("func (*Unimplemented", servName, "Server) ", header, " {")
var nilArg string
if !method.GetServerStreaming() && !method.GetClientStreaming() {
nilArg = "nil, "
}
methName := generator.CamelCase(method.GetName())
statusPkg := string(g.gen.AddImport(statusPkgPath))
codePkg := string(g.gen.AddImport(codePkgPath))
g.P("return ", nilArg, statusPkg, `.Errorf(`, codePkg, `.Unimplemented, "method `, methName, ` not implemented")`)
g.P("}")
}
// generateClientSignature returns the client-side signature for a method.
func (g *grpc) generateClientSignature(servName string, method *pb.MethodDescriptorProto) string {
origMethName := method.GetName()
methName := generator.CamelCase(origMethName)
if reservedClientName[methName] {
methName += "_"
}
reqArg := ", in *" + g.typeName(method.GetInputType())
if method.GetClientStreaming() {
reqArg = ""
}
respName := "*" + g.typeName(method.GetOutputType())
if method.GetServerStreaming() || method.GetClientStreaming() {
respName = servName + "_" + generator.CamelCase(origMethName) + "Client"
}
return fmt.Sprintf("%s(ctx %s.Context%s, opts ...%s.CallOption) (%s, error)", methName, contextPkg, reqArg, grpcPkg, respName)
}
func (g *grpc) generateClientMethod(servName, fullServName, serviceDescVar string, method *pb.MethodDescriptorProto, descExpr string) {
sname := fmt.Sprintf("/%s/%s", fullServName, method.GetName())
methName := generator.CamelCase(method.GetName())
inType := g.typeName(method.GetInputType())
outType := g.typeName(method.GetOutputType())
if method.GetOptions().GetDeprecated() {
g.P(deprecationComment)
}
g.P("func (c *", unexport(servName), "Client) ", g.generateClientSignature(servName, method), "{")
if !method.GetServerStreaming() && !method.GetClientStreaming() {
g.P("out := new(", outType, ")")
// TODO: Pass descExpr to Invoke.
g.P(`err := c.cc.Invoke(ctx, "`, sname, `", in, out, opts...)`)
g.P("if err != nil { return nil, err }")
g.P("return out, nil")
g.P("}")
g.P()
return
}
streamType := unexport(servName) + methName + "Client"
g.P("stream, err := c.cc.NewStream(ctx, ", descExpr, `, "`, sname, `", opts...)`)
g.P("if err != nil { return nil, err }")
g.P("x := &", streamType, "{stream}")
if !method.GetClientStreaming() {
g.P("if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }")
g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }")
}
g.P("return x, nil")
g.P("}")
g.P()
genSend := method.GetClientStreaming()
genRecv := method.GetServerStreaming()
genCloseAndRecv := !method.GetServerStreaming()
// Stream auxiliary types and methods.
g.P("type ", servName, "_", methName, "Client interface {")
if genSend {
g.P("Send(*", inType, ") error")
}
if genRecv {
g.P("Recv() (*", outType, ", error)")
}
if genCloseAndRecv {
g.P("CloseAndRecv() (*", outType, ", error)")
}
g.P(grpcPkg, ".ClientStream")
g.P("}")
g.P()
g.P("type ", streamType, " struct {")
g.P(grpcPkg, ".ClientStream")
g.P("}")
g.P()
if genSend {
g.P("func (x *", streamType, ") Send(m *", inType, ") error {")
g.P("return x.ClientStream.SendMsg(m)")
g.P("}")
g.P()
}
if genRecv {
g.P("func (x *", streamType, ") Recv() (*", outType, ", error) {")
g.P("m := new(", outType, ")")
g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }")
g.P("return m, nil")
g.P("}")
g.P()
}
if genCloseAndRecv {
g.P("func (x *", streamType, ") CloseAndRecv() (*", outType, ", error) {")
g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }")
g.P("m := new(", outType, ")")
g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }")
g.P("return m, nil")
g.P("}")
g.P()
}
}
// generateServerSignatureWithParamNames returns the server-side signature for a method with parameter names.
func (g *grpc) generateServerSignatureWithParamNames(servName string, method *pb.MethodDescriptorProto) string {
origMethName := method.GetName()
methName := generator.CamelCase(origMethName)
if reservedClientName[methName] {
methName += "_"
}
var reqArgs []string
ret := "error"
if !method.GetServerStreaming() && !method.GetClientStreaming() {
reqArgs = append(reqArgs, "ctx "+contextPkg+".Context")
ret = "(*" + g.typeName(method.GetOutputType()) + ", error)"
}
if !method.GetClientStreaming() {
reqArgs = append(reqArgs, "req *"+g.typeName(method.GetInputType()))
}
if method.GetServerStreaming() || method.GetClientStreaming() {
reqArgs = append(reqArgs, "srv "+servName+"_"+generator.CamelCase(origMethName)+"Server")
}
return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret
}
// generateServerSignature returns the server-side signature for a method.
func (g *grpc) generateServerSignature(servName string, method *pb.MethodDescriptorProto) string {
origMethName := method.GetName()
methName := generator.CamelCase(origMethName)
if reservedClientName[methName] {
methName += "_"
}
var reqArgs []string
ret := "error"
if !method.GetServerStreaming() && !method.GetClientStreaming() {
reqArgs = append(reqArgs, contextPkg+".Context")
ret = "(*" + g.typeName(method.GetOutputType()) + ", error)"
}
if !method.GetClientStreaming() {
reqArgs = append(reqArgs, "*"+g.typeName(method.GetInputType()))
}
if method.GetServerStreaming() || method.GetClientStreaming() {
reqArgs = append(reqArgs, servName+"_"+generator.CamelCase(origMethName)+"Server")
}
return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret
}
func (g *grpc) generateServerMethod(servName, fullServName string, method *pb.MethodDescriptorProto) string {
methName := generator.CamelCase(method.GetName())
hname := fmt.Sprintf("_%s_%s_Handler", servName, methName)
inType := g.typeName(method.GetInputType())
outType := g.typeName(method.GetOutputType())
if !method.GetServerStreaming() && !method.GetClientStreaming() {
g.P("func ", hname, "(srv interface{}, ctx ", contextPkg, ".Context, dec func(interface{}) error, interceptor ", grpcPkg, ".UnaryServerInterceptor) (interface{}, error) {")
g.P("in := new(", inType, ")")
g.P("if err := dec(in); err != nil { return nil, err }")
g.P("if interceptor == nil { return srv.(", servName, "Server).", methName, "(ctx, in) }")
g.P("info := &", grpcPkg, ".UnaryServerInfo{")
g.P("Server: srv,")
g.P("FullMethod: ", strconv.Quote(fmt.Sprintf("/%s/%s", fullServName, methName)), ",")
g.P("}")
g.P("handler := func(ctx ", contextPkg, ".Context, req interface{}) (interface{}, error) {")
g.P("return srv.(", servName, "Server).", methName, "(ctx, req.(*", inType, "))")
g.P("}")
g.P("return interceptor(ctx, in, info, handler)")
g.P("}")
g.P()
return hname
}
streamType := unexport(servName) + methName + "Server"
g.P("func ", hname, "(srv interface{}, stream ", grpcPkg, ".ServerStream) error {")
if !method.GetClientStreaming() {
g.P("m := new(", inType, ")")
g.P("if err := stream.RecvMsg(m); err != nil { return err }")
g.P("return srv.(", servName, "Server).", methName, "(m, &", streamType, "{stream})")
} else {
g.P("return srv.(", servName, "Server).", methName, "(&", streamType, "{stream})")
}
g.P("}")
g.P()
genSend := method.GetServerStreaming()
genSendAndClose := !method.GetServerStreaming()
genRecv := method.GetClientStreaming()
// Stream auxiliary types and methods.
g.P("type ", servName, "_", methName, "Server interface {")
if genSend {
g.P("Send(*", outType, ") error")
}
if genSendAndClose {
g.P("SendAndClose(*", outType, ") error")
}
if genRecv {
g.P("Recv() (*", inType, ", error)")
}
g.P(grpcPkg, ".ServerStream")
g.P("}")
g.P()
g.P("type ", streamType, " struct {")
g.P(grpcPkg, ".ServerStream")
g.P("}")
g.P()
if genSend {
g.P("func (x *", streamType, ") Send(m *", outType, ") error {")
g.P("return x.ServerStream.SendMsg(m)")
g.P("}")
g.P()
}
if genSendAndClose {
g.P("func (x *", streamType, ") SendAndClose(m *", outType, ") error {")
g.P("return x.ServerStream.SendMsg(m)")
g.P("}")
g.P()
}
if genRecv {
g.P("func (x *", streamType, ") Recv() (*", inType, ", error) {")
g.P("m := new(", inType, ")")
g.P("if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err }")
g.P("return m, nil")
g.P("}")
g.P()
}
return hname
}
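// Illustration (not from the vendored file): for the same hypothetical Greeter
// service, generateServerSignature yields the interface method
//
//	SayHello(context.Context, *HelloRequest) (*HelloReply, error)
//
// for a unary RPC (and, e.g., Upload(Greeter_UploadServer) error for a
// client-streaming one), while generateServerMethod emits a handler of roughly
// this shape and returns its name for use in the service descriptor:
//
//	func _Greeter_SayHello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
//		in := new(HelloRequest)
//		if err := dec(in); err != nil {
//			return nil, err
//		}
//		if interceptor == nil {
//			return srv.(GreeterServer).SayHello(ctx, in)
//		}
//		info := &grpc.UnaryServerInfo{
//			Server:     srv,
//			FullMethod: "/helloworld.Greeter/SayHello",
//		}
//		handler := func(ctx context.Context, req interface{}) (interface{}, error) {
//			return srv.(GreeterServer).SayHello(ctx, req.(*HelloRequest))
//		}
//		return interceptor(ctx, in, info, handler)
//	}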

View file

@ -1,34 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2015 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package main
import _ "github.com/golang/protobuf/protoc-gen-go/grpc"

View file

@ -1,98 +1,73 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// protoc-gen-go is a plugin for the Google protocol buffer compiler to generate
// Go code. Run it by building this program and putting it in your path with
// the name
// protoc-gen-go
// That word 'go' at the end becomes part of the option string set for the
// protocol compiler, so once the protocol compiler (protoc) is installed
// you can run
// protoc --go_out=output_directory input_directory/file.proto
// to generate Go bindings for the protocol defined by file.proto.
// With that input, the output will be written to
// output_directory/file.pb.go
// Go code. Install it by building this program and making it accessible within
// your PATH with the name:
// protoc-gen-go
//
// The generated code is documented in the package comment for
// the library.
// The 'go' suffix becomes part of the argument for the protocol compiler,
// such that it can be invoked as:
// protoc --go_out=paths=source_relative:. path/to/file.proto
//
// This generates Go bindings for the protocol buffer defined by file.proto.
// With that input, the output will be written to:
// path/to/file.pb.go
//
// See the README and documentation for protocol buffers to learn more:
// https://developers.google.com/protocol-buffers/
// https://developers.google.com/protocol-buffers/
package main
import (
"io/ioutil"
"os"
"flag"
"fmt"
"strings"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/protoc-gen-go/generator"
"github.com/golang/protobuf/internal/gengogrpc"
gengo "google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo"
"google.golang.org/protobuf/compiler/protogen"
)
func main() {
// Begin by allocating a generator. The request and response structures are stored there
// so we can do error handling easily - the response structure contains the field to
// report failure.
g := generator.New()
data, err := ioutil.ReadAll(os.Stdin)
if err != nil {
g.Error(err, "reading input")
}
if err := proto.Unmarshal(data, g.Request); err != nil {
g.Error(err, "parsing input proto")
}
if len(g.Request.FileToGenerate) == 0 {
g.Fail("no files to generate")
}
g.CommandLineParameters(g.Request.GetParameter())
// Create a wrapped version of the Descriptors and EnumDescriptors that
// point to the file that defines them.
g.WrapTypes()
g.SetPackageNames()
g.BuildTypeNameMap()
g.GenerateAllFiles()
// Send back the results.
data, err = proto.Marshal(g.Response)
if err != nil {
g.Error(err, "failed to marshal output proto")
}
_, err = os.Stdout.Write(data)
if err != nil {
g.Error(err, "failed to write output proto")
var (
flags flag.FlagSet
plugins = flags.String("plugins", "", "list of plugins to enable (supported values: grpc)")
importPrefix = flags.String("import_prefix", "", "prefix to prepend to import paths")
)
importRewriteFunc := func(importPath protogen.GoImportPath) protogen.GoImportPath {
switch importPath {
case "context", "fmt", "math":
return importPath
}
if *importPrefix != "" {
return protogen.GoImportPath(*importPrefix) + importPath
}
return importPath
}
protogen.Options{
ParamFunc: flags.Set,
ImportRewriteFunc: importRewriteFunc,
}.Run(func(gen *protogen.Plugin) error {
grpc := false
for _, plugin := range strings.Split(*plugins, ",") {
switch plugin {
case "grpc":
grpc = true
case "":
default:
return fmt.Errorf("protoc-gen-go: unknown plugin %q", plugin)
}
}
for _, f := range gen.Files {
if !f.Generate {
continue
}
g := gengo.GenerateFile(gen, f)
if grpc {
gengogrpc.GenerateFileContent(gen, f, g)
}
}
return nil
})
}
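// Usage note (illustrative, not from the vendored file): with the main function
// above, the grpc plugin is enabled through the protoc parameter string, e.g.
//
//	protoc --go_out=plugins=grpc,paths=source_relative:. path/to/file.proto
//
// Unknown plugin names make the Run callback return an error, and the optional
// import_prefix parameter is prepended to generated import paths (except
// "context", "fmt" and "math").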

View file

@ -1,369 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/compiler/plugin.proto
/*
Package plugin_go is a generated protocol buffer package.
It is generated from these files:
google/protobuf/compiler/plugin.proto
It has these top-level messages:
Version
CodeGeneratorRequest
CodeGeneratorResponse
*/
package plugin_go
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// The version number of protocol compiler.
type Version struct {
Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"`
Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"`
Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"`
// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
// be empty for mainline stable releases.
Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Version) Reset() { *m = Version{} }
func (m *Version) String() string { return proto.CompactTextString(m) }
func (*Version) ProtoMessage() {}
func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Version) Unmarshal(b []byte) error {
return xxx_messageInfo_Version.Unmarshal(m, b)
}
func (m *Version) Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Version.Marshal(b, m, deterministic)
}
func (dst *Version) XXX_Merge(src proto.Message) {
xxx_messageInfo_Version.Merge(dst, src)
}
func (m *Version) XXX_Size() int {
return xxx_messageInfo_Version.Size(m)
}
func (m *Version) XXX_DiscardUnknown() {
xxx_messageInfo_Version.DiscardUnknown(m)
}
var xxx_messageInfo_Version proto.InternalMessageInfo
func (m *Version) GetMajor() int32 {
if m != nil && m.Major != nil {
return *m.Major
}
return 0
}
func (m *Version) GetMinor() int32 {
if m != nil && m.Minor != nil {
return *m.Minor
}
return 0
}
func (m *Version) GetPatch() int32 {
if m != nil && m.Patch != nil {
return *m.Patch
}
return 0
}
func (m *Version) GetSuffix() string {
if m != nil && m.Suffix != nil {
return *m.Suffix
}
return ""
}
// An encoded CodeGeneratorRequest is written to the plugin's stdin.
type CodeGeneratorRequest struct {
// The .proto files that were explicitly listed on the command-line. The
// code generator should generate code only for these files. Each file's
// descriptor will be included in proto_file, below.
FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"`
// The generator parameter passed on the command-line.
Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
// FileDescriptorProtos for all files in files_to_generate and everything
// they import. The files will appear in topological order, so each file
// appears before any file that imports it.
//
// protoc guarantees that all proto_files will be written after
// the fields above, even though this is not technically guaranteed by the
// protobuf wire format. This theoretically could allow a plugin to stream
// in the FileDescriptorProtos and handle them one by one rather than read
// the entire set into memory at once. However, as of this writing, this
// is not similarly optimized on protoc's end -- it will store all fields in
// memory at once before sending them to the plugin.
//
// Type names of fields and extensions in the FileDescriptorProto are always
// fully qualified.
ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"`
// The version number of protocol compiler.
CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} }
func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) }
func (*CodeGeneratorRequest) ProtoMessage() {}
func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *CodeGeneratorRequest) Unmarshal(b []byte) error {
return xxx_messageInfo_CodeGeneratorRequest.Unmarshal(m, b)
}
func (m *CodeGeneratorRequest) Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CodeGeneratorRequest.Marshal(b, m, deterministic)
}
func (dst *CodeGeneratorRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_CodeGeneratorRequest.Merge(dst, src)
}
func (m *CodeGeneratorRequest) XXX_Size() int {
return xxx_messageInfo_CodeGeneratorRequest.Size(m)
}
func (m *CodeGeneratorRequest) XXX_DiscardUnknown() {
xxx_messageInfo_CodeGeneratorRequest.DiscardUnknown(m)
}
var xxx_messageInfo_CodeGeneratorRequest proto.InternalMessageInfo
func (m *CodeGeneratorRequest) GetFileToGenerate() []string {
if m != nil {
return m.FileToGenerate
}
return nil
}
func (m *CodeGeneratorRequest) GetParameter() string {
if m != nil && m.Parameter != nil {
return *m.Parameter
}
return ""
}
func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto {
if m != nil {
return m.ProtoFile
}
return nil
}
func (m *CodeGeneratorRequest) GetCompilerVersion() *Version {
if m != nil {
return m.CompilerVersion
}
return nil
}
// The plugin writes an encoded CodeGeneratorResponse to stdout.
type CodeGeneratorResponse struct {
// Error message. If non-empty, code generation failed. The plugin process
// should exit with status code zero even if it reports an error in this way.
//
// This should be used to indicate errors in .proto files which prevent the
// code generator from generating correct code. Errors which indicate a
// problem in protoc itself -- such as the input CodeGeneratorRequest being
// unparseable -- should be reported by writing a message to stderr and
// exiting with a non-zero status code.
Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} }
func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) }
func (*CodeGeneratorResponse) ProtoMessage() {}
func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *CodeGeneratorResponse) Unmarshal(b []byte) error {
return xxx_messageInfo_CodeGeneratorResponse.Unmarshal(m, b)
}
func (m *CodeGeneratorResponse) Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CodeGeneratorResponse.Marshal(b, m, deterministic)
}
func (dst *CodeGeneratorResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_CodeGeneratorResponse.Merge(dst, src)
}
func (m *CodeGeneratorResponse) XXX_Size() int {
return xxx_messageInfo_CodeGeneratorResponse.Size(m)
}
func (m *CodeGeneratorResponse) XXX_DiscardUnknown() {
xxx_messageInfo_CodeGeneratorResponse.DiscardUnknown(m)
}
var xxx_messageInfo_CodeGeneratorResponse proto.InternalMessageInfo
func (m *CodeGeneratorResponse) GetError() string {
if m != nil && m.Error != nil {
return *m.Error
}
return ""
}
func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File {
if m != nil {
return m.File
}
return nil
}
// Represents a single generated file.
type CodeGeneratorResponse_File struct {
// The file name, relative to the output directory. The name must not
// contain "." or ".." components and must be relative, not be absolute (so,
// the file cannot lie outside the output directory). "/" must be used as
// the path separator, not "\".
//
// If the name is omitted, the content will be appended to the previous
// file. This allows the generator to break large files into small chunks,
// and allows the generated text to be streamed back to protoc so that large
// files need not reside completely in memory at one time. Note that as of
// this writing protoc does not optimize for this -- it will read the entire
// CodeGeneratorResponse before writing files to disk.
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// If non-empty, indicates that the named file should already exist, and the
// content here is to be inserted into that file at a defined insertion
// point. This feature allows a code generator to extend the output
// produced by another code generator. The original generator may provide
// insertion points by placing special annotations in the file that look
// like:
// @@protoc_insertion_point(NAME)
// The annotation can have arbitrary text before and after it on the line,
// which allows it to be placed in a comment. NAME should be replaced with
// an identifier naming the point -- this is what other generators will use
// as the insertion_point. Code inserted at this point will be placed
// immediately above the line containing the insertion point (thus multiple
// insertions to the same point will come out in the order they were added).
// The double-@ is intended to make it unlikely that the generated code
// could contain things that look like insertion points by accident.
//
// For example, the C++ code generator places the following line in the
// .pb.h files that it generates:
// // @@protoc_insertion_point(namespace_scope)
// This line appears within the scope of the file's package namespace, but
// outside of any particular class. Another plugin can then specify the
// insertion_point "namespace_scope" to generate additional classes or
// other declarations that should be placed in this scope.
//
// Note that if the line containing the insertion point begins with
// whitespace, the same whitespace will be added to every line of the
// inserted text. This is useful for languages like Python, where
// indentation matters. In these languages, the insertion point comment
// should be indented the same amount as any inserted code will need to be
// in order to work correctly in that context.
//
// The code generator that generates the initial file and the one which
// inserts into it must both run as part of a single invocation of protoc.
// Code generators are executed in the order in which they appear on the
// command line.
//
// If |insertion_point| is present, |name| must also be present.
InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"`
// The file contents.
Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorResponse_File{} }
func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) }
func (*CodeGeneratorResponse_File) ProtoMessage() {}
func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
func (m *CodeGeneratorResponse_File) Unmarshal(b []byte) error {
return xxx_messageInfo_CodeGeneratorResponse_File.Unmarshal(m, b)
}
func (m *CodeGeneratorResponse_File) Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CodeGeneratorResponse_File.Marshal(b, m, deterministic)
}
func (dst *CodeGeneratorResponse_File) XXX_Merge(src proto.Message) {
xxx_messageInfo_CodeGeneratorResponse_File.Merge(dst, src)
}
func (m *CodeGeneratorResponse_File) XXX_Size() int {
return xxx_messageInfo_CodeGeneratorResponse_File.Size(m)
}
func (m *CodeGeneratorResponse_File) XXX_DiscardUnknown() {
xxx_messageInfo_CodeGeneratorResponse_File.DiscardUnknown(m)
}
var xxx_messageInfo_CodeGeneratorResponse_File proto.InternalMessageInfo
func (m *CodeGeneratorResponse_File) GetName() string {
if m != nil && m.Name != nil {
return *m.Name
}
return ""
}
func (m *CodeGeneratorResponse_File) GetInsertionPoint() string {
if m != nil && m.InsertionPoint != nil {
return *m.InsertionPoint
}
return ""
}
func (m *CodeGeneratorResponse_File) GetContent() string {
if m != nil && m.Content != nil {
return *m.Content
}
return ""
}
func init() {
proto.RegisterType((*Version)(nil), "google.protobuf.compiler.Version")
proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest")
proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse")
proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File")
}
func init() { proto.RegisterFile("google/protobuf/compiler/plugin.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 417 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x6a, 0x14, 0x41,
0x10, 0xc6, 0x19, 0x77, 0x63, 0x98, 0x8a, 0x64, 0x43, 0x13, 0xa5, 0x09, 0x39, 0x8c, 0x8b, 0xe2,
0x5c, 0x32, 0x0b, 0xc1, 0x8b, 0x78, 0x4b, 0x44, 0x3d, 0x78, 0x58, 0x1a, 0xf1, 0x20, 0xc8, 0x30,
0x99, 0xd4, 0x74, 0x5a, 0x66, 0xba, 0xc6, 0xee, 0x1e, 0xf1, 0x49, 0x7d, 0x0f, 0xdf, 0x40, 0xfa,
0xcf, 0x24, 0xb2, 0xb8, 0xa7, 0xee, 0xef, 0x57, 0xd5, 0xd5, 0x55, 0x1f, 0x05, 0x2f, 0x25, 0x91,
0xec, 0x71, 0x33, 0x1a, 0x72, 0x74, 0x33, 0x75, 0x9b, 0x96, 0x86, 0x51, 0xf5, 0x68, 0x36, 0x63,
0x3f, 0x49, 0xa5, 0xab, 0x10, 0x60, 0x3c, 0xa6, 0x55, 0x73, 0x5a, 0x35, 0xa7, 0x9d, 0x15, 0xbb,
0x05, 0x6e, 0xd1, 0xb6, 0x46, 0x8d, 0x8e, 0x4c, 0xcc, 0x5e, 0xb7, 0x70, 0xf8, 0x05, 0x8d, 0x55,
0xa4, 0xd9, 0x29, 0x1c, 0x0c, 0xcd, 0x77, 0x32, 0x3c, 0x2b, 0xb2, 0xf2, 0x40, 0x44, 0x11, 0xa8,
0xd2, 0x64, 0xf8, 0xa3, 0x44, 0xbd, 0xf0, 0x74, 0x6c, 0x5c, 0x7b, 0xc7, 0x17, 0x91, 0x06, 0xc1,
0x9e, 0xc1, 0x63, 0x3b, 0x75, 0x9d, 0xfa, 0xc5, 0x97, 0x45, 0x56, 0xe6, 0x22, 0xa9, 0xf5, 0x9f,
0x0c, 0x4e, 0xaf, 0xe9, 0x16, 0x3f, 0xa0, 0x46, 0xd3, 0x38, 0x32, 0x02, 0x7f, 0x4c, 0x68, 0x1d,
0x2b, 0xe1, 0xa4, 0x53, 0x3d, 0xd6, 0x8e, 0x6a, 0x19, 0x63, 0xc8, 0xb3, 0x62, 0x51, 0xe6, 0xe2,
0xd8, 0xf3, 0xcf, 0x94, 0x5e, 0x20, 0x3b, 0x87, 0x7c, 0x6c, 0x4c, 0x33, 0xa0, 0xc3, 0xd8, 0x4a,
0x2e, 0x1e, 0x00, 0xbb, 0x06, 0x08, 0xe3, 0xd4, 0xfe, 0x15, 0x5f, 0x15, 0x8b, 0xf2, 0xe8, 0xf2,
0x45, 0xb5, 0x6b, 0xcb, 0x7b, 0xd5, 0xe3, 0xbb, 0x7b, 0x03, 0xb6, 0x1e, 0x8b, 0x3c, 0x44, 0x7d,
0x84, 0x7d, 0x82, 0x93, 0xd9, 0xb8, 0xfa, 0x67, 0xf4, 0x24, 0x8c, 0x77, 0x74, 0xf9, 0xbc, 0xda,
0xe7, 0x70, 0x95, 0xcc, 0x13, 0xab, 0x99, 0x24, 0xb0, 0xfe, 0x9d, 0xc1, 0xd3, 0x9d, 0x99, 0xed,
0x48, 0xda, 0xa2, 0xf7, 0x0e, 0x8d, 0x49, 0x3e, 0xe7, 0x22, 0x0a, 0xf6, 0x11, 0x96, 0xff, 0x34,
0xff, 0x7a, 0xff, 0x8f, 0xff, 0x2d, 0x1a, 0x66, 0x13, 0xa1, 0xc2, 0xd9, 0x37, 0x58, 0x86, 0x79,
0x18, 0x2c, 0x75, 0x33, 0x60, 0xfa, 0x26, 0xdc, 0xd9, 0x2b, 0x58, 0x29, 0x6d, 0xd1, 0x38, 0x45,
0xba, 0x1e, 0x49, 0x69, 0x97, 0xcc, 0x3c, 0xbe, 0xc7, 0x5b, 0x4f, 0x19, 0x87, 0xc3, 0x96, 0xb4,
0x43, 0xed, 0xf8, 0x2a, 0x24, 0xcc, 0xf2, 0x4a, 0xc2, 0x79, 0x4b, 0xc3, 0xde, 0xfe, 0xae, 0x9e,
0x6c, 0xc3, 0x6e, 0x06, 0x7b, 0xed, 0xd7, 0x37, 0x52, 0xb9, 0xbb, 0xe9, 0xc6, 0x87, 0x37, 0x92,
0xfa, 0x46, 0xcb, 0x87, 0x65, 0x0c, 0x97, 0xf6, 0x42, 0xa2, 0xbe, 0x90, 0x94, 0x56, 0xfa, 0x6d,
0x3c, 0x6a, 0x49, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x15, 0x40, 0xc5, 0xfe, 0x02, 0x00,
0x00,
}

View file

@ -1,83 +0,0 @@
// Code generated by protoc-gen-go.
// source: google/protobuf/compiler/plugin.proto
// DO NOT EDIT!
package google_protobuf_compiler
import proto "github.com/golang/protobuf/proto"
import "math"
import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
// Reference proto and math imports to suppress error if they are not otherwise used.
var _ = proto.GetString
var _ = math.Inf
type CodeGeneratorRequest struct {
FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"`
Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (this *CodeGeneratorRequest) Reset() { *this = CodeGeneratorRequest{} }
func (this *CodeGeneratorRequest) String() string { return proto.CompactTextString(this) }
func (*CodeGeneratorRequest) ProtoMessage() {}
func (this *CodeGeneratorRequest) GetParameter() string {
if this != nil && this.Parameter != nil {
return *this.Parameter
}
return ""
}
type CodeGeneratorResponse struct {
Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (this *CodeGeneratorResponse) Reset() { *this = CodeGeneratorResponse{} }
func (this *CodeGeneratorResponse) String() string { return proto.CompactTextString(this) }
func (*CodeGeneratorResponse) ProtoMessage() {}
func (this *CodeGeneratorResponse) GetError() string {
if this != nil && this.Error != nil {
return *this.Error
}
return ""
}
type CodeGeneratorResponse_File struct {
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"`
Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (this *CodeGeneratorResponse_File) Reset() { *this = CodeGeneratorResponse_File{} }
func (this *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(this) }
func (*CodeGeneratorResponse_File) ProtoMessage() {}
func (this *CodeGeneratorResponse_File) GetName() string {
if this != nil && this.Name != nil {
return *this.Name
}
return ""
}
func (this *CodeGeneratorResponse_File) GetInsertionPoint() string {
if this != nil && this.InsertionPoint != nil {
return *this.InsertionPoint
}
return ""
}
func (this *CodeGeneratorResponse_File) GetContent() string {
if this != nil && this.Content != nil {
return *this.Content
}
return ""
}
func init() {
}

View file

@ -1,167 +0,0 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Author: kenton@google.com (Kenton Varda)
//
// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to
// change.
//
// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is
// just a program that reads a CodeGeneratorRequest from stdin and writes a
// CodeGeneratorResponse to stdout.
//
// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead
// of dealing with the raw protocol defined here.
//
// A plugin executable needs only to be placed somewhere in the path. The
// plugin should be named "protoc-gen-$NAME", and will then be used when the
// flag "--${NAME}_out" is passed to protoc.
syntax = "proto2";
package google.protobuf.compiler;
option java_package = "com.google.protobuf.compiler";
option java_outer_classname = "PluginProtos";
option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go";
import "google/protobuf/descriptor.proto";
// The version number of protocol compiler.
message Version {
optional int32 major = 1;
optional int32 minor = 2;
optional int32 patch = 3;
// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
// be empty for mainline stable releases.
optional string suffix = 4;
}
// An encoded CodeGeneratorRequest is written to the plugin's stdin.
message CodeGeneratorRequest {
// The .proto files that were explicitly listed on the command-line. The
// code generator should generate code only for these files. Each file's
// descriptor will be included in proto_file, below.
repeated string file_to_generate = 1;
// The generator parameter passed on the command-line.
optional string parameter = 2;
// FileDescriptorProtos for all files in files_to_generate and everything
// they import. The files will appear in topological order, so each file
// appears before any file that imports it.
//
// protoc guarantees that all proto_files will be written after
// the fields above, even though this is not technically guaranteed by the
// protobuf wire format. This theoretically could allow a plugin to stream
// in the FileDescriptorProtos and handle them one by one rather than read
// the entire set into memory at once. However, as of this writing, this
// is not similarly optimized on protoc's end -- it will store all fields in
// memory at once before sending them to the plugin.
//
// Type names of fields and extensions in the FileDescriptorProto are always
// fully qualified.
repeated FileDescriptorProto proto_file = 15;
// The version number of protocol compiler.
optional Version compiler_version = 3;
}
// The plugin writes an encoded CodeGeneratorResponse to stdout.
message CodeGeneratorResponse {
// Error message. If non-empty, code generation failed. The plugin process
// should exit with status code zero even if it reports an error in this way.
//
// This should be used to indicate errors in .proto files which prevent the
// code generator from generating correct code. Errors which indicate a
// problem in protoc itself -- such as the input CodeGeneratorRequest being
// unparseable -- should be reported by writing a message to stderr and
// exiting with a non-zero status code.
optional string error = 1;
// Represents a single generated file.
message File {
// The file name, relative to the output directory. The name must not
// contain "." or ".." components and must be relative, not be absolute (so,
// the file cannot lie outside the output directory). "/" must be used as
// the path separator, not "\".
//
// If the name is omitted, the content will be appended to the previous
// file. This allows the generator to break large files into small chunks,
// and allows the generated text to be streamed back to protoc so that large
// files need not reside completely in memory at one time. Note that as of
// this writing protoc does not optimize for this -- it will read the entire
// CodeGeneratorResponse before writing files to disk.
optional string name = 1;
// If non-empty, indicates that the named file should already exist, and the
// content here is to be inserted into that file at a defined insertion
// point. This feature allows a code generator to extend the output
// produced by another code generator. The original generator may provide
// insertion points by placing special annotations in the file that look
// like:
// @@protoc_insertion_point(NAME)
// The annotation can have arbitrary text before and after it on the line,
// which allows it to be placed in a comment. NAME should be replaced with
// an identifier naming the point -- this is what other generators will use
// as the insertion_point. Code inserted at this point will be placed
// immediately above the line containing the insertion point (thus multiple
// insertions to the same point will come out in the order they were added).
// The double-@ is intended to make it unlikely that the generated code
// could contain things that look like insertion points by accident.
//
// For example, the C++ code generator places the following line in the
// .pb.h files that it generates:
// // @@protoc_insertion_point(namespace_scope)
// This line appears within the scope of the file's package namespace, but
// outside of any particular class. Another plugin can then specify the
// insertion_point "namespace_scope" to generate additional classes or
// other declarations that should be placed in this scope.
//
// Note that if the line containing the insertion point begins with
// whitespace, the same whitespace will be added to every line of the
// inserted text. This is useful for languages like Python, where
// indentation matters. In these languages, the insertion point comment
// should be indented the same amount as any inserted code will need to be
// in order to work correctly in that context.
//
// The code generator that generates the initial file and the one which
// inserts into it must both run as part of a single invocation of protoc.
// Code generators are executed in the order in which they appear on the
// command line.
//
// If |insertion_point| is present, |name| must also be present.
optional string insertion_point = 2;
// The file contents.
optional string content = 15;
}
repeated File file = 15;
}
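// Minimal sketch (not part of the vendored sources) of the plugin contract
// described in the header above: read a CodeGeneratorRequest from stdin and
// write a CodeGeneratorResponse to stdout. It assumes the plugin_go package
// named in the go_package option; the generated file names are made up.
package main

import (
	"io/ioutil"
	"log"
	"os"

	"github.com/golang/protobuf/proto"
	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
)

func main() {
	in, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		log.Fatal(err) // protoc-level problems go to stderr with a non-zero exit
	}
	req := &plugin.CodeGeneratorRequest{}
	if err := proto.Unmarshal(in, req); err != nil {
		log.Fatal(err)
	}
	resp := &plugin.CodeGeneratorResponse{}
	for _, name := range req.GetFileToGenerate() {
		// Emit one stub output file per requested .proto file.
		resp.File = append(resp.File, &plugin.CodeGeneratorResponse_File{
			Name:    proto.String(name + ".stub.txt"),
			Content: proto.String("// generated from " + name),
		})
	}
	out, err := proto.Marshal(resp)
	if err != nil {
		log.Fatal(err)
	}
	os.Stdout.Write(out)
}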

View file

@ -1,141 +1,165 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ptypes
// This file implements functions to marshal proto.Message to/from
// google.protobuf.Any message.
import (
"fmt"
"reflect"
"strings"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/any"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
anypb "github.com/golang/protobuf/ptypes/any"
)
const googleApis = "type.googleapis.com/"
const urlPrefix = "type.googleapis.com/"
// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
//
// Note that regular type assertions should be done using the Is
// function. AnyMessageName is provided for less common use cases like filtering a
// sequence of Any messages based on a set of allowed message type names.
func AnyMessageName(any *any.Any) (string, error) {
// AnyMessageName returns the message name contained in an anypb.Any message.
// Most type assertions should use the Is function instead.
func AnyMessageName(any *anypb.Any) (string, error) {
name, err := anyMessageName(any)
return string(name), err
}
func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
if any == nil {
return "", fmt.Errorf("message is nil")
}
slash := strings.LastIndex(any.TypeUrl, "/")
if slash < 0 {
name := protoreflect.FullName(any.TypeUrl)
if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
name = name[i+len("/"):]
}
if !name.IsValid() {
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
}
return any.TypeUrl[slash+1:], nil
return name, nil
}
// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
func MarshalAny(pb proto.Message) (*any.Any, error) {
value, err := proto.Marshal(pb)
// MarshalAny marshals the given message m into an anypb.Any message.
func MarshalAny(m proto.Message) (*anypb.Any, error) {
switch dm := m.(type) {
case DynamicAny:
m = dm.Message
case *DynamicAny:
if dm == nil {
return nil, proto.ErrNil
}
m = dm.Message
}
b, err := proto.Marshal(m)
if err != nil {
return nil, err
}
return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
}
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
// allocate a proto.Message for the type specified in a google.protobuf.Any
// message. The allocated message is stored in the embedded proto.Message.
//
// Example:
//
// var x ptypes.DynamicAny
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
// fmt.Printf("unmarshaled message: %v", x.Message)
type DynamicAny struct {
proto.Message
}
// Empty returns a new proto.Message of the type specified in a
// google.protobuf.Any message. It returns an error if corresponding message
// type isn't linked in.
func Empty(any *any.Any) (proto.Message, error) {
aname, err := AnyMessageName(any)
// Empty returns a new message of the type specified in an anypb.Any message.
// It returns protoregistry.NotFound if the corresponding message type could not
// be resolved in the global registry.
func Empty(any *anypb.Any) (proto.Message, error) {
name, err := anyMessageName(any)
if err != nil {
return nil, err
}
t := proto.MessageType(aname)
if t == nil {
return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
if err != nil {
return nil, err
}
return reflect.New(t.Elem()).Interface().(proto.Message), nil
return proto.MessageV1(mt.New().Interface()), nil
}
// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
// message and places the decoded result in pb. It returns an error if type of
// contents of Any message does not match type of pb message.
// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
// into the provided message m. It returns an error if the target message
// does not match the type in the Any message or if an unmarshal error occurs.
//
// pb can be a proto.Message, or a *DynamicAny.
func UnmarshalAny(any *any.Any, pb proto.Message) error {
if d, ok := pb.(*DynamicAny); ok {
if d.Message == nil {
// The target message m may be a *DynamicAny message. If the underlying message
// type could not be resolved, then this returns protoregistry.NotFound.
func UnmarshalAny(any *anypb.Any, m proto.Message) error {
if dm, ok := m.(*DynamicAny); ok {
if dm.Message == nil {
var err error
d.Message, err = Empty(any)
dm.Message, err = Empty(any)
if err != nil {
return err
}
}
return UnmarshalAny(any, d.Message)
m = dm.Message
}
aname, err := AnyMessageName(any)
anyName, err := AnyMessageName(any)
if err != nil {
return err
}
mname := proto.MessageName(pb)
if aname != mname {
return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
msgName := proto.MessageName(m)
if anyName != msgName {
return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
}
return proto.Unmarshal(any.Value, pb)
return proto.Unmarshal(any.Value, m)
}
// Is returns true if any value contains a given message type.
func Is(any *any.Any, pb proto.Message) bool {
// The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
// but it avoids scanning TypeUrl for the slash.
if any == nil {
// Is reports whether the Any message contains a message of the specified type.
func Is(any *anypb.Any, m proto.Message) bool {
if any == nil || m == nil {
return false
}
name := proto.MessageName(pb)
prefix := len(any.TypeUrl) - len(name)
return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
name := proto.MessageName(m)
if !strings.HasSuffix(any.TypeUrl, name) {
return false
}
return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
}
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
// allocate a proto.Message for the type specified in an anypb.Any message.
// The allocated message is stored in the embedded proto.Message.
//
// Example:
// var x ptypes.DynamicAny
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
// fmt.Printf("unmarshaled message: %v", x.Message)
type DynamicAny struct{ proto.Message }
func (m DynamicAny) String() string {
if m.Message == nil {
return "<nil>"
}
return m.Message.String()
}
func (m DynamicAny) Reset() {
if m.Message == nil {
return
}
m.Message.Reset()
}
func (m DynamicAny) ProtoMessage() {
return
}
func (m DynamicAny) ProtoReflect() protoreflect.Message {
if m.Message == nil {
return nil
}
return dynamicAny{proto.MessageReflect(m.Message)}
}
type dynamicAny struct{ protoreflect.Message }
func (m dynamicAny) Type() protoreflect.MessageType {
return dynamicAnyType{m.Message.Type()}
}
func (m dynamicAny) New() protoreflect.Message {
return dynamicAnyType{m.Message.Type()}.New()
}
func (m dynamicAny) Interface() protoreflect.ProtoMessage {
return DynamicAny{proto.MessageV1(m.Message.Interface())}
}
type dynamicAnyType struct{ protoreflect.MessageType }
func (t dynamicAnyType) New() protoreflect.Message {
return dynamicAny{t.MessageType.New()}
}
func (t dynamicAnyType) Zero() protoreflect.Message {
return dynamicAny{t.MessageType.Zero()}
}
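// Minimal usage sketch (not part of the vendored sources) for the ptypes API
// above; the Timestamp message is just a convenient stand-in for any
// proto.Message.
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes"
	tspb "github.com/golang/protobuf/ptypes/timestamp"
)

func main() {
	// MarshalAny packs the message and records its type URL.
	ts := ptypes.TimestampNow()
	any, err := ptypes.MarshalAny(ts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(any.TypeUrl) // type.googleapis.com/google.protobuf.Timestamp

	// Is reports whether the Any carries a message of the given concrete type.
	fmt.Println(ptypes.Is(any, &tspb.Timestamp{})) // true

	// UnmarshalAny unpacks the value back into a typed message.
	var out tspb.Timestamp
	if err := ptypes.UnmarshalAny(any, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.GetSeconds())
}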

View file

@ -1,203 +1,62 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/any.proto
// source: github.com/golang/protobuf/ptypes/any/any.proto
package any
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
anypb "google.golang.org/protobuf/types/known/anypb"
reflect "reflect"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// Symbols defined in public import of google/protobuf/any.proto.
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type Any = anypb.Any
// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
// Foo foo = ...;
// Any any;
// any.PackFrom(foo);
// ...
// if (any.UnpackTo(&foo)) {
// ...
// }
//
// Example 2: Pack and unpack a message in Java.
//
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
//
// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
//
// Example 4: Pack and unpack a message in Go
//
// foo := &pb.Foo{...}
// any, err := ptypes.MarshalAny(foo)
// ...
// foo := &pb.Foo{}
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
// ...
// }
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
// package google.profile;
// message Person {
// string first_name = 1;
// string last_name = 2;
// }
//
// {
// "@type": "type.googleapis.com/google.profile.Person",
// "firstName": <string>,
// "lastName": <string>
// }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
// {
// "@type": "type.googleapis.com/google.protobuf.Duration",
// "value": "1.212s"
// }
//
type Any struct {
// A URL/resource name that uniquely identifies the type of the serialized
// protocol buffer message. This string must contain at least
// one "/" character. The last segment of the URL's path must represent
// the fully qualified name of the type (as in
// `path/google.protobuf.Duration`). The name should be in a canonical form
// (e.g., leading "." is not accepted).
//
// In practice, teams usually precompile into the binary all types that they
// expect it to use in the context of Any. However, for URLs which use the
// scheme `http`, `https`, or no scheme, one can optionally set up a type
// server that maps type URLs to message definitions as follows:
//
// * If no scheme is provided, `https` is assumed.
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
// URL, or have them precompiled into a binary to avoid any
// lookup. Therefore, binary compatibility needs to be preserved
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Note: this functionality is not currently available in the official
// protobuf release, and it is not used for type URLs beginning with
// type.googleapis.com.
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
// Must be a valid serialized protocol buffer of the above specified type.
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor
var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{
0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29,
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65,
0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x33,
}
func (m *Any) Reset() { *m = Any{} }
func (m *Any) String() string { return proto.CompactTextString(m) }
func (*Any) ProtoMessage() {}
func (*Any) Descriptor() ([]byte, []int) {
return fileDescriptor_b53526c13ae22eb4, []int{0}
var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func (*Any) XXX_WellKnownType() string { return "Any" }
func (m *Any) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Any.Unmarshal(m, b)
}
func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Any.Marshal(b, m, deterministic)
}
func (m *Any) XXX_Merge(src proto.Message) {
xxx_messageInfo_Any.Merge(m, src)
}
func (m *Any) XXX_Size() int {
return xxx_messageInfo_Any.Size(m)
}
func (m *Any) XXX_DiscardUnknown() {
xxx_messageInfo_Any.DiscardUnknown(m)
}
var xxx_messageInfo_Any proto.InternalMessageInfo
func (m *Any) GetTypeUrl() string {
if m != nil {
return m.TypeUrl
func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() }
func file_github_com_golang_protobuf_ptypes_any_any_proto_init() {
if File_github_com_golang_protobuf_ptypes_any_any_proto != nil {
return
}
return ""
}
func (m *Any) GetValue() []byte {
if m != nil {
return m.Value
}
return nil
}
func init() {
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
}
func init() {
proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4)
}
var fileDescriptor_b53526c13ae22eb4 = []byte{
// 185 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce,
0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52,
0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc,
0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c,
0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce,
0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff,
0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00,
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc,
NumEnums: 0,
NumMessages: 0,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes,
DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs,
}.Build()
File_github_com_golang_protobuf_ptypes_any_any_proto = out.File
file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil
file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil
file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil
}

View file

@ -1,155 +0,0 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option go_package = "github.com/golang/protobuf/ptypes/any";
option java_package = "com.google.protobuf";
option java_outer_classname = "AnyProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
// Foo foo = ...;
// Any any;
// any.PackFrom(foo);
// ...
// if (any.UnpackTo(&foo)) {
// ...
// }
//
// Example 2: Pack and unpack a message in Java.
//
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
//
// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
//
// Example 4: Pack and unpack a message in Go
//
// foo := &pb.Foo{...}
// any, err := ptypes.MarshalAny(foo)
// ...
// foo := &pb.Foo{}
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
// ...
// }
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
// package google.profile;
// message Person {
// string first_name = 1;
// string last_name = 2;
// }
//
// {
// "@type": "type.googleapis.com/google.profile.Person",
// "firstName": <string>,
// "lastName": <string>
// }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
// {
// "@type": "type.googleapis.com/google.protobuf.Duration",
// "value": "1.212s"
// }
//
message Any {
// A URL/resource name that uniquely identifies the type of the serialized
// protocol buffer message. This string must contain at least
// one "/" character. The last segment of the URL's path must represent
// the fully qualified name of the type (as in
// `path/google.protobuf.Duration`). The name should be in a canonical form
// (e.g., leading "." is not accepted).
//
// In practice, teams usually precompile into the binary all types that they
// expect it to use in the context of Any. However, for URLs which use the
// scheme `http`, `https`, or no scheme, one can optionally set up a type
// server that maps type URLs to message definitions as follows:
//
// * If no scheme is provided, `https` is assumed.
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
// URL, or have them precompiled into a binary to avoid any
// lookup. Therefore, binary compatibility needs to be preserved
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Note: this functionality is not currently available in the official
// protobuf release, and it is not used for type URLs beginning with
// type.googleapis.com.
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
string type_url = 1;
// Must be a valid serialized protocol buffer of the above specified type.
bytes value = 2;
}
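For reference, a minimal Go sketch of the pack/unpack flow described in Example 4 above, using the ptypes helpers from this vendored module. It packs a well-known Duration message instead of the hypothetical pb.Foo; the printed values are illustrative only.
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Pack a concrete message (a well-known Duration) into an Any.
	src := &durpb.Duration{Seconds: 3, Nanos: 1}
	packed, err := ptypes.MarshalAny(src)
	if err != nil {
		log.Fatal(err)
	}
	// The type URL records the fully qualified name of the packed type.
	fmt.Println(packed.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	// Unpack back into a typed message; the type URL must match.
	dst := &durpb.Duration{}
	if err := ptypes.UnmarshalAny(packed, dst); err != nil {
		log.Fatal(err)
	}
	fmt.Println(dst.Seconds, dst.Nanos) // 3 1
}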

View file

@ -1,35 +1,6 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package ptypes contains code for interacting with well-known types.
*/
// Package ptypes provides functionality for interacting with well-known types.
package ptypes

View file

@ -1,102 +1,72 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ptypes
// This file implements conversions between google.protobuf.Duration
// and time.Duration.
import (
"errors"
"fmt"
"time"
durpb "github.com/golang/protobuf/ptypes/duration"
durationpb "github.com/golang/protobuf/ptypes/duration"
)
// Range of google.protobuf.Duration as specified in duration.proto.
// This is about 10,000 years in seconds.
const (
// Range of a durpb.Duration in seconds, as specified in
// google/protobuf/duration.proto. This is about 10,000 years in seconds.
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
minSeconds = -maxSeconds
)
// validateDuration determines whether the durpb.Duration is valid according to the
// definition in google/protobuf/duration.proto. A valid durpb.Duration
// may still be too large to fit into a time.Duration (the range of durpb.Duration
// is about 10,000 years, and the range of time.Duration is about 290 years).
func validateDuration(d *durpb.Duration) error {
if d == nil {
return errors.New("duration: nil Duration")
}
if d.Seconds < minSeconds || d.Seconds > maxSeconds {
return fmt.Errorf("duration: %v: seconds out of range", d)
}
if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
return fmt.Errorf("duration: %v: nanos out of range", d)
}
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
}
return nil
}
// Duration converts a durpb.Duration to a time.Duration. Duration
// returns an error if the durpb.Duration is invalid or is too large to be
// represented in a time.Duration.
func Duration(p *durpb.Duration) (time.Duration, error) {
if err := validateDuration(p); err != nil {
// Duration converts a durationpb.Duration to a time.Duration.
// Duration returns an error if dur is invalid or overflows a time.Duration.
func Duration(dur *durationpb.Duration) (time.Duration, error) {
if err := validateDuration(dur); err != nil {
return 0, err
}
d := time.Duration(p.Seconds) * time.Second
if int64(d/time.Second) != p.Seconds {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
d := time.Duration(dur.Seconds) * time.Second
if int64(d/time.Second) != dur.Seconds {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
}
if p.Nanos != 0 {
d += time.Duration(p.Nanos) * time.Nanosecond
if (d < 0) != (p.Nanos < 0) {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
if dur.Nanos != 0 {
d += time.Duration(dur.Nanos) * time.Nanosecond
if (d < 0) != (dur.Nanos < 0) {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
}
}
return d, nil
}
// DurationProto converts a time.Duration to a durpb.Duration.
func DurationProto(d time.Duration) *durpb.Duration {
// DurationProto converts a time.Duration to a durationpb.Duration.
func DurationProto(d time.Duration) *durationpb.Duration {
nanos := d.Nanoseconds()
secs := nanos / 1e9
nanos -= secs * 1e9
return &durpb.Duration{
Seconds: secs,
return &durationpb.Duration{
Seconds: int64(secs),
Nanos: int32(nanos),
}
}
// validateDuration determines whether the durationpb.Duration is valid
// according to the definition in google/protobuf/duration.proto.
// A valid durationpb.Duration may still be too large to fit into a time.Duration.
// Note that the range of durationpb.Duration is about 10,000 years,
// while the range of time.Duration is about 290 years.
func validateDuration(dur *durationpb.Duration) error {
if dur == nil {
return errors.New("duration: nil Duration")
}
if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
return fmt.Errorf("duration: %v: seconds out of range", dur)
}
if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
return fmt.Errorf("duration: %v: nanos out of range", dur)
}
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
}
return nil
}
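A short usage sketch for the two conversion functions above (Duration and DurationProto), assuming they are called through the ptypes package as in this module; the values are illustrative.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// time.Duration -> durationpb.Duration.
	pb := ptypes.DurationProto(90 * time.Second)
	fmt.Println(pb.Seconds, pb.Nanos) // 90 0

	// durationpb.Duration -> time.Duration, with validation applied.
	d, err := ptypes.Duration(pb)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d) // 1m30s

	// Values outside the proto range are rejected instead of truncated.
	if _, err := ptypes.Duration(&durpb.Duration{Seconds: 1 << 62}); err != nil {
		fmt.Println(err) // duration: ...: seconds out of range
	}
}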

View file

@ -1,163 +1,63 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/duration.proto
// source: github.com/golang/protobuf/ptypes/duration/duration.proto
package duration
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
durationpb "google.golang.org/protobuf/types/known/durationpb"
reflect "reflect"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// Symbols defined in public import of google/protobuf/duration.proto.
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type Duration = durationpb.Duration
// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
// resolution. It is independent of any calendar and concepts like "day"
// or "month". It is related to Timestamp in that the difference between
// two Timestamp values is a Duration and it can be added or subtracted
// from a Timestamp. Range is approximately +-10,000 years.
//
// # Examples
//
// Example 1: Compute Duration from two Timestamps in pseudo code.
//
// Timestamp start = ...;
// Timestamp end = ...;
// Duration duration = ...;
//
// duration.seconds = end.seconds - start.seconds;
// duration.nanos = end.nanos - start.nanos;
//
// if (duration.seconds < 0 && duration.nanos > 0) {
// duration.seconds += 1;
// duration.nanos -= 1000000000;
// } else if (duration.seconds > 0 && duration.nanos < 0) {
// duration.seconds -= 1;
// duration.nanos += 1000000000;
// }
//
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
//
// Timestamp start = ...;
// Duration duration = ...;
// Timestamp end = ...;
//
// end.seconds = start.seconds + duration.seconds;
// end.nanos = start.nanos + duration.nanos;
//
// if (end.nanos < 0) {
// end.seconds -= 1;
// end.nanos += 1000000000;
// } else if (end.nanos >= 1000000000) {
// end.seconds += 1;
// end.nanos -= 1000000000;
// }
//
// Example 3: Compute Duration from datetime.timedelta in Python.
//
// td = datetime.timedelta(days=3, minutes=10)
// duration = Duration()
// duration.FromTimedelta(td)
//
// # JSON Mapping
//
// In JSON format, the Duration type is encoded as a string rather than an
// object, where the string ends in the suffix "s" (indicating seconds) and
// is preceded by the number of seconds, with nanoseconds expressed as
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
// microsecond should be expressed in JSON format as "3.000001s".
//
//
type Duration struct {
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
// Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0
// `seconds` field and a positive or negative `nanos` field. For durations
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
func (m *Duration) Reset() { *m = Duration{} }
func (m *Duration) String() string { return proto.CompactTextString(m) }
func (*Duration) ProtoMessage() {}
func (*Duration) Descriptor() ([]byte, []int) {
return fileDescriptor_23597b2ebd7ac6c5, []int{0}
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func (*Duration) XXX_WellKnownType() string { return "Duration" }
func (m *Duration) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Duration.Unmarshal(m, b)
}
func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
}
func (m *Duration) XXX_Merge(src proto.Message) {
xxx_messageInfo_Duration.Merge(m, src)
}
func (m *Duration) XXX_Size() int {
return xxx_messageInfo_Duration.Size(m)
}
func (m *Duration) XXX_DiscardUnknown() {
xxx_messageInfo_Duration.DiscardUnknown(m)
}
var xxx_messageInfo_Duration proto.InternalMessageInfo
func (m *Duration) GetSeconds() int64 {
if m != nil {
return m.Seconds
func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
return
}
return 0
}
func (m *Duration) GetNanos() int32 {
if m != nil {
return m.Nanos
}
return 0
}
func init() {
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
}
func init() {
proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5)
}
var fileDescriptor_23597b2ebd7ac6c5 = []byte{
// 190 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c,
0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56,
0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e,
0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4,
0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78,
0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63,
0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00,
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
NumEnums: 0,
NumMessages: 0,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
}.Build()
File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
}

View file

@ -1,116 +0,0 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option cc_enable_arenas = true;
option go_package = "github.com/golang/protobuf/ptypes/duration";
option java_package = "com.google.protobuf";
option java_outer_classname = "DurationProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
// resolution. It is independent of any calendar and concepts like "day"
// or "month". It is related to Timestamp in that the difference between
// two Timestamp values is a Duration and it can be added or subtracted
// from a Timestamp. Range is approximately +-10,000 years.
//
// # Examples
//
// Example 1: Compute Duration from two Timestamps in pseudo code.
//
// Timestamp start = ...;
// Timestamp end = ...;
// Duration duration = ...;
//
// duration.seconds = end.seconds - start.seconds;
// duration.nanos = end.nanos - start.nanos;
//
// if (duration.seconds < 0 && duration.nanos > 0) {
// duration.seconds += 1;
// duration.nanos -= 1000000000;
// } else if (duration.seconds > 0 && duration.nanos < 0) {
// duration.seconds -= 1;
// duration.nanos += 1000000000;
// }
//
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
//
// Timestamp start = ...;
// Duration duration = ...;
// Timestamp end = ...;
//
// end.seconds = start.seconds + duration.seconds;
// end.nanos = start.nanos + duration.nanos;
//
// if (end.nanos < 0) {
// end.seconds -= 1;
// end.nanos += 1000000000;
// } else if (end.nanos >= 1000000000) {
// end.seconds += 1;
// end.nanos -= 1000000000;
// }
//
// Example 3: Compute Duration from datetime.timedelta in Python.
//
// td = datetime.timedelta(days=3, minutes=10)
// duration = Duration()
// duration.FromTimedelta(td)
//
// # JSON Mapping
//
// In JSON format, the Duration type is encoded as a string rather than an
// object, where the string ends in the suffix "s" (indicating seconds) and
// is preceded by the number of seconds, with nanoseconds expressed as
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
// microsecond should be expressed in JSON format as "3.000001s".
//
//
message Duration {
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
int64 seconds = 1;
// Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0
// `seconds` field and a positive or negative `nanos` field. For durations
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
int32 nanos = 2;
}
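The sign-normalization rule from Example 1 above can be written directly in Go. The helper below is an illustrative sketch of that pseudo code, not part of the generated package.
package main

import "fmt"

// durationBetween subtracts two (seconds, nanos) pairs and normalizes the
// result so that the seconds and nanos fields never have opposite signs,
// matching the rule required by google.protobuf.Duration.
func durationBetween(startSec int64, startNanos int32, endSec int64, endNanos int32) (int64, int32) {
	secs := endSec - startSec
	nanos := endNanos - startNanos
	if secs < 0 && nanos > 0 {
		secs++
		nanos -= 1000000000
	} else if secs > 0 && nanos < 0 {
		secs--
		nanos += 1000000000
	}
	return secs, nanos
}

func main() {
	// 10.5s - 9.8s = 0.7s: raw subtraction gives (1, -300000000);
	// normalization turns it into (0, 700000000).
	fmt.Println(durationBetween(9, 800000000, 10, 500000000))
}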

View file

@ -1,46 +1,18 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ptypes
// This file implements operations on google.protobuf.Timestamp.
import (
"errors"
"fmt"
"time"
tspb "github.com/golang/protobuf/ptypes/timestamp"
timestamppb "github.com/golang/protobuf/ptypes/timestamp"
)
// Range of google.protobuf.Timestamp as specified in timestamp.proto.
const (
// Seconds field of the earliest valid Timestamp.
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
@ -50,17 +22,71 @@ const (
maxValidSeconds = 253402300800
)
// Timestamp converts a timestamppb.Timestamp to a time.Time.
// It returns an error if the argument is invalid.
//
// Unlike most Go functions, if Timestamp returns an error, the first return
// value is not the zero time.Time. Instead, it is the value obtained from the
// time.Unix function when passed the contents of the Timestamp, in the UTC
// locale. This may or may not be a meaningful time; many invalid Timestamps
// do map to valid time.Times.
//
// A nil Timestamp returns an error. The first return value in that case is
// undefined.
func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
// Don't return the zero value on error, because it corresponds to a valid
// timestamp. Instead return whatever time.Unix gives us.
var t time.Time
if ts == nil {
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
} else {
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
}
return t, validateTimestamp(ts)
}
// TimestampNow returns a google.protobuf.Timestamp for the current time.
func TimestampNow() *timestamppb.Timestamp {
ts, err := TimestampProto(time.Now())
if err != nil {
panic("ptypes: time.Now() out of Timestamp range")
}
return ts
}
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
ts := &timestamppb.Timestamp{
Seconds: t.Unix(),
Nanos: int32(t.Nanosecond()),
}
if err := validateTimestamp(ts); err != nil {
return nil, err
}
return ts, nil
}
// TimestampString returns the RFC 3339 string for valid Timestamps.
// For invalid Timestamps, it returns an error message in parentheses.
func TimestampString(ts *timestamppb.Timestamp) string {
t, err := Timestamp(ts)
if err != nil {
return fmt.Sprintf("(%v)", err)
}
return t.Format(time.RFC3339Nano)
}
// validateTimestamp determines whether a Timestamp is valid.
// A valid timestamp represents a time in the range
// [0001-01-01, 10000-01-01) and has a Nanos field
// in the range [0, 1e9).
// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
// and has a Nanos field in the range [0, 1e9).
//
// If the Timestamp is valid, validateTimestamp returns nil.
// Otherwise, it returns an error that describes
// the problem.
// Otherwise, it returns an error that describes the problem.
//
// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
func validateTimestamp(ts *tspb.Timestamp) error {
// Every valid Timestamp can be represented by a time.Time,
// but the converse is not true.
func validateTimestamp(ts *timestamppb.Timestamp) error {
if ts == nil {
return errors.New("timestamp: nil Timestamp")
}
@ -75,58 +101,3 @@ func validateTimestamp(ts *tspb.Timestamp) error {
}
return nil
}
// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
// It returns an error if the argument is invalid.
//
// Unlike most Go functions, if Timestamp returns an error, the first return value
// is not the zero time.Time. Instead, it is the value obtained from the
// time.Unix function when passed the contents of the Timestamp, in the UTC
// locale. This may or may not be a meaningful time; many invalid Timestamps
// do map to valid time.Times.
//
// A nil Timestamp returns an error. The first return value in that case is
// undefined.
func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
// Don't return the zero value on error, because it corresponds to a valid
// timestamp. Instead return whatever time.Unix gives us.
var t time.Time
if ts == nil {
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
} else {
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
}
return t, validateTimestamp(ts)
}
// TimestampNow returns a google.protobuf.Timestamp for the current time.
func TimestampNow() *tspb.Timestamp {
ts, err := TimestampProto(time.Now())
if err != nil {
panic("ptypes: time.Now() out of Timestamp range")
}
return ts
}
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
ts := &tspb.Timestamp{
Seconds: t.Unix(),
Nanos: int32(t.Nanosecond()),
}
if err := validateTimestamp(ts); err != nil {
return nil, err
}
return ts, nil
}
// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
// Timestamps, it returns an error message in parentheses.
func TimestampString(ts *tspb.Timestamp) string {
t, err := Timestamp(ts)
if err != nil {
return fmt.Sprintf("(%v)", err)
}
return t.Format(time.RFC3339Nano)
}
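A small usage sketch for the Timestamp helpers shown above (TimestampProto, Timestamp, TimestampString); the values are illustrative.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Time -> google.protobuf.Timestamp.
	ts, err := ptypes.TimestampProto(time.Date(2017, 1, 15, 1, 30, 15, 10000000, time.UTC))
	if err != nil {
		log.Fatal(err)
	}

	// Timestamp -> time.Time (always in UTC) and its RFC 3339 form.
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(t)                          // 2017-01-15 01:30:15.01 +0000 UTC
	fmt.Println(ptypes.TimestampString(ts)) // 2017-01-15T01:30:15.01Z

	// A nil Timestamp is reported as an error rather than silently accepted.
	if _, err := ptypes.Timestamp(nil); err != nil {
		fmt.Println(err) // timestamp: nil Timestamp
	}
}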

View file

@ -1,185 +1,64 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/timestamp.proto
// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
package timestamp
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// Symbols defined in public import of google/protobuf/timestamp.proto.
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type Timestamp = timestamppb.Timestamp
// A Timestamp represents a point in time independent of any time zone or local
// calendar, encoded as a count of seconds and fractions of seconds at
// nanosecond resolution. The count is relative to an epoch at UTC midnight on
// January 1, 1970, in the proleptic Gregorian calendar which extends the
// Gregorian calendar backwards to year one.
//
// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
// second table is needed for interpretation, using a [24-hour linear
// smear](https://developers.google.com/time/smear).
//
// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
// restricting to that range, we ensure that we can convert to and from [RFC
// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
//
// # Examples
//
// Example 1: Compute Timestamp from POSIX `time()`.
//
// Timestamp timestamp;
// timestamp.set_seconds(time(NULL));
// timestamp.set_nanos(0);
//
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
//
// struct timeval tv;
// gettimeofday(&tv, NULL);
//
// Timestamp timestamp;
// timestamp.set_seconds(tv.tv_sec);
// timestamp.set_nanos(tv.tv_usec * 1000);
//
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
//
// FILETIME ft;
// GetSystemTimeAsFileTime(&ft);
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
//
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
// Timestamp timestamp;
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
//
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
//
// long millis = System.currentTimeMillis();
//
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
// .setNanos((int) ((millis % 1000) * 1000000)).build();
//
//
// Example 5: Compute Timestamp from current time in Python.
//
// timestamp = Timestamp()
// timestamp.GetCurrentTime()
//
// # JSON Mapping
//
// In JSON format, the Timestamp type is encoded as a string in the
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
// where {year} is always expressed using four digits while {month}, {day},
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
// is required. A proto3 JSON serializer should always use UTC (as indicated by
// "Z") when printing the Timestamp type and a proto3 JSON parser should be
// able to accept both UTC and other timezones (as indicated by an offset).
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
//
// In JavaScript, one can convert a Date object to this format using the
// standard
// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
// method. In Python, a standard `datetime.datetime` object can be converted
// to this format using
// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
// the Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
// ) to obtain a formatter capable of generating timestamps in this format.
//
//
type Timestamp struct {
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
}
func (m *Timestamp) Reset() { *m = Timestamp{} }
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
func (*Timestamp) ProtoMessage() {}
func (*Timestamp) Descriptor() ([]byte, []int) {
return fileDescriptor_292007bbfe81227e, []int{0}
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
func (m *Timestamp) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Timestamp.Unmarshal(m, b)
}
func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
}
func (m *Timestamp) XXX_Merge(src proto.Message) {
xxx_messageInfo_Timestamp.Merge(m, src)
}
func (m *Timestamp) XXX_Size() int {
return xxx_messageInfo_Timestamp.Size(m)
}
func (m *Timestamp) XXX_DiscardUnknown() {
xxx_messageInfo_Timestamp.DiscardUnknown(m)
}
var xxx_messageInfo_Timestamp proto.InternalMessageInfo
func (m *Timestamp) GetSeconds() int64 {
if m != nil {
return m.Seconds
func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
return
}
return 0
}
func (m *Timestamp) GetNanos() int32 {
if m != nil {
return m.Nanos
}
return 0
}
func init() {
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
}
func init() {
proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e)
}
var fileDescriptor_292007bbfe81227e = []byte{
// 191 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70,
0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51,
0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89,
0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71,
0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a,
0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43,
0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00,
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
NumEnums: 0,
NumMessages: 0,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
}.Build()
File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
}

View file

@ -1,138 +0,0 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option cc_enable_arenas = true;
option go_package = "github.com/golang/protobuf/ptypes/timestamp";
option java_package = "com.google.protobuf";
option java_outer_classname = "TimestampProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
// A Timestamp represents a point in time independent of any time zone or local
// calendar, encoded as a count of seconds and fractions of seconds at
// nanosecond resolution. The count is relative to an epoch at UTC midnight on
// January 1, 1970, in the proleptic Gregorian calendar which extends the
// Gregorian calendar backwards to year one.
//
// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
// second table is needed for interpretation, using a [24-hour linear
// smear](https://developers.google.com/time/smear).
//
// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
// restricting to that range, we ensure that we can convert to and from [RFC
// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
//
// # Examples
//
// Example 1: Compute Timestamp from POSIX `time()`.
//
// Timestamp timestamp;
// timestamp.set_seconds(time(NULL));
// timestamp.set_nanos(0);
//
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
//
// struct timeval tv;
// gettimeofday(&tv, NULL);
//
// Timestamp timestamp;
// timestamp.set_seconds(tv.tv_sec);
// timestamp.set_nanos(tv.tv_usec * 1000);
//
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
//
// FILETIME ft;
// GetSystemTimeAsFileTime(&ft);
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
//
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
// Timestamp timestamp;
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
//
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
//
// long millis = System.currentTimeMillis();
//
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
// .setNanos((int) ((millis % 1000) * 1000000)).build();
//
//
// Example 5: Compute Timestamp from current time in Python.
//
// timestamp = Timestamp()
// timestamp.GetCurrentTime()
//
// # JSON Mapping
//
// In JSON format, the Timestamp type is encoded as a string in the
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
// where {year} is always expressed using four digits while {month}, {day},
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
// is required. A proto3 JSON serializer should always use UTC (as indicated by
// "Z") when printing the Timestamp type and a proto3 JSON parser should be
// able to accept both UTC and other timezones (as indicated by an offset).
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
//
// In JavaScript, one can convert a Date object to this format using the
// standard
// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
// method. In Python, a standard `datetime.datetime` object can be converted
// to this format using
// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
// the Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
// ) to obtain a formatter capable of generating timestamps in this format.
//
//
message Timestamp {
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
int64 seconds = 1;
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive.
int32 nanos = 2;
}
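As an illustration of the arithmetic in Example 4 above (milliseconds since the Unix epoch), a hypothetical Go helper; the function name is not part of this package and the sketch assumes a non-negative millisecond count.
package main

import (
	"fmt"

	timestamppb "github.com/golang/protobuf/ptypes/timestamp"
)

// timestampFromMillis splits a non-negative millisecond count since the Unix
// epoch into whole seconds and the nanosecond remainder, mirroring Example 4.
func timestampFromMillis(millis int64) *timestamppb.Timestamp {
	return &timestamppb.Timestamp{
		Seconds: millis / 1000,
		Nanos:   int32((millis % 1000) * 1000000),
	}
}

func main() {
	ts := timestampFromMillis(1484443815010) // 2017-01-15T01:30:15.01Z
	fmt.Println(ts.Seconds, ts.Nanos)        // 1484443815 10000000
}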

View file

@ -133,12 +133,6 @@ func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error)
return openat(dirfd, path, flags|O_LARGEFILE, mode)
}
//sys openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error)
func Openat2(dirfd int, path string, how *OpenHow) (fd int, err error) {
return openat2(dirfd, path, how, SizeofOpenHow)
}
//sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error)
func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {

View file

@ -83,22 +83,6 @@ func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
r0, _, e1 := Syscall6(SYS_OPENAT2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(open_how)), uintptr(size), 0, 0)
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
n = int(r0)

View file

@ -691,22 +691,6 @@ const (
AT_EACCESS = 0x200
)
type OpenHow struct {
Flags uint64
Mode uint64
Resolve uint64
}
const SizeofOpenHow = 0x18
const (
RESOLVE_BENEATH = 0x8
RESOLVE_IN_ROOT = 0x10
RESOLVE_NO_MAGICLINKS = 0x2
RESOLVE_NO_SYMLINKS = 0x4
RESOLVE_NO_XDEV = 0x1
)
type PollFd struct {
Fd int32
Events int16

View file

@ -150,7 +150,11 @@ func traverse(files []*ast.File) []event {
extent += int(f.End() - f.Pos())
}
// This estimate is based on the net/http package.
events := make([]event, 0, extent*33/100)
capacity := extent * 33 / 100
if capacity > 1e6 {
capacity = 1e6 // impose some reasonable maximum
}
events := make([]event, 0, capacity)
var stack []event
for _, f := range files {

3
vendor/google.golang.org/protobuf/AUTHORS generated vendored Normal file
View file

@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at https://tip.golang.org/AUTHORS.

3
vendor/google.golang.org/protobuf/CONTRIBUTORS generated vendored Normal file
View file

@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at https://tip.golang.org/CONTRIBUTORS.

27
vendor/google.golang.org/protobuf/LICENSE generated vendored Normal file
View file

@ -0,0 +1,27 @@
Copyright (c) 2018 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

22
vendor/google.golang.org/protobuf/PATENTS generated vendored Normal file
View file

@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

View file

@ -0,0 +1,168 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package internal_gengo
import (
"unicode"
"unicode/utf8"
"google.golang.org/protobuf/compiler/protogen"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/types/descriptorpb"
)
type fileInfo struct {
*protogen.File
allEnums []*enumInfo
allMessages []*messageInfo
allExtensions []*extensionInfo
allEnumsByPtr map[*enumInfo]int // value is index into allEnums
allMessagesByPtr map[*messageInfo]int // value is index into allMessages
allMessageFieldsByPtr map[*messageInfo]*structFields
// needRawDesc specifies whether the generator should emit logic to provide
// the legacy raw descriptor in GZIP'd form.
// This is updated by enum and message generation logic as necessary,
// and checked at the end of file generation.
needRawDesc bool
}
type structFields struct {
count int
unexported map[int]string
}
func (sf *structFields) append(name string) {
if r, _ := utf8.DecodeRuneInString(name); !unicode.IsUpper(r) {
if sf.unexported == nil {
sf.unexported = make(map[int]string)
}
sf.unexported[sf.count] = name
}
sf.count++
}
func newFileInfo(file *protogen.File) *fileInfo {
f := &fileInfo{File: file}
// Collect all enums, messages, and extensions in "flattened ordering".
// See filetype.TypeBuilder.
var walkMessages func([]*protogen.Message, func(*protogen.Message))
walkMessages = func(messages []*protogen.Message, f func(*protogen.Message)) {
for _, m := range messages {
f(m)
walkMessages(m.Messages, f)
}
}
initEnumInfos := func(enums []*protogen.Enum) {
for _, enum := range enums {
f.allEnums = append(f.allEnums, newEnumInfo(f, enum))
}
}
initMessageInfos := func(messages []*protogen.Message) {
for _, message := range messages {
f.allMessages = append(f.allMessages, newMessageInfo(f, message))
}
}
initExtensionInfos := func(extensions []*protogen.Extension) {
for _, extension := range extensions {
f.allExtensions = append(f.allExtensions, newExtensionInfo(f, extension))
}
}
initEnumInfos(f.Enums)
initMessageInfos(f.Messages)
initExtensionInfos(f.Extensions)
walkMessages(f.Messages, func(m *protogen.Message) {
initEnumInfos(m.Enums)
initMessageInfos(m.Messages)
initExtensionInfos(m.Extensions)
})
// Derive a reverse mapping of enum and message pointers to their index
// in allEnums and allMessages.
if len(f.allEnums) > 0 {
f.allEnumsByPtr = make(map[*enumInfo]int)
for i, e := range f.allEnums {
f.allEnumsByPtr[e] = i
}
}
if len(f.allMessages) > 0 {
f.allMessagesByPtr = make(map[*messageInfo]int)
f.allMessageFieldsByPtr = make(map[*messageInfo]*structFields)
for i, m := range f.allMessages {
f.allMessagesByPtr[m] = i
f.allMessageFieldsByPtr[m] = new(structFields)
}
}
return f
}
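// Illustrative sketch of the "flattened ordering" built above, for a file
// declaring (names hypothetical):
//
//	enum Color {...}
//	message Outer {
//	  enum Kind {...}
//	  message Inner {...}
//	}
//
// the resulting slices are:
//
//	allEnums:    [Color, Outer.Kind]
//	allMessages: [Outer, Outer.Inner]
//
// i.e. all top-level declarations first, then nested declarations in
// depth-first message order.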
type enumInfo struct {
*protogen.Enum
genJSONMethod bool
genRawDescMethod bool
}
func newEnumInfo(f *fileInfo, enum *protogen.Enum) *enumInfo {
e := &enumInfo{Enum: enum}
e.genJSONMethod = true
e.genRawDescMethod = true
return e
}
type messageInfo struct {
*protogen.Message
genRawDescMethod bool
genExtRangeMethod bool
isTracked bool
hasWeak bool
}
func newMessageInfo(f *fileInfo, message *protogen.Message) *messageInfo {
m := &messageInfo{Message: message}
m.genRawDescMethod = true
m.genExtRangeMethod = true
m.isTracked = isTrackedMessage(m)
for _, field := range m.Fields {
m.hasWeak = m.hasWeak || field.Desc.IsWeak()
}
return m
}
// isTrackedMessage reports whether field tracking is enabled on the message.
func isTrackedMessage(m *messageInfo) (tracked bool) {
const trackFieldUse_fieldNumber = 37383685
// Decode the option from unknown fields to avoid a dependency on the
// annotation proto from protoc-gen-go.
b := m.Desc.Options().(*descriptorpb.MessageOptions).ProtoReflect().GetUnknown()
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
if num == trackFieldUse_fieldNumber && typ == protowire.VarintType {
v, _ := protowire.ConsumeVarint(b)
tracked = protowire.DecodeBool(v)
}
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
return tracked
}
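// exampleTrackedOption is an illustrative sketch (not used by the generator)
// of the wire data that isTrackedMessage scans for: the tracking option is
// encoded in the unknown fields of MessageOptions as a varint bool with the
// same field number as the constant above.
func exampleTrackedOption() bool {
	const trackFieldUse_fieldNumber = 37383685
	var b []byte
	b = protowire.AppendTag(b, trackFieldUse_fieldNumber, protowire.VarintType)
	b = protowire.AppendVarint(b, protowire.EncodeBool(true))
	num, typ, n := protowire.ConsumeTag(b)
	b = b[n:]
	if num == trackFieldUse_fieldNumber && typ == protowire.VarintType {
		v, _ := protowire.ConsumeVarint(b)
		return protowire.DecodeBool(v) // true
	}
	return false
}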
type extensionInfo struct {
*protogen.Extension
}
func newExtensionInfo(f *fileInfo, extension *protogen.Extension) *extensionInfo {
x := &extensionInfo{Extension: extension}
return x
}

View file

@ -0,0 +1,891 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package internal_gengo is internal to the protobuf module.
package internal_gengo
import (
"fmt"
"go/ast"
"go/parser"
"go/token"
"math"
"strconv"
"strings"
"unicode"
"unicode/utf8"
"google.golang.org/protobuf/compiler/protogen"
"google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/encoding/tag"
"google.golang.org/protobuf/internal/fieldnum"
"google.golang.org/protobuf/internal/genname"
"google.golang.org/protobuf/internal/version"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/runtime/protoimpl"
"google.golang.org/protobuf/types/descriptorpb"
)
// GenerateVersionMarkers specifies whether to generate version markers.
var GenerateVersionMarkers = true
// Standard library dependencies.
const (
mathPackage = protogen.GoImportPath("math")
reflectPackage = protogen.GoImportPath("reflect")
syncPackage = protogen.GoImportPath("sync")
)
// Protobuf library dependencies.
//
// These are declared as an interface type so that they can be more easily
// patched to support unique build environments that impose restrictions
// on the dependencies of generated source code.
var (
protoifacePackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/runtime/protoiface")
protoimplPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/runtime/protoimpl")
protoreflectPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/reflect/protoreflect")
protoV1Package goImportPath = protogen.GoImportPath("github.com/golang/protobuf/proto")
)
type goImportPath interface {
String() string
Ident(string) protogen.GoIdent
}
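// examplePatchRuntimePath is an illustrative sketch (never called): because
// the variables above are declared with the goImportPath interface type, a
// restricted build environment can point generated references at another
// copy of the runtime. The import path below is hypothetical.
func examplePatchRuntimePath() {
	protoimplPackage = protogen.GoImportPath("example.com/vendored/protobuf/runtime/protoimpl")
}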
// GenerateFile generates the contents of a .pb.go file.
func GenerateFile(gen *protogen.Plugin, file *protogen.File) *protogen.GeneratedFile {
filename := file.GeneratedFilenamePrefix + ".pb.go"
g := gen.NewGeneratedFile(filename, file.GoImportPath)
f := newFileInfo(file)
genStandaloneComments(g, f, fieldnum.FileDescriptorProto_Syntax)
genGeneratedHeader(gen, g, f)
genStandaloneComments(g, f, fieldnum.FileDescriptorProto_Package)
g.P("package ", f.GoPackageName)
g.P()
// Emit a static check that enforces a minimum version of the proto package.
if GenerateVersionMarkers {
g.P("const (")
g.P("// Verify that this generated code is sufficiently up-to-date.")
g.P("_ = ", protoimplPackage.Ident("EnforceVersion"), "(", protoimpl.GenVersion, " - ", protoimplPackage.Ident("MinVersion"), ")")
g.P("// Verify that runtime/protoimpl is sufficiently up-to-date.")
g.P("_ = ", protoimplPackage.Ident("EnforceVersion"), "(", protoimplPackage.Ident("MaxVersion"), " - ", protoimpl.GenVersion, ")")
g.P(")")
g.P()
// TODO: Remove this after some soak-in period after the v2 release.
g.P("// This is a compile-time assertion that a sufficiently up-to-date version")
g.P("// of the legacy proto package is being used.")
g.P("const _ = ", protoV1Package.Ident("ProtoPackageIsVersion4"))
g.P()
}
for i, imps := 0, f.Desc.Imports(); i < imps.Len(); i++ {
genImport(gen, g, f, imps.Get(i))
}
for _, enum := range f.allEnums {
genEnum(g, f, enum)
}
for _, message := range f.allMessages {
genMessage(g, f, message)
}
genExtensions(g, f)
genReflectFileDescriptor(gen, g, f)
return g
}
// genStandaloneComments prints all leading comments for a FileDescriptorProto
// location identified by the field number n.
func genStandaloneComments(g *protogen.GeneratedFile, f *fileInfo, n int32) {
for _, loc := range f.Proto.GetSourceCodeInfo().GetLocation() {
if len(loc.Path) == 1 && loc.Path[0] == n {
for _, s := range loc.GetLeadingDetachedComments() {
g.P(protogen.Comments(s))
g.P()
}
if s := loc.GetLeadingComments(); s != "" {
g.P(protogen.Comments(s))
g.P()
}
}
}
}
func genGeneratedHeader(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo) {
g.P("// Code generated by protoc-gen-go. DO NOT EDIT.")
if GenerateVersionMarkers {
g.P("// versions:")
protocGenGoVersion := version.String()
protocVersion := "(unknown)"
if v := gen.Request.GetCompilerVersion(); v != nil {
protocVersion = fmt.Sprintf("v%v.%v.%v", v.GetMajor(), v.GetMinor(), v.GetPatch())
}
g.P("// \tprotoc-gen-go ", protocGenGoVersion)
g.P("// \tprotoc ", protocVersion)
}
if f.Proto.GetOptions().GetDeprecated() {
g.P("// ", f.Desc.Path(), " is a deprecated file.")
} else {
g.P("// source: ", f.Desc.Path())
}
g.P()
}
func genImport(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo, imp protoreflect.FileImport) {
impFile, ok := gen.FilesByPath[imp.Path()]
if !ok {
return
}
if impFile.GoImportPath == f.GoImportPath {
// Don't generate imports or aliases for types in the same Go package.
return
}
// Generate imports for all non-weak dependencies, even if they are not
// referenced, because other code and tools depend on having the
// full transitive closure of protocol buffer types in the binary.
if !imp.IsWeak {
g.Import(impFile.GoImportPath)
}
if !imp.IsPublic {
return
}
// Generate public imports by generating the imported file, parsing it,
// and extracting every symbol that should receive a forwarding declaration.
impGen := GenerateFile(gen, impFile)
impGen.Skip()
b, err := impGen.Content()
if err != nil {
gen.Error(err)
return
}
fset := token.NewFileSet()
astFile, err := parser.ParseFile(fset, "", b, parser.ParseComments)
if err != nil {
gen.Error(err)
return
}
genForward := func(tok token.Token, name string, expr ast.Expr) {
// Don't import unexported symbols.
r, _ := utf8.DecodeRuneInString(name)
if !unicode.IsUpper(r) {
return
}
// Don't import the FileDescriptor.
if name == impFile.GoDescriptorIdent.GoName {
return
}
// Don't import decls referencing a symbol defined in another package.
// i.e., don't import decls which are themselves public imports:
//
// type T = somepackage.T
if _, ok := expr.(*ast.SelectorExpr); ok {
return
}
g.P(tok, " ", name, " = ", impFile.GoImportPath.Ident(name))
}
g.P("// Symbols defined in public import of ", imp.Path(), ".")
g.P()
for _, decl := range astFile.Decls {
switch decl := decl.(type) {
case *ast.GenDecl:
for _, spec := range decl.Specs {
switch spec := spec.(type) {
case *ast.TypeSpec:
genForward(decl.Tok, spec.Name.Name, spec.Type)
case *ast.ValueSpec:
for i, name := range spec.Names {
var expr ast.Expr
if i < len(spec.Values) {
expr = spec.Values[i]
}
genForward(decl.Tok, name.Name, expr)
}
case *ast.ImportSpec:
default:
panic(fmt.Sprintf("can't generate forward for spec type %T", spec))
}
}
}
}
g.P()
}
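// Illustrative sketch of the forwarding declarations emitted for a public
// import (package and symbol names hypothetical):
//
//	// Symbols defined in public import of other.proto.
//	type OtherMessage = otherpb.OtherMessage
//	const OtherEnum_VALUE = otherpb.OtherEnum_VALUE
//	var E_OtherExtension = otherpb.E_OtherExtension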
func genEnum(g *protogen.GeneratedFile, f *fileInfo, e *enumInfo) {
// Enum type declaration.
g.Annotate(e.GoIdent.GoName, e.Location)
leadingComments := appendDeprecationSuffix(e.Comments.Leading,
e.Desc.Options().(*descriptorpb.EnumOptions).GetDeprecated())
g.P(leadingComments,
"type ", e.GoIdent, " int32")
// Enum value constants.
g.P("const (")
for _, value := range e.Values {
g.Annotate(value.GoIdent.GoName, value.Location)
leadingComments := appendDeprecationSuffix(value.Comments.Leading,
value.Desc.Options().(*descriptorpb.EnumValueOptions).GetDeprecated())
g.P(leadingComments,
value.GoIdent, " ", e.GoIdent, " = ", value.Desc.Number(),
trailingComment(value.Comments.Trailing))
}
g.P(")")
g.P()
// Enum value maps.
g.P("// Enum value maps for ", e.GoIdent, ".")
g.P("var (")
g.P(e.GoIdent.GoName+"_name", " = map[int32]string{")
for _, value := range e.Values {
duplicate := ""
if value.Desc != e.Desc.Values().ByNumber(value.Desc.Number()) {
duplicate = "// Duplicate value: "
}
g.P(duplicate, value.Desc.Number(), ": ", strconv.Quote(string(value.Desc.Name())), ",")
}
g.P("}")
g.P(e.GoIdent.GoName+"_value", " = map[string]int32{")
for _, value := range e.Values {
g.P(strconv.Quote(string(value.Desc.Name())), ": ", value.Desc.Number(), ",")
}
g.P("}")
g.P(")")
g.P()
// Enum method.
//
// NOTE: A pointer value is needed to represent presence in proto2.
// Since a proto2 message can reference a proto3 enum, it is useful to
// always generate this method (even on proto3 enums) to support that case.
g.P("func (x ", e.GoIdent, ") Enum() *", e.GoIdent, " {")
g.P("p := new(", e.GoIdent, ")")
g.P("*p = x")
g.P("return p")
g.P("}")
g.P()
// String method.
g.P("func (x ", e.GoIdent, ") String() string {")
g.P("return ", protoimplPackage.Ident("X"), ".EnumStringOf(x.Descriptor(), ", protoreflectPackage.Ident("EnumNumber"), "(x))")
g.P("}")
g.P()
genEnumReflectMethods(g, f, e)
// UnmarshalJSON method.
if e.genJSONMethod && e.Desc.Syntax() == protoreflect.Proto2 {
g.P("// Deprecated: Do not use.")
g.P("func (x *", e.GoIdent, ") UnmarshalJSON(b []byte) error {")
g.P("num, err := ", protoimplPackage.Ident("X"), ".UnmarshalJSONEnum(x.Descriptor(), b)")
g.P("if err != nil {")
g.P("return err")
g.P("}")
g.P("*x = ", e.GoIdent, "(num)")
g.P("return nil")
g.P("}")
g.P()
}
// EnumDescriptor method.
if e.genRawDescMethod {
var indexes []string
for i := 1; i < len(e.Location.Path); i += 2 {
indexes = append(indexes, strconv.Itoa(int(e.Location.Path[i])))
}
g.P("// Deprecated: Use ", e.GoIdent, ".Descriptor instead.")
g.P("func (", e.GoIdent, ") EnumDescriptor() ([]byte, []int) {")
g.P("return ", rawDescVarName(f), "GZIP(), []int{", strings.Join(indexes, ","), "}")
g.P("}")
g.P()
f.needRawDesc = true
}
}
func genMessage(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
if m.Desc.IsMapEntry() {
return
}
// Message type declaration.
g.Annotate(m.GoIdent.GoName, m.Location)
leadingComments := appendDeprecationSuffix(m.Comments.Leading,
m.Desc.Options().(*descriptorpb.MessageOptions).GetDeprecated())
g.P(leadingComments,
"type ", m.GoIdent, " struct {")
genMessageFields(g, f, m)
g.P("}")
g.P()
genMessageDefaultDecls(g, f, m)
genMessageMethods(g, f, m)
genMessageOneofWrapperTypes(g, f, m)
}
func genMessageFields(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
sf := f.allMessageFieldsByPtr[m]
genMessageInternalFields(g, f, m, sf)
for _, field := range m.Fields {
genMessageField(g, f, m, field, sf)
}
}
func genMessageInternalFields(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo, sf *structFields) {
g.P(genname.State, " ", protoimplPackage.Ident("MessageState"))
sf.append(genname.State)
g.P(genname.SizeCache, " ", protoimplPackage.Ident("SizeCache"))
sf.append(genname.SizeCache)
if m.hasWeak {
g.P(genname.WeakFields, " ", protoimplPackage.Ident("WeakFields"))
sf.append(genname.WeakFields)
}
g.P(genname.UnknownFields, " ", protoimplPackage.Ident("UnknownFields"))
sf.append(genname.UnknownFields)
if m.Desc.ExtensionRanges().Len() > 0 {
g.P(genname.ExtensionFields, " ", protoimplPackage.Ident("ExtensionFields"))
sf.append(genname.ExtensionFields)
}
if sf.count > 0 {
g.P()
}
}
func genMessageField(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo, field *protogen.Field, sf *structFields) {
if oneof := field.Oneof; oneof != nil {
// It would be a bit simpler to iterate over the oneofs below,
// but generating the field here keeps the contents of the Go
// struct in the same order as the contents of the source
// .proto file.
if oneof.Fields[0] != field {
return // only generate for first appearance
}
tags := structTags{
{"protobuf_oneof", string(oneof.Desc.Name())},
}
if m.isTracked {
tags = append(tags, gotrackTags...)
}
g.Annotate(m.GoIdent.GoName+"."+oneof.GoName, oneof.Location)
leadingComments := oneof.Comments.Leading
if leadingComments != "" {
leadingComments += "\n"
}
ss := []string{fmt.Sprintf(" Types that are assignable to %s:\n", oneof.GoName)}
for _, field := range oneof.Fields {
ss = append(ss, "\t*"+field.GoIdent.GoName+"\n")
}
leadingComments += protogen.Comments(strings.Join(ss, ""))
g.P(leadingComments,
oneof.GoName, " ", oneofInterfaceName(oneof), tags)
sf.append(oneof.GoName)
return
}
goType, pointer := fieldGoType(g, f, field)
if pointer {
goType = "*" + goType
}
tags := structTags{
{"protobuf", fieldProtobufTagValue(field)},
{"json", fieldJSONTagValue(field)},
}
if field.Desc.IsMap() {
key := field.Message.Fields[0]
val := field.Message.Fields[1]
tags = append(tags, structTags{
{"protobuf_key", fieldProtobufTagValue(key)},
{"protobuf_val", fieldProtobufTagValue(val)},
}...)
}
if m.isTracked {
tags = append(tags, gotrackTags...)
}
name := field.GoName
if field.Desc.IsWeak() {
name = genname.WeakFieldPrefix + name
}
g.Annotate(m.GoIdent.GoName+"."+name, field.Location)
leadingComments := appendDeprecationSuffix(field.Comments.Leading,
field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated())
g.P(leadingComments,
name, " ", goType, tags,
trailingComment(field.Comments.Trailing))
sf.append(field.GoName)
}
// genMessageDefaultDecls generates consts and vars holding the default
// values of fields.
func genMessageDefaultDecls(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
var consts, vars []string
for _, field := range m.Fields {
if !field.Desc.HasDefault() {
continue
}
name := "Default_" + m.GoIdent.GoName + "_" + field.GoName
goType, _ := fieldGoType(g, f, field)
defVal := field.Desc.Default()
switch field.Desc.Kind() {
case protoreflect.StringKind:
consts = append(consts, fmt.Sprintf("%s = %s(%q)", name, goType, defVal.String()))
case protoreflect.BytesKind:
vars = append(vars, fmt.Sprintf("%s = %s(%q)", name, goType, defVal.Bytes()))
case protoreflect.EnumKind:
idx := field.Desc.DefaultEnumValue().Index()
val := field.Enum.Values[idx]
consts = append(consts, fmt.Sprintf("%s = %s", name, g.QualifiedGoIdent(val.GoIdent)))
case protoreflect.FloatKind, protoreflect.DoubleKind:
if f := defVal.Float(); math.IsNaN(f) || math.IsInf(f, 0) {
var fn, arg string
switch f := defVal.Float(); {
case math.IsInf(f, -1):
fn, arg = g.QualifiedGoIdent(mathPackage.Ident("Inf")), "-1"
case math.IsInf(f, +1):
fn, arg = g.QualifiedGoIdent(mathPackage.Ident("Inf")), "+1"
case math.IsNaN(f):
fn, arg = g.QualifiedGoIdent(mathPackage.Ident("NaN")), ""
}
vars = append(vars, fmt.Sprintf("%s = %s(%s(%s))", name, goType, fn, arg))
} else {
consts = append(consts, fmt.Sprintf("%s = %s(%v)", name, goType, f))
}
default:
consts = append(consts, fmt.Sprintf("%s = %s(%v)", name, goType, defVal.Interface()))
}
}
if len(consts) > 0 {
g.P("// Default values for ", m.GoIdent, " fields.")
g.P("const (")
for _, s := range consts {
g.P(s)
}
g.P(")")
}
if len(vars) > 0 {
g.P("// Default values for ", m.GoIdent, " fields.")
g.P("var (")
for _, s := range vars {
g.P(s)
}
g.P(")")
}
g.P()
}
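// Illustrative sketch of the declarations emitted for a proto2 message named
// Scalars (all identifiers hypothetical):
//
//	// Default values for Scalars fields.
//	const (
//		Default_Scalars_OptInt32  = int32(7)
//		Default_Scalars_OptString = string("hello")
//	)
//	// Default values for Scalars fields.
//	var (
//		Default_Scalars_OptBytes  = []byte("raw")
//		Default_Scalars_OptDouble = float64(math.Inf(+1))
//	)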
func genMessageMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
genMessageBaseMethods(g, f, m)
genMessageGetterMethods(g, f, m)
genMessageSetterMethods(g, f, m)
}
func genMessageBaseMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
// Reset method.
g.P("func (x *", m.GoIdent, ") Reset() {")
g.P("*x = ", m.GoIdent, "{}")
g.P("if ", protoimplPackage.Ident("UnsafeEnabled"), " {")
g.P("mi := &", messageTypesVarName(f), "[", f.allMessagesByPtr[m], "]")
g.P("ms := ", protoimplPackage.Ident("X"), ".MessageStateOf(", protoimplPackage.Ident("Pointer"), "(x))")
g.P("ms.StoreMessageInfo(mi)")
g.P("}")
g.P("}")
g.P()
// String method.
g.P("func (x *", m.GoIdent, ") String() string {")
g.P("return ", protoimplPackage.Ident("X"), ".MessageStringOf(x)")
g.P("}")
g.P()
// ProtoMessage method.
g.P("func (*", m.GoIdent, ") ProtoMessage() {}")
g.P()
// ProtoReflect method.
genMessageReflectMethods(g, f, m)
// Descriptor method.
if m.genRawDescMethod {
var indexes []string
for i := 1; i < len(m.Location.Path); i += 2 {
indexes = append(indexes, strconv.Itoa(int(m.Location.Path[i])))
}
g.P("// Deprecated: Use ", m.GoIdent, ".ProtoReflect.Descriptor instead.")
g.P("func (*", m.GoIdent, ") Descriptor() ([]byte, []int) {")
g.P("return ", rawDescVarName(f), "GZIP(), []int{", strings.Join(indexes, ","), "}")
g.P("}")
g.P()
f.needRawDesc = true
}
// ExtensionRangeArray method.
extRanges := m.Desc.ExtensionRanges()
if m.genExtRangeMethod && extRanges.Len() > 0 {
protoExtRange := protoifacePackage.Ident("ExtensionRangeV1")
extRangeVar := "extRange_" + m.GoIdent.GoName
g.P("var ", extRangeVar, " = []", protoExtRange, " {")
for i := 0; i < extRanges.Len(); i++ {
r := extRanges.Get(i)
g.P("{Start:", r[0], ", End:", r[1]-1 /* inclusive */, "},")
}
g.P("}")
g.P()
g.P("// Deprecated: Use ", m.GoIdent, ".ProtoReflect.Descriptor.ExtensionRanges instead.")
g.P("func (*", m.GoIdent, ") ExtensionRangeArray() []", protoExtRange, " {")
g.P("return ", extRangeVar)
g.P("}")
g.P()
}
}
func genMessageGetterMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
for _, field := range m.Fields {
genNoInterfacePragma(g, m.isTracked)
// Getter for parent oneof.
if oneof := field.Oneof; oneof != nil && oneof.Fields[0] == field {
g.Annotate(m.GoIdent.GoName+".Get"+oneof.GoName, oneof.Location)
g.P("func (m *", m.GoIdent.GoName, ") Get", oneof.GoName, "() ", oneofInterfaceName(oneof), " {")
g.P("if m != nil {")
g.P("return m.", oneof.GoName)
g.P("}")
g.P("return nil")
g.P("}")
g.P()
}
// Getter for message field.
goType, pointer := fieldGoType(g, f, field)
defaultValue := fieldDefaultValue(g, m, field)
g.Annotate(m.GoIdent.GoName+".Get"+field.GoName, field.Location)
leadingComments := appendDeprecationSuffix("",
field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated())
switch {
case field.Desc.IsWeak():
g.P(leadingComments, "func (x *", m.GoIdent, ") Get", field.GoName, "() ", protoifacePackage.Ident("MessageV1"), "{")
g.P("if x != nil {")
g.P("v := x.", genname.WeakFields, "[", field.Desc.Number(), "]")
g.P("_ = x.", genname.WeakFieldPrefix+field.GoName) // for field-tracking
g.P("if v != nil {")
g.P("return v")
g.P("}")
g.P("}")
g.P("return ", protoimplPackage.Ident("X"), ".WeakNil(", strconv.Quote(string(field.Message.Desc.FullName())), ")")
g.P("}")
case field.Oneof != nil:
g.P(leadingComments, "func (x *", m.GoIdent, ") Get", field.GoName, "() ", goType, " {")
g.P("if x, ok := x.Get", field.Oneof.GoName, "().(*", field.GoIdent, "); ok {")
g.P("return x.", field.GoName)
g.P("}")
g.P("return ", defaultValue)
g.P("}")
default:
g.P(leadingComments, "func (x *", m.GoIdent, ") Get", field.GoName, "() ", goType, " {")
if field.Desc.Syntax() == protoreflect.Proto3 || defaultValue == "nil" {
g.P("if x != nil {")
} else {
g.P("if x != nil && x.", field.GoName, " != nil {")
}
star := ""
if pointer {
star = "*"
}
g.P("return ", star, " x.", field.GoName)
g.P("}")
g.P("return ", defaultValue)
g.P("}")
}
g.P()
}
}
func genMessageSetterMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
for _, field := range m.Fields {
if !field.Desc.IsWeak() {
continue
}
genNoInterfacePragma(g, m.isTracked)
g.Annotate(m.GoIdent.GoName+".Set"+field.GoName, field.Location)
leadingComments := appendDeprecationSuffix("",
field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated())
g.P(leadingComments, "func (x *", m.GoIdent, ") Set", field.GoName, "(v ", protoifacePackage.Ident("MessageV1"), ") {")
g.P("if x.", genname.WeakFields, " == nil {")
g.P("x.", genname.WeakFields, " = make(", protoimplPackage.Ident("WeakFields"), ")")
g.P("}")
g.P("if v == nil {")
g.P("delete(x.", genname.WeakFields, ", ", field.Desc.Number(), ")")
g.P("} else {")
g.P("x.", genname.WeakFields, "[", field.Desc.Number(), "] = v")
g.P("x.", genname.WeakFieldPrefix+field.GoName, " = struct{}{}") // for field-tracking
g.P("}")
g.P("}")
g.P()
}
}
// fieldGoType returns the Go type used for a field.
//
// If it returns pointer=true, the struct field is a pointer to the type.
func fieldGoType(g *protogen.GeneratedFile, f *fileInfo, field *protogen.Field) (goType string, pointer bool) {
if field.Desc.IsWeak() {
return "struct{}", false
}
pointer = true
switch field.Desc.Kind() {
case protoreflect.BoolKind:
goType = "bool"
case protoreflect.EnumKind:
goType = g.QualifiedGoIdent(field.Enum.GoIdent)
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
goType = "int32"
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
goType = "uint32"
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
goType = "int64"
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
goType = "uint64"
case protoreflect.FloatKind:
goType = "float32"
case protoreflect.DoubleKind:
goType = "float64"
case protoreflect.StringKind:
goType = "string"
case protoreflect.BytesKind:
goType = "[]byte"
pointer = false
case protoreflect.MessageKind, protoreflect.GroupKind:
goType = "*" + g.QualifiedGoIdent(field.Message.GoIdent)
pointer = false
}
switch {
case field.Desc.IsList():
goType = "[]" + goType
pointer = false
case field.Desc.IsMap():
keyType, _ := fieldGoType(g, f, field.Message.Fields[0])
valType, _ := fieldGoType(g, f, field.Message.Fields[1])
return fmt.Sprintf("map[%v]%v", keyType, valType), false
}
// Extension fields always have pointer type, even when defined in a proto3 file.
if field.Desc.Syntax() == protoreflect.Proto3 && !field.Desc.IsExtension() {
pointer = false
}
return goType, pointer
}
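// Illustrative sketch of the mapping performed above (message name
// hypothetical); the left side is the .proto declaration, the right side the
// generated struct field type:
//
//	optional int32 a = 1;     // proto2 -> A *int32
//	int32 b = 2;              // proto3 -> B int32
//	repeated string c = 3;    //        -> C []string
//	map<string, int64> d = 4; //        -> D map[string]int64
//	optional bytes e = 5;     //        -> E []byte (bytes are never pointers)
//	SubMessage f = 6;         //        -> F *SubMessage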
func fieldProtobufTagValue(field *protogen.Field) string {
var enumName string
if field.Desc.Kind() == protoreflect.EnumKind {
enumName = protoimpl.X.LegacyEnumName(field.Enum.Desc)
}
return tag.Marshal(field.Desc, enumName)
}
func fieldDefaultValue(g *protogen.GeneratedFile, m *messageInfo, field *protogen.Field) string {
if field.Desc.IsList() {
return "nil"
}
if field.Desc.HasDefault() {
defVarName := "Default_" + m.GoIdent.GoName + "_" + field.GoName
if field.Desc.Kind() == protoreflect.BytesKind {
return "append([]byte(nil), " + defVarName + "...)"
}
return defVarName
}
switch field.Desc.Kind() {
case protoreflect.BoolKind:
return "false"
case protoreflect.StringKind:
return `""`
case protoreflect.MessageKind, protoreflect.GroupKind, protoreflect.BytesKind:
return "nil"
case protoreflect.EnumKind:
return g.QualifiedGoIdent(field.Enum.Values[0].GoIdent)
default:
return "0"
}
}
func fieldJSONTagValue(field *protogen.Field) string {
return string(field.Desc.Name()) + ",omitempty"
}
func genExtensions(g *protogen.GeneratedFile, f *fileInfo) {
if len(f.allExtensions) == 0 {
return
}
g.P("var ", extensionTypesVarName(f), " = []", protoimplPackage.Ident("ExtensionInfo"), "{")
for _, x := range f.allExtensions {
// For MessageSet extensions, the name used is the parent message.
name := x.Desc.FullName()
if messageset.IsMessageSetExtension(x.Desc) {
name = name.Parent()
}
g.P("{")
g.P("ExtendedType: (*", x.Extendee.GoIdent, ")(nil),")
goType, pointer := fieldGoType(g, f, x.Extension)
if pointer {
goType = "*" + goType
}
g.P("ExtensionType: (", goType, ")(nil),")
g.P("Field: ", x.Desc.Number(), ",")
g.P("Name: ", strconv.Quote(string(name)), ",")
g.P("Tag: ", strconv.Quote(fieldProtobufTagValue(x.Extension)), ",")
g.P("Filename: ", strconv.Quote(f.Desc.Path()), ",")
g.P("},")
}
g.P("}")
g.P()
// Group extensions by the target message.
var orderedTargets []protogen.GoIdent
allExtensionsByTarget := make(map[protogen.GoIdent][]*extensionInfo)
allExtensionsByPtr := make(map[*extensionInfo]int)
for i, x := range f.allExtensions {
target := x.Extendee.GoIdent
if len(allExtensionsByTarget[target]) == 0 {
orderedTargets = append(orderedTargets, target)
}
allExtensionsByTarget[target] = append(allExtensionsByTarget[target], x)
allExtensionsByPtr[x] = i
}
for _, target := range orderedTargets {
g.P("// Extension fields to ", target, ".")
g.P("var (")
for _, x := range allExtensionsByTarget[target] {
xd := x.Desc
typeName := xd.Kind().String()
switch xd.Kind() {
case protoreflect.EnumKind:
typeName = string(xd.Enum().FullName())
case protoreflect.MessageKind, protoreflect.GroupKind:
typeName = string(xd.Message().FullName())
}
fieldName := string(xd.Name())
leadingComments := x.Comments.Leading
if leadingComments != "" {
leadingComments += "\n"
}
leadingComments += protogen.Comments(fmt.Sprintf(" %v %v %v = %v;\n",
xd.Cardinality(), typeName, fieldName, xd.Number()))
leadingComments = appendDeprecationSuffix(leadingComments,
x.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated())
g.P(leadingComments,
"E_", x.GoIdent, " = &", extensionTypesVarName(f), "[", allExtensionsByPtr[x], "]",
trailingComment(x.Comments.Trailing))
}
g.P(")")
g.P()
}
}
// genMessageOneofWrapperTypes generates the oneof wrapper types and
// associates the types with the parent message type.
func genMessageOneofWrapperTypes(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
for _, oneof := range m.Oneofs {
ifName := oneofInterfaceName(oneof)
g.P("type ", ifName, " interface {")
g.P(ifName, "()")
g.P("}")
g.P()
for _, field := range oneof.Fields {
g.Annotate(field.GoIdent.GoName, field.Location)
g.Annotate(field.GoIdent.GoName+"."+field.GoName, field.Location)
g.P("type ", field.GoIdent, " struct {")
goType, _ := fieldGoType(g, f, field)
tags := structTags{
{"protobuf", fieldProtobufTagValue(field)},
}
if m.isTracked {
tags = append(tags, gotrackTags...)
}
leadingComments := appendDeprecationSuffix(field.Comments.Leading,
field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated())
g.P(leadingComments,
field.GoName, " ", goType, tags,
trailingComment(field.Comments.Trailing))
g.P("}")
g.P()
}
for _, field := range oneof.Fields {
g.P("func (*", field.GoIdent, ") ", ifName, "() {}")
g.P()
}
}
}
// oneofInterfaceName returns the name of the interface type implemented by
// the oneof field value types.
func oneofInterfaceName(oneof *protogen.Oneof) string {
return "is" + oneof.GoIdent.GoName
}
// genNoInterfacePragma generates a standalone "nointerface" pragma to
// decorate methods with field-tracking support.
func genNoInterfacePragma(g *protogen.GeneratedFile, tracked bool) {
if tracked {
g.P("//go:nointerface")
g.P()
}
}
var gotrackTags = structTags{{"go", "track"}}
// structTags is a data structure for building idiomatic Go struct tags.
// Each [2]string is a key-value pair, where value is the unescaped string.
//
// Example: structTags{{"key", "value"}}.String() -> `key:"value"`
type structTags [][2]string
func (tags structTags) String() string {
if len(tags) == 0 {
return ""
}
var ss []string
for _, tag := range tags {
// NOTE: When quoting the value, we need to make sure the backtick
// character does not appear. Convert all cases to the escaped hex form.
key := tag[0]
val := strings.Replace(strconv.Quote(tag[1]), "`", `\x60`, -1)
ss = append(ss, fmt.Sprintf("%s:%s", key, val))
}
return "`" + strings.Join(ss, " ") + "`"
}
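// exampleStructTag is an illustrative sketch (not used by the generator)
// showing the rendered form of a struct tag; values are quoted with
// strconv.Quote and any backtick is rewritten as \x60.
func exampleStructTag() string {
	tags := structTags{
		{"protobuf", "bytes,1,opt,name=name"},
		{"json", "name,omitempty"},
	}
	// Returns: `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	return tags.String()
}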
// appendDeprecationSuffix optionally appends a deprecation notice as a suffix.
func appendDeprecationSuffix(prefix protogen.Comments, deprecated bool) protogen.Comments {
if !deprecated {
return prefix
}
if prefix != "" {
prefix += "\n"
}
return prefix + " Deprecated: Do not use.\n"
}
// trailingComment is like protogen.Comments, but lacks a trailing newline.
type trailingComment protogen.Comments
func (c trailingComment) String() string {
s := strings.TrimSuffix(protogen.Comments(c).String(), "\n")
if strings.Contains(s, "\n") {
// We don't support multi-lined trailing comments as it is unclear
// how to best render them in the generated code.
return ""
}
return s
}

View file

@ -0,0 +1,349 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package internal_gengo
import (
"fmt"
"math"
"strings"
"unicode/utf8"
"google.golang.org/protobuf/compiler/protogen"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/descriptorpb"
)
func genReflectFileDescriptor(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo) {
g.P("var ", f.GoDescriptorIdent, " ", protoreflectPackage.Ident("FileDescriptor"))
g.P()
genFileDescriptor(gen, g, f)
if len(f.allEnums) > 0 {
g.P("var ", enumTypesVarName(f), " = make([]", protoimplPackage.Ident("EnumInfo"), ",", len(f.allEnums), ")")
}
if len(f.allMessages) > 0 {
g.P("var ", messageTypesVarName(f), " = make([]", protoimplPackage.Ident("MessageInfo"), ",", len(f.allMessages), ")")
}
// Generate a unique list of Go types for all declarations and dependencies,
// and the associated index into the type list for all dependencies.
var goTypes []string
var depIdxs []string
seen := map[protoreflect.FullName]int{}
genDep := func(name protoreflect.FullName, depSource string) {
if depSource != "" {
line := fmt.Sprintf("%d, // %d: %s -> %s", seen[name], len(depIdxs), depSource, name)
depIdxs = append(depIdxs, line)
}
}
genEnum := func(e *protogen.Enum, depSource string) {
if e != nil {
name := e.Desc.FullName()
if _, ok := seen[name]; !ok {
line := fmt.Sprintf("(%s)(0), // %d: %s", g.QualifiedGoIdent(e.GoIdent), len(goTypes), name)
goTypes = append(goTypes, line)
seen[name] = len(seen)
}
if depSource != "" {
genDep(name, depSource)
}
}
}
genMessage := func(m *protogen.Message, depSource string) {
if m != nil {
name := m.Desc.FullName()
if _, ok := seen[name]; !ok {
line := fmt.Sprintf("(*%s)(nil), // %d: %s", g.QualifiedGoIdent(m.GoIdent), len(goTypes), name)
if m.Desc.IsMapEntry() {
// Map entry messages have no associated Go type.
line = fmt.Sprintf("nil, // %d: %s", len(goTypes), name)
}
goTypes = append(goTypes, line)
seen[name] = len(seen)
}
if depSource != "" {
genDep(name, depSource)
}
}
}
// This ordering is significant.
// See filetype.TypeBuilder.DependencyIndexes.
type offsetEntry struct {
start int
name string
}
var depOffsets []offsetEntry
for _, enum := range f.allEnums {
genEnum(enum.Enum, "")
}
for _, message := range f.allMessages {
genMessage(message.Message, "")
}
depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "field type_name"})
for _, message := range f.allMessages {
for _, field := range message.Fields {
if field.Desc.IsWeak() {
continue
}
source := string(field.Desc.FullName())
genEnum(field.Enum, source+":type_name")
genMessage(field.Message, source+":type_name")
}
}
depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "extension extendee"})
for _, extension := range f.allExtensions {
source := string(extension.Desc.FullName())
genMessage(extension.Extendee, source+":extendee")
}
depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "extension type_name"})
for _, extension := range f.allExtensions {
source := string(extension.Desc.FullName())
genEnum(extension.Enum, source+":type_name")
genMessage(extension.Message, source+":type_name")
}
depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "method input_type"})
for _, service := range f.Services {
for _, method := range service.Methods {
source := string(method.Desc.FullName())
genMessage(method.Input, source+":input_type")
}
}
depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "method output_type"})
for _, service := range f.Services {
for _, method := range service.Methods {
source := string(method.Desc.FullName())
genMessage(method.Output, source+":output_type")
}
}
depOffsets = append(depOffsets, offsetEntry{len(depIdxs), ""})
for i := len(depOffsets) - 2; i >= 0; i-- {
curr, next := depOffsets[i], depOffsets[i+1]
depIdxs = append(depIdxs, fmt.Sprintf("%d, // [%d:%d] is the sub-list for %s",
curr.start, curr.start, next.start, curr.name))
}
if len(depIdxs) > math.MaxInt32 {
panic("too many dependencies") // sanity check
}
g.P("var ", goTypesVarName(f), " = []interface{}{")
for _, s := range goTypes {
g.P(s)
}
g.P("}")
g.P("var ", depIdxsVarName(f), " = []int32{")
for _, s := range depIdxs {
g.P(s)
}
g.P("}")
g.P("func init() { ", initFuncName(f.File), "() }")
g.P("func ", initFuncName(f.File), "() {")
g.P("if ", f.GoDescriptorIdent, " != nil {")
g.P("return")
g.P("}")
// Ensure that initialization functions for different files in the same Go
// package run in the correct order: Call the init funcs for every .proto file
// imported by this one that is in the same Go package.
for i, imps := 0, f.Desc.Imports(); i < imps.Len(); i++ {
impFile := gen.FilesByPath[imps.Get(i).Path()]
if impFile.GoImportPath != f.GoImportPath {
continue
}
g.P(initFuncName(impFile), "()")
}
if len(f.allMessages) > 0 {
// Populate MessageInfo.Exporters.
g.P("if !", protoimplPackage.Ident("UnsafeEnabled"), " {")
for _, message := range f.allMessages {
if sf := f.allMessageFieldsByPtr[message]; len(sf.unexported) > 0 {
idx := f.allMessagesByPtr[message]
typesVar := messageTypesVarName(f)
g.P(typesVar, "[", idx, "].Exporter = func(v interface{}, i int) interface{} {")
g.P("switch v := v.(*", message.GoIdent, "); i {")
for i := 0; i < sf.count; i++ {
if name := sf.unexported[i]; name != "" {
g.P("case ", i, ": return &v.", name)
}
}
g.P("default: return nil")
g.P("}")
g.P("}")
}
}
g.P("}")
// Populate MessageInfo.OneofWrappers.
for _, message := range f.allMessages {
if len(message.Oneofs) > 0 {
idx := f.allMessagesByPtr[message]
typesVar := messageTypesVarName(f)
// Associate the wrapper types by directly passing them to the MessageInfo.
g.P(typesVar, "[", idx, "].OneofWrappers = []interface{} {")
for _, oneof := range message.Oneofs {
for _, field := range oneof.Fields {
g.P("(*", field.GoIdent, ")(nil),")
}
}
g.P("}")
}
}
}
g.P("type x struct{}")
g.P("out := ", protoimplPackage.Ident("TypeBuilder"), "{")
g.P("File: ", protoimplPackage.Ident("DescBuilder"), "{")
g.P("GoPackagePath: ", reflectPackage.Ident("TypeOf"), "(x{}).PkgPath(),")
g.P("RawDescriptor: ", rawDescVarName(f), ",")
g.P("NumEnums: ", len(f.allEnums), ",")
g.P("NumMessages: ", len(f.allMessages), ",")
g.P("NumExtensions: ", len(f.allExtensions), ",")
g.P("NumServices: ", len(f.Services), ",")
g.P("},")
g.P("GoTypes: ", goTypesVarName(f), ",")
g.P("DependencyIndexes: ", depIdxsVarName(f), ",")
if len(f.allEnums) > 0 {
g.P("EnumInfos: ", enumTypesVarName(f), ",")
}
if len(f.allMessages) > 0 {
g.P("MessageInfos: ", messageTypesVarName(f), ",")
}
if len(f.allExtensions) > 0 {
g.P("ExtensionInfos: ", extensionTypesVarName(f), ",")
}
g.P("}.Build()")
g.P(f.GoDescriptorIdent, " = out.File")
// Set inputs to nil to allow GC to reclaim resources.
g.P(rawDescVarName(f), " = nil")
g.P(goTypesVarName(f), " = nil")
g.P(depIdxsVarName(f), " = nil")
g.P("}")
}
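// Illustrative sketch of the emitted registry slices for a file declaring one
// enum and two messages, where Paint has a Color field and Wall has a Paint
// field (all identifiers hypothetical):
//
//	var file_example_proto_goTypes = []interface{}{
//		(Color)(0),    // 0: example.Color
//		(*Paint)(nil), // 1: example.Paint
//		(*Wall)(nil),  // 2: example.Wall
//	}
//	var file_example_proto_depIdxs = []int32{
//		0, // 0: example.Paint.color:type_name -> example.Color
//		1, // 1: example.Wall.paint:type_name -> example.Paint
//		2, // [2:2] is the sub-list for method output_type
//		2, // [2:2] is the sub-list for method input_type
//		2, // [2:2] is the sub-list for extension type_name
//		2, // [2:2] is the sub-list for extension extendee
//		0, // [0:2] is the sub-list for field type_name
//	}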
func genFileDescriptor(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo) {
descProto := proto.Clone(f.Proto).(*descriptorpb.FileDescriptorProto)
descProto.SourceCodeInfo = nil // drop source code information
b, err := proto.MarshalOptions{AllowPartial: true, Deterministic: true}.Marshal(descProto)
if err != nil {
gen.Error(err)
return
}
g.P("var ", rawDescVarName(f), " = []byte{")
for len(b) > 0 {
n := 16
if n > len(b) {
n = len(b)
}
s := ""
for _, c := range b[:n] {
s += fmt.Sprintf("0x%02x,", c)
}
g.P(s)
b = b[n:]
}
g.P("}")
g.P()
if f.needRawDesc {
onceVar := rawDescVarName(f) + "Once"
dataVar := rawDescVarName(f) + "Data"
g.P("var (")
g.P(onceVar, " ", syncPackage.Ident("Once"))
g.P(dataVar, " = ", rawDescVarName(f))
g.P(")")
g.P()
g.P("func ", rawDescVarName(f), "GZIP() []byte {")
g.P(onceVar, ".Do(func() {")
g.P(dataVar, " = ", protoimplPackage.Ident("X"), ".CompressGZIP(", dataVar, ")")
g.P("})")
g.P("return ", dataVar)
g.P("}")
g.P()
}
}
func genEnumReflectMethods(g *protogen.GeneratedFile, f *fileInfo, e *enumInfo) {
idx := f.allEnumsByPtr[e]
typesVar := enumTypesVarName(f)
// Descriptor method.
g.P("func (", e.GoIdent, ") Descriptor() ", protoreflectPackage.Ident("EnumDescriptor"), " {")
g.P("return ", typesVar, "[", idx, "].Descriptor()")
g.P("}")
g.P()
// Type method.
g.P("func (", e.GoIdent, ") Type() ", protoreflectPackage.Ident("EnumType"), " {")
g.P("return &", typesVar, "[", idx, "]")
g.P("}")
g.P()
// Number method.
g.P("func (x ", e.GoIdent, ") Number() ", protoreflectPackage.Ident("EnumNumber"), " {")
g.P("return ", protoreflectPackage.Ident("EnumNumber"), "(x)")
g.P("}")
g.P()
}
func genMessageReflectMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
idx := f.allMessagesByPtr[m]
typesVar := messageTypesVarName(f)
// ProtoReflect method.
g.P("func (x *", m.GoIdent, ") ProtoReflect() ", protoreflectPackage.Ident("Message"), " {")
g.P("mi := &", typesVar, "[", idx, "]")
g.P("if ", protoimplPackage.Ident("UnsafeEnabled"), " && x != nil {")
g.P("ms := ", protoimplPackage.Ident("X"), ".MessageStateOf(", protoimplPackage.Ident("Pointer"), "(x))")
g.P("if ms.LoadMessageInfo() == nil {")
g.P("ms.StoreMessageInfo(mi)")
g.P("}")
g.P("return ms")
g.P("}")
g.P("return mi.MessageOf(x)")
g.P("}")
g.P()
}
func fileVarName(f *protogen.File, suffix string) string {
prefix := f.GoDescriptorIdent.GoName
_, n := utf8.DecodeRuneInString(prefix)
prefix = strings.ToLower(prefix[:n]) + prefix[n:]
return prefix + "_" + suffix
}
func rawDescVarName(f *fileInfo) string {
return fileVarName(f.File, "rawDesc")
}
func goTypesVarName(f *fileInfo) string {
return fileVarName(f.File, "goTypes")
}
func depIdxsVarName(f *fileInfo) string {
return fileVarName(f.File, "depIdxs")
}
func enumTypesVarName(f *fileInfo) string {
return fileVarName(f.File, "enumTypes")
}
func messageTypesVarName(f *fileInfo) string {
return fileVarName(f.File, "msgTypes")
}
func extensionTypesVarName(f *fileInfo) string {
return fileVarName(f.File, "extTypes")
}
func initFuncName(f *protogen.File) string {
return fileVarName(f, "init")
}

File diff suppressed because it is too large

View file

@ -0,0 +1,788 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package prototext
import (
"fmt"
"strings"
"unicode/utf8"
"google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/encoding/text"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/internal/fieldnum"
"google.golang.org/protobuf/internal/flags"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/internal/set"
"google.golang.org/protobuf/proto"
pref "google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
// Unmarshal reads the given []byte into the given proto.Message.
func Unmarshal(b []byte, m proto.Message) error {
return UnmarshalOptions{}.Unmarshal(b, m)
}
// UnmarshalOptions is a configurable textproto format unmarshaler.
type UnmarshalOptions struct {
pragma.NoUnkeyedLiterals
// AllowPartial accepts input for messages that will result in missing
// required fields. If AllowPartial is false (the default), Unmarshal will
// return an error if there are any missing required fields.
AllowPartial bool
// DiscardUnknown specifies whether to ignore unknown fields when parsing.
// An unknown field is any field whose field name or field number does not
// resolve to any known or extension field in the message.
// By default, unmarshal rejects unknown fields as an error.
DiscardUnknown bool
// Resolver is used for looking up types when unmarshaling
// google.protobuf.Any messages or extension fields.
// If nil, this defaults to using protoregistry.GlobalTypes.
Resolver interface {
protoregistry.MessageTypeResolver
protoregistry.ExtensionTypeResolver
}
}
// Unmarshal reads the given []byte and populates the given proto.Message
// using the options in the UnmarshalOptions object.
func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error {
proto.Reset(m)
if o.Resolver == nil {
o.Resolver = protoregistry.GlobalTypes
}
dec := decoder{text.NewDecoder(b), o}
if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil {
return err
}
if o.AllowPartial {
return nil
}
return proto.CheckInitialized(m)
}
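// exampleUnmarshal is an illustrative sketch (not part of the package API):
// it parses a textproto document into a caller-supplied message while
// tolerating unknown fields. The field names in the input are hypothetical
// and must exist in the concrete message type for them to be populated.
func exampleUnmarshal(m proto.Message) error {
	in := []byte(`name: "jane"  id: 42`)
	return UnmarshalOptions{DiscardUnknown: true}.Unmarshal(in, m)
}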
type decoder struct {
*text.Decoder
opts UnmarshalOptions
}
// newError returns an error object with position info.
func (d decoder) newError(pos int, f string, x ...interface{}) error {
line, column := d.Position(pos)
head := fmt.Sprintf("(line %d:%d): ", line, column)
return errors.New(head+f, x...)
}
// unexpectedTokenError returns a syntax error for the given unexpected token.
func (d decoder) unexpectedTokenError(tok text.Token) error {
return d.syntaxError(tok.Pos(), "unexpected token: %s", tok.RawString())
}
// syntaxError returns a syntax error for the given position.
func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
line, column := d.Position(pos)
head := fmt.Sprintf("syntax error (line %d:%d): ", line, column)
return errors.New(head+f, x...)
}
// unmarshalMessage unmarshals into the given protoreflect.Message.
func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error {
messageDesc := m.Descriptor()
if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) {
return errors.New("no support for proto1 MessageSets")
}
if messageDesc.FullName() == "google.protobuf.Any" {
return d.unmarshalAny(m, checkDelims)
}
if checkDelims {
tok, err := d.Read()
if err != nil {
return err
}
if tok.Kind() != text.MessageOpen {
return d.unexpectedTokenError(tok)
}
}
var seenNums set.Ints
var seenOneofs set.Ints
fieldDescs := messageDesc.Fields()
for {
// Read field name.
tok, err := d.Read()
if err != nil {
return err
}
switch typ := tok.Kind(); typ {
case text.Name:
// Continue below.
case text.EOF:
if checkDelims {
return text.ErrUnexpectedEOF
}
return nil
default:
if checkDelims && typ == text.MessageClose {
return nil
}
return d.unexpectedTokenError(tok)
}
// Resolve the field descriptor.
var name pref.Name
var fd pref.FieldDescriptor
var xt pref.ExtensionType
var xtErr error
var isFieldNumberName bool
switch tok.NameKind() {
case text.IdentName:
name = pref.Name(tok.IdentName())
fd = fieldDescs.ByName(name)
if fd == nil {
// The proto name of a group field is in all lowercase,
// while the textproto field name is the group message name.
gd := fieldDescs.ByName(pref.Name(strings.ToLower(string(name))))
if gd != nil && gd.Kind() == pref.GroupKind && gd.Message().Name() == name {
fd = gd
}
} else if fd.Kind() == pref.GroupKind && fd.Message().Name() != name {
fd = nil // reset since field name is actually the message name
}
case text.TypeName:
// Handle extensions only. This code path is not for Any.
xt, xtErr = d.findExtension(pref.FullName(tok.TypeName()))
case text.FieldNumber:
isFieldNumberName = true
num := pref.FieldNumber(tok.FieldNumber())
if !num.IsValid() {
return d.newError(tok.Pos(), "invalid field number: %d", num)
}
fd = fieldDescs.ByNumber(num)
if fd == nil {
xt, xtErr = d.opts.Resolver.FindExtensionByNumber(messageDesc.FullName(), num)
}
}
if xt != nil {
fd = xt.TypeDescriptor()
if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() {
return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName())
}
} else if xtErr != nil && xtErr != protoregistry.NotFound {
return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr)
}
if flags.ProtoLegacy {
if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() {
fd = nil // reset since the weak reference is not linked in
}
}
// Handle unknown fields.
if fd == nil {
if d.opts.DiscardUnknown || messageDesc.ReservedNames().Has(name) {
d.skipValue()
continue
}
return d.newError(tok.Pos(), "unknown field: %v", tok.RawString())
}
// Handle fields identified by field number.
if isFieldNumberName {
// TODO: Add an option to permit parsing field numbers.
//
// This requires careful thought as the MarshalOptions.EmitUnknown
// option allows formatting unknown fields as the field number and the
// best-effort textual representation of the field value. In that case,
// it may not be possible to unmarshal the value from a parser that does
// have information about the unknown field.
return d.newError(tok.Pos(), "cannot specify field by number: %v", tok.RawString())
}
switch {
case fd.IsList():
kind := fd.Kind()
if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() {
return d.syntaxError(tok.Pos(), "missing field separator :")
}
list := m.Mutable(fd).List()
if err := d.unmarshalList(fd, list); err != nil {
return err
}
case fd.IsMap():
mmap := m.Mutable(fd).Map()
if err := d.unmarshalMap(fd, mmap); err != nil {
return err
}
default:
kind := fd.Kind()
if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() {
return d.syntaxError(tok.Pos(), "missing field separator :")
}
// If field is a oneof, check if it has already been set.
if od := fd.ContainingOneof(); od != nil {
idx := uint64(od.Index())
if seenOneofs.Has(idx) {
return d.newError(tok.Pos(), "error parsing %q, oneof %v is already set", tok.RawString(), od.FullName())
}
seenOneofs.Set(idx)
}
num := uint64(fd.Number())
if seenNums.Has(num) {
return d.newError(tok.Pos(), "non-repeated field %q is repeated", tok.RawString())
}
if err := d.unmarshalSingular(fd, m); err != nil {
return err
}
seenNums.Set(num)
}
}
return nil
}
// findExtension returns protoreflect.ExtensionType from the Resolver if found.
func (d decoder) findExtension(xtName pref.FullName) (pref.ExtensionType, error) {
xt, err := d.opts.Resolver.FindExtensionByName(xtName)
if err == nil {
return xt, nil
}
return messageset.FindMessageSetExtension(d.opts.Resolver, xtName)
}
// unmarshalSingular unmarshals a non-repeated field value specified by the
// given FieldDescriptor.
func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) error {
var val pref.Value
var err error
switch fd.Kind() {
case pref.MessageKind, pref.GroupKind:
val = m.NewField(fd)
err = d.unmarshalMessage(val.Message(), true)
default:
val, err = d.unmarshalScalar(fd)
}
if err == nil {
m.Set(fd, val)
}
return err
}
// unmarshalScalar unmarshals a scalar/enum protoreflect.Value specified by the
// given FieldDescriptor.
func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) {
tok, err := d.Read()
if err != nil {
return pref.Value{}, err
}
if tok.Kind() != text.Scalar {
return pref.Value{}, d.unexpectedTokenError(tok)
}
kind := fd.Kind()
switch kind {
case pref.BoolKind:
if b, ok := tok.Bool(); ok {
return pref.ValueOfBool(b), nil
}
case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
if n, ok := tok.Int32(); ok {
return pref.ValueOfInt32(n), nil
}
case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
if n, ok := tok.Int64(); ok {
return pref.ValueOfInt64(n), nil
}
case pref.Uint32Kind, pref.Fixed32Kind:
if n, ok := tok.Uint32(); ok {
return pref.ValueOfUint32(n), nil
}
case pref.Uint64Kind, pref.Fixed64Kind:
if n, ok := tok.Uint64(); ok {
return pref.ValueOfUint64(n), nil
}
case pref.FloatKind:
if n, ok := tok.Float32(); ok {
return pref.ValueOfFloat32(n), nil
}
case pref.DoubleKind:
if n, ok := tok.Float64(); ok {
return pref.ValueOfFloat64(n), nil
}
case pref.StringKind:
if s, ok := tok.String(); ok {
if utf8.ValidString(s) {
return pref.ValueOfString(s), nil
}
return pref.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8")
}
case pref.BytesKind:
if b, ok := tok.String(); ok {
return pref.ValueOfBytes([]byte(b)), nil
}
case pref.EnumKind:
if lit, ok := tok.Enum(); ok {
// Lookup EnumNumber based on name.
if enumVal := fd.Enum().Values().ByName(pref.Name(lit)); enumVal != nil {
return pref.ValueOfEnum(enumVal.Number()), nil
}
}
if num, ok := tok.Int32(); ok {
return pref.ValueOfEnum(pref.EnumNumber(num)), nil
}
default:
panic(fmt.Sprintf("invalid scalar kind %v", kind))
}
return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
}
// unmarshalList unmarshals into the given protoreflect.List. A list value can
// either be in [] syntax or simply a single scalar/message value.
func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error {
tok, err := d.Peek()
if err != nil {
return err
}
switch fd.Kind() {
case pref.MessageKind, pref.GroupKind:
switch tok.Kind() {
case text.ListOpen:
d.Read()
for {
tok, err := d.Peek()
if err != nil {
return err
}
switch tok.Kind() {
case text.ListClose:
d.Read()
return nil
case text.MessageOpen:
pval := list.NewElement()
if err := d.unmarshalMessage(pval.Message(), true); err != nil {
return err
}
list.Append(pval)
default:
return d.unexpectedTokenError(tok)
}
}
case text.MessageOpen:
pval := list.NewElement()
if err := d.unmarshalMessage(pval.Message(), true); err != nil {
return err
}
list.Append(pval)
return nil
}
default:
switch tok.Kind() {
case text.ListOpen:
d.Read()
for {
tok, err := d.Peek()
if err != nil {
return err
}
switch tok.Kind() {
case text.ListClose:
d.Read()
return nil
case text.Scalar:
pval, err := d.unmarshalScalar(fd)
if err != nil {
return err
}
list.Append(pval)
default:
return d.unexpectedTokenError(tok)
}
}
case text.Scalar:
pval, err := d.unmarshalScalar(fd)
if err != nil {
return err
}
list.Append(pval)
return nil
}
}
return d.unexpectedTokenError(tok)
}
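// Illustrative sketch: both textproto forms below populate the same repeated
// field (field name hypothetical):
//
//	values: [1, 2, 3]
//
//	values: 1
//	values: 2
//	values: 3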
// unmarshalMap unmarshals into the given protoreflect.Map. A map value is a
// textproto message containing {key: <kvalue>, value: <mvalue>}.
func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error {
// Determine ahead whether map entry is a scalar type or a message type in
// order to call the appropriate unmarshalMapValue func inside
// unmarshalMapEntry.
var unmarshalMapValue func() (pref.Value, error)
switch fd.MapValue().Kind() {
case pref.MessageKind, pref.GroupKind:
unmarshalMapValue = func() (pref.Value, error) {
pval := mmap.NewValue()
if err := d.unmarshalMessage(pval.Message(), true); err != nil {
return pref.Value{}, err
}
return pval, nil
}
default:
unmarshalMapValue = func() (pref.Value, error) {
return d.unmarshalScalar(fd.MapValue())
}
}
tok, err := d.Read()
if err != nil {
return err
}
switch tok.Kind() {
case text.MessageOpen:
return d.unmarshalMapEntry(fd, mmap, unmarshalMapValue)
case text.ListOpen:
for {
tok, err := d.Read()
if err != nil {
return err
}
switch tok.Kind() {
case text.ListClose:
return nil
case text.MessageOpen:
if err := d.unmarshalMapEntry(fd, mmap, unmarshalMapValue); err != nil {
return err
}
default:
return d.unexpectedTokenError(tok)
}
}
default:
return d.unexpectedTokenError(tok)
}
}
// unmarshalMapEntry unmarshals a single map entry into the given
// protoreflect.Map. A map entry is a textproto message containing
// {key: <kvalue>, value: <mvalue>}.
func (d decoder) unmarshalMapEntry(fd pref.FieldDescriptor, mmap pref.Map, unmarshalMapValue func() (pref.Value, error)) error {
var key pref.MapKey
var pval pref.Value
Loop:
for {
// Read field name.
tok, err := d.Read()
if err != nil {
return err
}
switch tok.Kind() {
case text.Name:
if tok.NameKind() != text.IdentName {
if !d.opts.DiscardUnknown {
return d.newError(tok.Pos(), "unknown map entry field %q", tok.RawString())
}
d.skipValue()
continue Loop
}
// Continue below.
case text.MessageClose:
break Loop
default:
return d.unexpectedTokenError(tok)
}
name := tok.IdentName()
switch name {
case "key":
if !tok.HasSeparator() {
return d.syntaxError(tok.Pos(), "missing field separator :")
}
if key.IsValid() {
return d.newError(tok.Pos(), `map entry "key" cannot be repeated`)
}
val, err := d.unmarshalScalar(fd.MapKey())
if err != nil {
return err
}
key = val.MapKey()
case "value":
if kind := fd.MapValue().Kind(); (kind != pref.MessageKind) && (kind != pref.GroupKind) {
if !tok.HasSeparator() {
return d.syntaxError(tok.Pos(), "missing field separator :")
}
}
if pval.IsValid() {
return d.newError(tok.Pos(), `map entry "value" cannot be repeated`)
}
pval, err = unmarshalMapValue()
if err != nil {
return err
}
default:
if !d.opts.DiscardUnknown {
return d.newError(tok.Pos(), "unknown map entry field %q", name)
}
d.skipValue()
}
}
if !key.IsValid() {
key = fd.MapKey().Default().MapKey()
}
if !pval.IsValid() {
switch fd.MapValue().Kind() {
case pref.MessageKind, pref.GroupKind:
// If value field is not set for message/group types, construct an
// empty one as default.
pval = mmap.NewValue()
default:
pval = fd.MapValue().Default()
}
}
mmap.Set(key, pval)
return nil
}
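// Illustrative sketch: a map field (name hypothetical) may be written as a
// single entry message or as a list of entry messages; a missing key or value
// falls back to the defaults chosen above:
//
//	counts: {key: "a" value: 1}
//	counts: [{key: "b" value: 2}, {key: "c" value: 3}]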
// unmarshalAny unmarshals an Any textproto. It can either be in expanded form
// or non-expanded form.
func (d decoder) unmarshalAny(m pref.Message, checkDelims bool) error {
var typeURL string
var bValue []byte
// hasFields tracks which valid fields have been seen in the loop below in
// order to flag an error if there are duplicates or conflicts. It may
// contain the strings "type_url", "value" and "expanded". The literal
// "expanded" is used to indicate that the expanded form has been
// encountered already.
hasFields := map[string]bool{}
if checkDelims {
tok, err := d.Read()
if err != nil {
return err
}
if tok.Kind() != text.MessageOpen {
return d.unexpectedTokenError(tok)
}
}
Loop:
for {
// Read field name. Can only have 3 possible field names, i.e. type_url,
// value and type URL name inside [].
tok, err := d.Read()
if err != nil {
return err
}
if typ := tok.Kind(); typ != text.Name {
if checkDelims {
if typ == text.MessageClose {
break Loop
}
} else if typ == text.EOF {
break Loop
}
return d.unexpectedTokenError(tok)
}
switch tok.NameKind() {
case text.IdentName:
// Both type_url and value fields require field separator :.
if !tok.HasSeparator() {
return d.syntaxError(tok.Pos(), "missing field separator :")
}
switch tok.IdentName() {
case "type_url":
if hasFields["type_url"] {
return d.newError(tok.Pos(), "duplicate Any type_url field")
}
if hasFields["expanded"] {
return d.newError(tok.Pos(), "conflict with [%s] field", typeURL)
}
tok, err := d.Read()
if err != nil {
return err
}
var ok bool
typeURL, ok = tok.String()
if !ok {
return d.newError(tok.Pos(), "invalid Any type_url: %v", tok.RawString())
}
hasFields["type_url"] = true
case "value":
if hasFields["value"] {
return d.newError(tok.Pos(), "duplicate Any value field")
}
if hasFields["expanded"] {
return d.newError(tok.Pos(), "conflict with [%s] field", typeURL)
}
tok, err := d.Read()
if err != nil {
return err
}
s, ok := tok.String()
if !ok {
return d.newError(tok.Pos(), "invalid Any value: %v", tok.RawString())
}
bValue = []byte(s)
hasFields["value"] = true
default:
if !d.opts.DiscardUnknown {
return d.newError(tok.Pos(), "invalid field name %q in google.protobuf.Any message", tok.RawString())
}
}
case text.TypeName:
if hasFields["expanded"] {
return d.newError(tok.Pos(), "cannot have more than one type")
}
if hasFields["type_url"] {
return d.newError(tok.Pos(), "conflict with type_url field")
}
typeURL = tok.TypeName()
var err error
bValue, err = d.unmarshalExpandedAny(typeURL, tok.Pos())
if err != nil {
return err
}
hasFields["expanded"] = true
default:
if !d.opts.DiscardUnknown {
return d.newError(tok.Pos(), "invalid field name %q in google.protobuf.Any message", tok.RawString())
}
}
}
fds := m.Descriptor().Fields()
if len(typeURL) > 0 {
m.Set(fds.ByNumber(fieldnum.Any_TypeUrl), pref.ValueOfString(typeURL))
}
if len(bValue) > 0 {
m.Set(fds.ByNumber(fieldnum.Any_Value), pref.ValueOfBytes(bValue))
}
return nil
}
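// Illustrative note (not part of the original vendored source): the two forms
// accepted by unmarshalAny could look like the following, where
// example.MyMessage and its "name" field are hypothetical:
//
//	// Expanded form: the type URL appears as a field name in [].
//	[type.googleapis.com/example.MyMessage]: {name: "foo"}
//
//	// Non-expanded form: type_url and value are set as ordinary fields,
//	// with value holding the wire-encoded bytes of the embedded message.
//	type_url: "type.googleapis.com/example.MyMessage"
//	value: "\n\003foo"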
func (d decoder) unmarshalExpandedAny(typeURL string, pos int) ([]byte, error) {
mt, err := d.opts.Resolver.FindMessageByURL(typeURL)
if err != nil {
return nil, d.newError(pos, "unable to resolve message [%v]: %v", typeURL, err)
}
// Create new message for the embedded message type and unmarshal the value
// field into it.
m := mt.New()
if err := d.unmarshalMessage(m, true); err != nil {
return nil, err
}
// Serialize the embedded message and return the resulting bytes.
b, err := proto.MarshalOptions{
AllowPartial: true, // Never check required fields inside an Any.
Deterministic: true,
}.Marshal(m.Interface())
if err != nil {
return nil, d.newError(pos, "error in marshaling message into Any.value: %v", err)
}
return b, nil
}
// skipValue makes the decoder parse a field value in order to advance the read
// to the next field. It relies on Read returning an error if the types are not
// in valid sequence.
func (d decoder) skipValue() error {
tok, err := d.Read()
if err != nil {
return err
}
// Only need to continue reading for messages and lists.
switch tok.Kind() {
case text.MessageOpen:
return d.skipMessageValue()
case text.ListOpen:
for {
tok, err := d.Read()
if err != nil {
return err
}
switch tok.Kind() {
case text.ListClose:
return nil
case text.MessageOpen:
return d.skipMessageValue()
default:
// Skip items. This does not validate whether the skipped values are
// of the same type, matching the behavior of C++
// TextFormat::Parser::AllowUnknownField(true) as of version 3.8.0.
if err := d.skipValue(); err != nil {
return err
}
}
}
}
return nil
}
// skipMessageValue makes the decoder parse and skip over all fields in a
// message. It assumes that the previous read type is MessageOpen.
func (d decoder) skipMessageValue() error {
for {
tok, err := d.Read()
if err != nil {
return err
}
switch tok.Kind() {
case text.MessageClose:
return nil
case text.Name:
if err := d.skipValue(); err != nil {
return err
}
}
}
}

View file

@ -0,0 +1,7 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package prototext marshals and unmarshals protocol buffer messages as the
// textproto format.
package prototext

View file

@ -0,0 +1,410 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package prototext
import (
"fmt"
"sort"
"strconv"
"unicode/utf8"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/encoding/text"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/internal/fieldnum"
"google.golang.org/protobuf/internal/flags"
"google.golang.org/protobuf/internal/mapsort"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/proto"
pref "google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
const defaultIndent = " "
// Format formats the message as a multiline string.
// This function is only intended for human consumption and ignores errors.
// Do not depend on the output being stable. It may change over time across
// different versions of the program.
func Format(m proto.Message) string {
return MarshalOptions{Multiline: true}.Format(m)
}
// Marshal writes the given proto.Message in textproto format using default
// options. Do not depend on the output being stable. It may change over time
// across different versions of the program.
func Marshal(m proto.Message) ([]byte, error) {
return MarshalOptions{}.Marshal(m)
}
// MarshalOptions is a configurable text format marshaler.
type MarshalOptions struct {
pragma.NoUnkeyedLiterals
// Multiline specifies whether the marshaler should format the output in
// indented form with every textual element on a new line.
// If Indent is an empty string, then an arbitrary indent is chosen.
Multiline bool
// Indent specifies the set of indentation characters to use in a multiline
// formatted output such that every entry is preceded by Indent and
// terminated by a newline. If non-empty, then Multiline is treated as true.
// Indent can only be composed of space or tab characters.
Indent string
// AllowPartial allows messages that have missing required fields to marshal
// without returning an error. If AllowPartial is false (the default),
// Marshal will return error if there are any missing required fields.
AllowPartial bool
// EmitUnknown specifies whether to emit unknown fields in the output.
// If specified, the unmarshaler may be unable to parse the output.
// The default is to exclude unknown fields.
EmitUnknown bool
// Resolver is used for looking up types when expanding google.protobuf.Any
// messages. If nil, this defaults to using protoregistry.GlobalTypes.
Resolver interface {
protoregistry.ExtensionTypeResolver
protoregistry.MessageTypeResolver
}
}
// Format formats the message as a string.
// This method is only intended for human consumption and ignores errors.
// Do not depend on the output being stable. It may change over time across
// different versions of the program.
func (o MarshalOptions) Format(m proto.Message) string {
if m == nil || !m.ProtoReflect().IsValid() {
return "<nil>" // invalid syntax, but okay since this is for debugging
}
o.AllowPartial = true
o.EmitUnknown = true
b, _ := o.Marshal(m)
return string(b)
}
// Marshal writes the given proto.Message in textproto format using the options
// in the MarshalOptions object. Do not depend on the output being stable. It may
// change over time across different versions of the program.
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
const outputASCII = false
var delims = [2]byte{'{', '}'}
if o.Multiline && o.Indent == "" {
o.Indent = defaultIndent
}
if o.Resolver == nil {
o.Resolver = protoregistry.GlobalTypes
}
internalEnc, err := text.NewEncoder(o.Indent, delims, outputASCII)
if err != nil {
return nil, err
}
enc := encoder{internalEnc, o}
err = enc.marshalMessage(m.ProtoReflect(), false)
if err != nil {
return nil, err
}
out := enc.Bytes()
if len(o.Indent) > 0 && len(out) > 0 {
out = append(out, '\n')
}
if o.AllowPartial {
return out, nil
}
return out, proto.CheckInitialized(m)
}
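// Usage sketch (added for illustration, not part of the original source):
// marshaling a message with explicit options; msg stands for any value
// implementing proto.Message.
//
//	opts := MarshalOptions{Multiline: true, Indent: "  "}
//	b, err := opts.Marshal(msg)
//	if err != nil {
//		// handle error
//	}
//	_ = b // textproto output, one field per line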
type encoder struct {
*text.Encoder
opts MarshalOptions
}
// marshalMessage marshals the given protoreflect.Message.
func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error {
messageDesc := m.Descriptor()
if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) {
return errors.New("no support for proto1 MessageSets")
}
if inclDelims {
e.StartMessage()
defer e.EndMessage()
}
// Handle Any expansion.
if messageDesc.FullName() == "google.protobuf.Any" {
if e.marshalAny(m) {
return nil
}
// If unable to expand, continue on to marshal Any as a regular message.
}
// Marshal known fields.
fieldDescs := messageDesc.Fields()
size := fieldDescs.Len()
for i := 0; i < size; {
fd := fieldDescs.Get(i)
if od := fd.ContainingOneof(); od != nil {
fd = m.WhichOneof(od)
i += od.Fields().Len()
} else {
i++
}
if fd == nil || !m.Has(fd) {
continue
}
name := fd.Name()
// Use type name for group field name.
if fd.Kind() == pref.GroupKind {
name = fd.Message().Name()
}
val := m.Get(fd)
if err := e.marshalField(string(name), val, fd); err != nil {
return err
}
}
// Marshal extensions.
if err := e.marshalExtensions(m); err != nil {
return err
}
// Marshal unknown fields.
if e.opts.EmitUnknown {
e.marshalUnknown(m.GetUnknown())
}
return nil
}
// marshalField marshals the given field with protoreflect.Value.
func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescriptor) error {
switch {
case fd.IsList():
return e.marshalList(name, val.List(), fd)
case fd.IsMap():
return e.marshalMap(name, val.Map(), fd)
default:
e.WriteName(name)
return e.marshalSingular(val, fd)
}
}
// marshalSingular marshals the given non-repeated field value. This includes
// all scalar types, enums, messages, and groups.
func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error {
kind := fd.Kind()
switch kind {
case pref.BoolKind:
e.WriteBool(val.Bool())
case pref.StringKind:
s := val.String()
if !utf8.ValidString(s) {
return errors.InvalidUTF8(string(fd.FullName()))
}
e.WriteString(s)
case pref.Int32Kind, pref.Int64Kind,
pref.Sint32Kind, pref.Sint64Kind,
pref.Sfixed32Kind, pref.Sfixed64Kind:
e.WriteInt(val.Int())
case pref.Uint32Kind, pref.Uint64Kind,
pref.Fixed32Kind, pref.Fixed64Kind:
e.WriteUint(val.Uint())
case pref.FloatKind:
// Encoder.WriteFloat handles the special numbers NaN and infinities.
e.WriteFloat(val.Float(), 32)
case pref.DoubleKind:
// Encoder.WriteFloat handles the special numbers NaN and infinities.
e.WriteFloat(val.Float(), 64)
case pref.BytesKind:
e.WriteString(string(val.Bytes()))
case pref.EnumKind:
num := val.Enum()
if desc := fd.Enum().Values().ByNumber(num); desc != nil {
e.WriteLiteral(string(desc.Name()))
} else {
// Use numeric value if there is no enum description.
e.WriteInt(int64(num))
}
case pref.MessageKind, pref.GroupKind:
return e.marshalMessage(val.Message(), true)
default:
panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind))
}
return nil
}
// marshalList marshals the given protoreflect.List as multiple name-value fields.
func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescriptor) error {
size := list.Len()
for i := 0; i < size; i++ {
e.WriteName(name)
if err := e.marshalSingular(list.Get(i), fd); err != nil {
return err
}
}
return nil
}
// marshalMap marshals the given protoreflect.Map as multiple name-value fields.
func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) error {
var err error
mapsort.Range(mmap, fd.MapKey().Kind(), func(key pref.MapKey, val pref.Value) bool {
e.WriteName(name)
e.StartMessage()
defer e.EndMessage()
e.WriteName("key")
err = e.marshalSingular(key.Value(), fd.MapKey())
if err != nil {
return false
}
e.WriteName("value")
err = e.marshalSingular(val, fd.MapValue())
if err != nil {
return false
}
return true
})
return err
}
// marshalExtensions marshals extension fields.
func (e encoder) marshalExtensions(m pref.Message) error {
type entry struct {
key string
value pref.Value
desc pref.FieldDescriptor
}
// Get a sorted list based on field key first.
var entries []entry
m.Range(func(fd pref.FieldDescriptor, v pref.Value) bool {
if !fd.IsExtension() {
return true
}
// For MessageSet extensions, the name used is the parent message.
name := fd.FullName()
if messageset.IsMessageSetExtension(fd) {
name = name.Parent()
}
entries = append(entries, entry{
key: string(name),
value: v,
desc: fd,
})
return true
})
// Sort extensions lexicographically.
sort.Slice(entries, func(i, j int) bool {
return entries[i].key < entries[j].key
})
// Write out sorted list.
for _, entry := range entries {
// Extension field name is the proto field name enclosed in [].
name := "[" + entry.key + "]"
if err := e.marshalField(name, entry.value, entry.desc); err != nil {
return err
}
}
return nil
}
// marshalUnknown parses the given []byte and marshals fields out.
// This function assumes proper encoding in the given []byte.
func (e encoder) marshalUnknown(b []byte) {
const dec = 10
const hex = 16
for len(b) > 0 {
num, wtype, n := protowire.ConsumeTag(b)
b = b[n:]
e.WriteName(strconv.FormatInt(int64(num), dec))
switch wtype {
case protowire.VarintType:
var v uint64
v, n = protowire.ConsumeVarint(b)
e.WriteUint(v)
case protowire.Fixed32Type:
var v uint32
v, n = protowire.ConsumeFixed32(b)
e.WriteLiteral("0x" + strconv.FormatUint(uint64(v), hex))
case protowire.Fixed64Type:
var v uint64
v, n = protowire.ConsumeFixed64(b)
e.WriteLiteral("0x" + strconv.FormatUint(v, hex))
case protowire.BytesType:
var v []byte
v, n = protowire.ConsumeBytes(b)
e.WriteString(string(v))
case protowire.StartGroupType:
e.StartMessage()
var v []byte
v, n = protowire.ConsumeGroup(num, b)
e.marshalUnknown(v)
e.EndMessage()
default:
panic(fmt.Sprintf("prototext: error parsing unknown field wire type: %v", wtype))
}
b = b[n:]
}
}
// marshalAny marshals the given google.protobuf.Any message in expanded form.
// It returns true if it was able to marshal, else false.
func (e encoder) marshalAny(any pref.Message) bool {
// Construct the embedded message.
fds := any.Descriptor().Fields()
fdType := fds.ByNumber(fieldnum.Any_TypeUrl)
typeURL := any.Get(fdType).String()
mt, err := e.opts.Resolver.FindMessageByURL(typeURL)
if err != nil {
return false
}
m := mt.New().Interface()
// Unmarshal bytes into embedded message.
fdValue := fds.ByNumber(fieldnum.Any_Value)
value := any.Get(fdValue)
err = proto.UnmarshalOptions{
AllowPartial: true,
Resolver: e.opts.Resolver,
}.Unmarshal(value.Bytes(), m)
if err != nil {
return false
}
// Get current encoder position. If marshaling fails, reset encoder output
// back to this position.
pos := e.Snapshot()
// Field name is the proto field name enclosed in [].
e.WriteName("[" + typeURL + "]")
err = e.marshalMessage(m.ProtoReflect(), true)
if err != nil {
e.Reset(pos)
return false
}
return true
}

View file

@ -0,0 +1,538 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package protowire parses and formats the raw wire encoding.
// See https://developers.google.com/protocol-buffers/docs/encoding.
//
// For marshaling and unmarshaling entire protobuf messages,
// use the "google.golang.org/protobuf/proto" package instead.
package protowire
import (
"io"
"math"
"math/bits"
"google.golang.org/protobuf/internal/errors"
)
// Number represents the field number.
type Number int32
const (
MinValidNumber Number = 1
FirstReservedNumber Number = 19000
LastReservedNumber Number = 19999
MaxValidNumber Number = 1<<29 - 1
)
// IsValid reports whether the field number is semantically valid.
//
// Note that while numbers within the reserved range are semantically invalid,
// they are syntactically valid in the wire format.
// Implementations may treat records with reserved field numbers as unknown.
func (n Number) IsValid() bool {
return MinValidNumber <= n && n < FirstReservedNumber || LastReservedNumber < n && n <= MaxValidNumber
}
// Type represents the wire type.
type Type int8
const (
VarintType Type = 0
Fixed32Type Type = 5
Fixed64Type Type = 1
BytesType Type = 2
StartGroupType Type = 3
EndGroupType Type = 4
)
const (
_ = -iota
errCodeTruncated
errCodeFieldNumber
errCodeOverflow
errCodeReserved
errCodeEndGroup
)
var (
errFieldNumber = errors.New("invalid field number")
errOverflow = errors.New("variable length integer overflow")
errReserved = errors.New("cannot parse reserved wire type")
errEndGroup = errors.New("mismatching end group marker")
errParse = errors.New("parse error")
)
// ParseError converts an error code into an error value.
// This returns nil if n is a non-negative number.
func ParseError(n int) error {
if n >= 0 {
return nil
}
switch n {
case errCodeTruncated:
return io.ErrUnexpectedEOF
case errCodeFieldNumber:
return errFieldNumber
case errCodeOverflow:
return errOverflow
case errCodeReserved:
return errReserved
case errCodeEndGroup:
return errEndGroup
default:
return errParse
}
}
// ConsumeField parses an entire field record (both tag and value) and returns
// the field number, the wire type, and the total length.
// This returns a negative length upon an error (see ParseError).
//
// The total length includes the tag header and the end group marker (if the
// field is a group).
func ConsumeField(b []byte) (Number, Type, int) {
num, typ, n := ConsumeTag(b)
if n < 0 {
return 0, 0, n // forward error code
}
m := ConsumeFieldValue(num, typ, b[n:])
if m < 0 {
return 0, 0, m // forward error code
}
return num, typ, n + m
}
// ConsumeFieldValue parses a field value and returns its length.
// This assumes that the field Number and wire Type have already been parsed.
// This returns a negative length upon an error (see ParseError).
//
// When parsing a group, the length includes the end group marker and
// the end group is verified to match the starting field number.
func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) {
switch typ {
case VarintType:
_, n = ConsumeVarint(b)
return n
case Fixed32Type:
_, n = ConsumeFixed32(b)
return n
case Fixed64Type:
_, n = ConsumeFixed64(b)
return n
case BytesType:
_, n = ConsumeBytes(b)
return n
case StartGroupType:
n0 := len(b)
for {
num2, typ2, n := ConsumeTag(b)
if n < 0 {
return n // forward error code
}
b = b[n:]
if typ2 == EndGroupType {
if num != num2 {
return errCodeEndGroup
}
return n0 - len(b)
}
n = ConsumeFieldValue(num2, typ2, b)
if n < 0 {
return n // forward error code
}
b = b[n:]
}
case EndGroupType:
return errCodeEndGroup
default:
return errCodeReserved
}
}
// AppendTag encodes num and typ as a varint-encoded tag and appends it to b.
func AppendTag(b []byte, num Number, typ Type) []byte {
return AppendVarint(b, EncodeTag(num, typ))
}
// ConsumeTag parses b as a varint-encoded tag, reporting its length.
// This returns a negative length upon an error (see ParseError).
func ConsumeTag(b []byte) (Number, Type, int) {
v, n := ConsumeVarint(b)
if n < 0 {
return 0, 0, n // forward error code
}
num, typ := DecodeTag(v)
if num < MinValidNumber {
return 0, 0, errCodeFieldNumber
}
return num, typ, n
}
func SizeTag(num Number) int {
return SizeVarint(EncodeTag(num, 0)) // wire type has no effect on size
}
// AppendVarint appends v to b as a varint-encoded uint64.
func AppendVarint(b []byte, v uint64) []byte {
switch {
case v < 1<<7:
b = append(b, byte(v))
case v < 1<<14:
b = append(b,
byte((v>>0)&0x7f|0x80),
byte(v>>7))
case v < 1<<21:
b = append(b,
byte((v>>0)&0x7f|0x80),
byte((v>>7)&0x7f|0x80),
byte(v>>14))
case v < 1<<28:
b = append(b,
byte((v>>0)&0x7f|0x80),
byte((v>>7)&0x7f|0x80),
byte((v>>14)&0x7f|0x80),
byte(v>>21))
case v < 1<<35:
b = append(b,
byte((v>>0)&0x7f|0x80),
byte((v>>7)&0x7f|0x80),
byte((v>>14)&0x7f|0x80),
byte((v>>21)&0x7f|0x80),
byte(v>>28))
case v < 1<<42:
b = append(b,
byte((v>>0)&0x7f|0x80),
byte((v>>7)&0x7f|0x80),
byte((v>>14)&0x7f|0x80),
byte((v>>21)&0x7f|0x80),
byte((v>>28)&0x7f|0x80),
byte(v>>35))
case v < 1<<49:
b = append(b,
byte((v>>0)&0x7f|0x80),
byte((v>>7)&0x7f|0x80),
byte((v>>14)&0x7f|0x80),
byte((v>>21)&0x7f|0x80),
byte((v>>28)&0x7f|0x80),
byte((v>>35)&0x7f|0x80),
byte(v>>42))
case v < 1<<56:
b = append(b,
byte((v>>0)&0x7f|0x80),
byte((v>>7)&0x7f|0x80),
byte((v>>14)&0x7f|0x80),
byte((v>>21)&0x7f|0x80),
byte((v>>28)&0x7f|0x80),
byte((v>>35)&0x7f|0x80),
byte((v>>42)&0x7f|0x80),
byte(v>>49))
case v < 1<<63:
b = append(b,
byte((v>>0)&0x7f|0x80),
byte((v>>7)&0x7f|0x80),
byte((v>>14)&0x7f|0x80),
byte((v>>21)&0x7f|0x80),
byte((v>>28)&0x7f|0x80),
byte((v>>35)&0x7f|0x80),
byte((v>>42)&0x7f|0x80),
byte((v>>49)&0x7f|0x80),
byte(v>>56))
default:
b = append(b,
byte((v>>0)&0x7f|0x80),
byte((v>>7)&0x7f|0x80),
byte((v>>14)&0x7f|0x80),
byte((v>>21)&0x7f|0x80),
byte((v>>28)&0x7f|0x80),
byte((v>>35)&0x7f|0x80),
byte((v>>42)&0x7f|0x80),
byte((v>>49)&0x7f|0x80),
byte((v>>56)&0x7f|0x80),
1)
}
return b
}
// ConsumeVarint parses b as a varint-encoded uint64, reporting its length.
// This returns a negative length upon an error (see ParseError).
func ConsumeVarint(b []byte) (v uint64, n int) {
var y uint64
if len(b) <= 0 {
return 0, errCodeTruncated
}
v = uint64(b[0])
if v < 0x80 {
return v, 1
}
v -= 0x80
if len(b) <= 1 {
return 0, errCodeTruncated
}
y = uint64(b[1])
v += y << 7
if y < 0x80 {
return v, 2
}
v -= 0x80 << 7
if len(b) <= 2 {
return 0, errCodeTruncated
}
y = uint64(b[2])
v += y << 14
if y < 0x80 {
return v, 3
}
v -= 0x80 << 14
if len(b) <= 3 {
return 0, errCodeTruncated
}
y = uint64(b[3])
v += y << 21
if y < 0x80 {
return v, 4
}
v -= 0x80 << 21
if len(b) <= 4 {
return 0, errCodeTruncated
}
y = uint64(b[4])
v += y << 28
if y < 0x80 {
return v, 5
}
v -= 0x80 << 28
if len(b) <= 5 {
return 0, errCodeTruncated
}
y = uint64(b[5])
v += y << 35
if y < 0x80 {
return v, 6
}
v -= 0x80 << 35
if len(b) <= 6 {
return 0, errCodeTruncated
}
y = uint64(b[6])
v += y << 42
if y < 0x80 {
return v, 7
}
v -= 0x80 << 42
if len(b) <= 7 {
return 0, errCodeTruncated
}
y = uint64(b[7])
v += y << 49
if y < 0x80 {
return v, 8
}
v -= 0x80 << 49
if len(b) <= 8 {
return 0, errCodeTruncated
}
y = uint64(b[8])
v += y << 56
if y < 0x80 {
return v, 9
}
v -= 0x80 << 56
if len(b) <= 9 {
return 0, errCodeTruncated
}
y = uint64(b[9])
v += y << 63
if y < 2 {
return v, 10
}
return 0, errCodeOverflow
}
// SizeVarint returns the encoded size of a varint.
// The size is guaranteed to be within 1 and 10, inclusive.
func SizeVarint(v uint64) int {
// This computes 1 + (bits.Len64(v)-1)/7.
// 9/64 is a good enough approximation of 1/7
return int(9*uint32(bits.Len64(v))+64) / 64
}
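// Worked example (added for illustration): AppendVarint(nil, 300) produces
// [0xAC, 0x02]. 300 is 0b1_0010_1100; the low 7 bits (0x2C) are written first
// with the continuation bit set (0xAC), then the remaining bits (0x02).
// SizeVarint(300) correspondingly reports 2, and ConsumeVarint of those two
// bytes returns (300, 2).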
// AppendFixed32 appends v to b as a little-endian uint32.
func AppendFixed32(b []byte, v uint32) []byte {
return append(b,
byte(v>>0),
byte(v>>8),
byte(v>>16),
byte(v>>24))
}
// ConsumeFixed32 parses b as a little-endian uint32, reporting its length.
// This returns a negative length upon an error (see ParseError).
func ConsumeFixed32(b []byte) (v uint32, n int) {
if len(b) < 4 {
return 0, errCodeTruncated
}
v = uint32(b[0])<<0 | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
return v, 4
}
// SizeFixed32 returns the encoded size of a fixed32, which is always 4.
func SizeFixed32() int {
return 4
}
// AppendFixed64 appends v to b as a little-endian uint64.
func AppendFixed64(b []byte, v uint64) []byte {
return append(b,
byte(v>>0),
byte(v>>8),
byte(v>>16),
byte(v>>24),
byte(v>>32),
byte(v>>40),
byte(v>>48),
byte(v>>56))
}
// ConsumeFixed64 parses b as a little-endian uint64, reporting its length.
// This returns a negative length upon an error (see ParseError).
func ConsumeFixed64(b []byte) (v uint64, n int) {
if len(b) < 8 {
return 0, errCodeTruncated
}
v = uint64(b[0])<<0 | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
return v, 8
}
// SizeFixed64 returns the encoded size of a fixed64, which is always 8.
func SizeFixed64() int {
return 8
}
// AppendBytes appends v to b as a length-prefixed bytes value.
func AppendBytes(b []byte, v []byte) []byte {
return append(AppendVarint(b, uint64(len(v))), v...)
}
// ConsumeBytes parses b as a length-prefixed bytes value, reporting its length.
// This returns a negative length upon an error (see ParseError).
func ConsumeBytes(b []byte) (v []byte, n int) {
m, n := ConsumeVarint(b)
if n < 0 {
return nil, n // forward error code
}
if m > uint64(len(b[n:])) {
return nil, errCodeTruncated
}
return b[n:][:m], n + int(m)
}
// SizeBytes returns the encoded size of a length-prefixed bytes value,
// given only the length.
func SizeBytes(n int) int {
return SizeVarint(uint64(n)) + n
}
// AppendString appends v to b as a length-prefixed bytes value.
func AppendString(b []byte, v string) []byte {
return append(AppendVarint(b, uint64(len(v))), v...)
}
// ConsumeString parses b as a length-prefixed bytes value, reporting its length.
// This returns a negative length upon an error (see ParseError).
func ConsumeString(b []byte) (v string, n int) {
bb, n := ConsumeBytes(b)
return string(bb), n
}
// AppendGroup appends v to b as group value, with a trailing end group marker.
// The value v must not contain the end marker.
func AppendGroup(b []byte, num Number, v []byte) []byte {
return AppendVarint(append(b, v...), EncodeTag(num, EndGroupType))
}
// ConsumeGroup parses b as a group value until the trailing end group marker,
// and verifies that the end marker matches the provided num. The value v
// does not contain the end marker, while the length does contain the end marker.
// This returns a negative length upon an error (see ParseError).
func ConsumeGroup(num Number, b []byte) (v []byte, n int) {
n = ConsumeFieldValue(num, StartGroupType, b)
if n < 0 {
return nil, n // forward error code
}
b = b[:n]
// Truncate off end group marker, but need to handle denormalized varints.
// Assuming end marker is never 0 (which is always the case since
// EndGroupType is non-zero), we can truncate all trailing bytes where the
// lower 7 bits are all zero (implying that the varint is denormalized).
for len(b) > 0 && b[len(b)-1]&0x7f == 0 {
b = b[:len(b)-1]
}
b = b[:len(b)-SizeTag(num)]
return b, n
}
// SizeGroup returns the encoded size of a group, given only the length.
func SizeGroup(num Number, n int) int {
return n + SizeTag(num)
}
// DecodeTag decodes the field Number and wire Type from its unified form.
// The Number is -1 if the decoded field number overflows int32.
// Other than overflow, this does not check for field number validity.
func DecodeTag(x uint64) (Number, Type) {
// NOTE: MessageSet allows for larger field numbers than normal.
if x>>3 > uint64(math.MaxInt32) {
return -1, 0
}
return Number(x >> 3), Type(x & 7)
}
// EncodeTag encodes the field Number and wire Type into its unified form.
func EncodeTag(num Number, typ Type) uint64 {
return uint64(num)<<3 | uint64(typ&7)
}
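// Worked example (added for illustration): EncodeTag(1, BytesType) yields
// 0x0A (field number 1 shifted left by 3 bits, OR'd with wire type 2), and
// DecodeTag(0x0A) recovers (Number(1), BytesType).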
// DecodeZigZag decodes a zig-zag-encoded uint64 as an int64.
// Input: {…, 5, 3, 1, 0, 2, 4, 6, …}
// Output: {…, -3, -2, -1, 0, +1, +2, +3, …}
func DecodeZigZag(x uint64) int64 {
return int64(x>>1) ^ int64(x)<<63>>63
}
// EncodeZigZag encodes an int64 as a zig-zag-encoded uint64.
// Input: {…, -3, -2, -1, 0, +1, +2, +3, …}
// Output: {…, 5, 3, 1, 0, 2, 4, 6, …}
func EncodeZigZag(x int64) uint64 {
return uint64(x<<1) ^ uint64(x>>63)
}
// DecodeBool decodes a uint64 as a bool.
// Input: { 0, 1, 2, …}
// Output: {false, true, true, …}
func DecodeBool(x uint64) bool {
return x != 0
}
// EncodeBool encodes a bool as a uint64.
// Input: {false, true}
// Output: { 0, 1}
func EncodeBool(x bool) uint64 {
if x {
return 1
}
return 0
}

View file

@ -0,0 +1,316 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package descfmt provides functionality to format descriptors.
package descfmt
import (
"fmt"
"io"
"reflect"
"strconv"
"strings"
"google.golang.org/protobuf/internal/detrand"
"google.golang.org/protobuf/internal/pragma"
pref "google.golang.org/protobuf/reflect/protoreflect"
)
type list interface {
Len() int
pragma.DoNotImplement
}
func FormatList(s fmt.State, r rune, vs list) {
io.WriteString(s, formatListOpt(vs, true, r == 'v' && (s.Flag('+') || s.Flag('#'))))
}
func formatListOpt(vs list, isRoot, allowMulti bool) string {
start, end := "[", "]"
if isRoot {
var name string
switch vs.(type) {
case pref.Names:
name = "Names"
case pref.FieldNumbers:
name = "FieldNumbers"
case pref.FieldRanges:
name = "FieldRanges"
case pref.EnumRanges:
name = "EnumRanges"
case pref.FileImports:
name = "FileImports"
case pref.Descriptor:
name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s"
}
start, end = name+"{", "}"
}
var ss []string
switch vs := vs.(type) {
case pref.Names:
for i := 0; i < vs.Len(); i++ {
ss = append(ss, fmt.Sprint(vs.Get(i)))
}
return start + joinStrings(ss, false) + end
case pref.FieldNumbers:
for i := 0; i < vs.Len(); i++ {
ss = append(ss, fmt.Sprint(vs.Get(i)))
}
return start + joinStrings(ss, false) + end
case pref.FieldRanges:
for i := 0; i < vs.Len(); i++ {
r := vs.Get(i)
if r[0]+1 == r[1] {
ss = append(ss, fmt.Sprintf("%d", r[0]))
} else {
ss = append(ss, fmt.Sprintf("%d:%d", r[0], r[1])) // field ranges are end exclusive
}
}
return start + joinStrings(ss, false) + end
case pref.EnumRanges:
for i := 0; i < vs.Len(); i++ {
r := vs.Get(i)
if r[0] == r[1] {
ss = append(ss, fmt.Sprintf("%d", r[0]))
} else {
ss = append(ss, fmt.Sprintf("%d:%d", r[0], int64(r[1])+1)) // enum ranges are end inclusive
}
}
return start + joinStrings(ss, false) + end
case pref.FileImports:
for i := 0; i < vs.Len(); i++ {
var rs records
rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak")
ss = append(ss, "{"+rs.Join()+"}")
}
return start + joinStrings(ss, allowMulti) + end
default:
_, isEnumValue := vs.(pref.EnumValueDescriptors)
for i := 0; i < vs.Len(); i++ {
m := reflect.ValueOf(vs).MethodByName("Get")
v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface()
ss = append(ss, formatDescOpt(v.(pref.Descriptor), false, allowMulti && !isEnumValue))
}
return start + joinStrings(ss, allowMulti && isEnumValue) + end
}
}
// descriptorAccessors is a list of accessors to print for each descriptor.
//
// Do not print all accessors since some contain redundant information,
// while others are pointers that we do not want to follow since the descriptor
// is actually a cyclic graph.
//
// Using a list allows us to print the accessors in a sensible order.
var descriptorAccessors = map[reflect.Type][]string{
reflect.TypeOf((*pref.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"},
reflect.TypeOf((*pref.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"},
reflect.TypeOf((*pref.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "IsPacked", "IsExtension", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"},
reflect.TypeOf((*pref.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt
reflect.TypeOf((*pref.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"},
reflect.TypeOf((*pref.EnumValueDescriptor)(nil)).Elem(): {"Number"},
reflect.TypeOf((*pref.ServiceDescriptor)(nil)).Elem(): {"Methods"},
reflect.TypeOf((*pref.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"},
}
func FormatDesc(s fmt.State, r rune, t pref.Descriptor) {
io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#'))))
}
func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string {
rv := reflect.ValueOf(t)
rt := rv.MethodByName("ProtoType").Type().In(0)
start, end := "{", "}"
if isRoot {
start = rt.Name() + "{"
}
_, isFile := t.(pref.FileDescriptor)
rs := records{allowMulti: allowMulti}
if t.IsPlaceholder() {
if isFile {
rs.Append(rv, "Path", "Package", "IsPlaceholder")
} else {
rs.Append(rv, "FullName", "IsPlaceholder")
}
} else {
switch {
case isFile:
rs.Append(rv, "Syntax")
case isRoot:
rs.Append(rv, "Syntax", "FullName")
default:
rs.Append(rv, "Name")
}
switch t := t.(type) {
case pref.FieldDescriptor:
for _, s := range descriptorAccessors[rt] {
switch s {
case "MapKey":
if k := t.MapKey(); k != nil {
rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()})
}
case "MapValue":
if v := t.MapValue(); v != nil {
switch v.Kind() {
case pref.EnumKind:
rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())})
case pref.MessageKind, pref.GroupKind:
rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())})
default:
rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()})
}
}
case "ContainingOneof":
if od := t.ContainingOneof(); od != nil {
rs.recs = append(rs.recs, [2]string{"Oneof", string(od.Name())})
}
case "ContainingMessage":
if t.IsExtension() {
rs.recs = append(rs.recs, [2]string{"Extendee", string(t.ContainingMessage().FullName())})
}
case "Message":
if !t.IsMap() {
rs.Append(rv, s)
}
default:
rs.Append(rv, s)
}
}
case pref.OneofDescriptor:
var ss []string
fs := t.Fields()
for i := 0; i < fs.Len(); i++ {
ss = append(ss, string(fs.Get(i).Name()))
}
if len(ss) > 0 {
rs.recs = append(rs.recs, [2]string{"Fields", "[" + joinStrings(ss, false) + "]"})
}
default:
rs.Append(rv, descriptorAccessors[rt]...)
}
if rv.MethodByName("GoType").IsValid() {
rs.Append(rv, "GoType")
}
}
return start + rs.Join() + end
}
type records struct {
recs [][2]string
allowMulti bool
}
func (rs *records) Append(v reflect.Value, accessors ...string) {
for _, a := range accessors {
var rv reflect.Value
if m := v.MethodByName(a); m.IsValid() {
rv = m.Call(nil)[0]
}
if v.Kind() == reflect.Struct && !rv.IsValid() {
rv = v.FieldByName(a)
}
if !rv.IsValid() {
panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a))
}
if _, ok := rv.Interface().(pref.Value); ok {
rv = rv.MethodByName("Interface").Call(nil)[0]
if !rv.IsNil() {
rv = rv.Elem()
}
}
// Ignore zero values.
var isZero bool
switch rv.Kind() {
case reflect.Interface, reflect.Slice:
isZero = rv.IsNil()
case reflect.Bool:
isZero = rv.Bool() == false
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
isZero = rv.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
isZero = rv.Uint() == 0
case reflect.String:
isZero = rv.String() == ""
}
if n, ok := rv.Interface().(list); ok {
isZero = n.Len() == 0
}
if isZero {
continue
}
// Format the value.
var s string
v := rv.Interface()
switch v := v.(type) {
case list:
s = formatListOpt(v, false, rs.allowMulti)
case pref.FieldDescriptor, pref.OneofDescriptor, pref.EnumValueDescriptor, pref.MethodDescriptor:
s = string(v.(pref.Descriptor).Name())
case pref.Descriptor:
s = string(v.FullName())
case string:
s = strconv.Quote(v)
case []byte:
s = fmt.Sprintf("%q", v)
default:
s = fmt.Sprint(v)
}
rs.recs = append(rs.recs, [2]string{a, s})
}
}
func (rs *records) Join() string {
var ss []string
// In single line mode, simply join all records with commas.
if !rs.allowMulti {
for _, r := range rs.recs {
ss = append(ss, r[0]+formatColon(0)+r[1])
}
return joinStrings(ss, false)
}
// In multiline mode, align single-line records for more readable output.
var maxLen int
flush := func(i int) {
for _, r := range rs.recs[len(ss):i] {
ss = append(ss, r[0]+formatColon(maxLen-len(r[0]))+r[1])
}
maxLen = 0
}
for i, r := range rs.recs {
if isMulti := strings.Contains(r[1], "\n"); isMulti {
flush(i)
ss = append(ss, r[0]+formatColon(0)+strings.Join(strings.Split(r[1], "\n"), "\n\t"))
} else if maxLen < len(r[0]) {
maxLen = len(r[0])
}
}
flush(len(rs.recs))
return joinStrings(ss, true)
}
func formatColon(padding int) string {
// Deliberately introduce instability into the debug output to
// discourage users from performing string comparisons.
// This provides us flexibility to change the output in the future.
if detrand.Bool() {
return ":" + strings.Repeat(" ", 1+padding) // use non-breaking spaces (U+00a0)
} else {
return ":" + strings.Repeat(" ", 1+padding) // use regular spaces (U+0020)
}
}
func joinStrings(ss []string, isMulti bool) string {
if len(ss) == 0 {
return ""
}
if isMulti {
return "\n\t" + strings.Join(ss, "\n\t") + "\n"
}
return strings.Join(ss, ", ")
}

View file

@ -0,0 +1,29 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package descopts contains the nil pointers to concrete descriptor options.
//
// This package exists as a form of reverse dependency injection so that certain
// packages (e.g., internal/filedesc and internal/filetype) can avoid a direct
// dependency on the descriptor proto package.
package descopts
import pref "google.golang.org/protobuf/reflect/protoreflect"
// These variables are set by the init function in descriptor.pb.go via logic
// in internal/filetype. In other words, so long as the descriptor proto package
// is linked in, these variables will be populated.
//
// Each variable is populated with a nil pointer to the options struct.
var (
File pref.ProtoMessage
Enum pref.ProtoMessage
EnumValue pref.ProtoMessage
Message pref.ProtoMessage
Field pref.ProtoMessage
Oneof pref.ProtoMessage
ExtensionRange pref.ProtoMessage
Service pref.ProtoMessage
Method pref.ProtoMessage
)

View file

@ -0,0 +1,61 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package detrand provides deterministically random functionality.
//
// The pseudo-randomness of these functions is seeded by the program binary
// itself and guarantees that the output does not change within a program,
// while ensuring that the output is unstable across different builds.
package detrand
import (
"encoding/binary"
"hash/fnv"
"os"
)
// Disable disables detrand such that all functions return the zero value.
// This function is not concurrent-safe and must be called during program init.
func Disable() {
randSeed = 0
}
// Bool returns a deterministically random boolean.
func Bool() bool {
return randSeed%2 == 1
}
// randSeed is a best-effort at an approximate hash of the Go binary.
var randSeed = binaryHash()
func binaryHash() uint64 {
// Open the Go binary.
s, err := os.Executable()
if err != nil {
return 0
}
f, err := os.Open(s)
if err != nil {
return 0
}
defer f.Close()
// Hash the size and several samples of the Go binary.
const numSamples = 8
var buf [64]byte
h := fnv.New64()
fi, err := f.Stat()
if err != nil {
return 0
}
binary.LittleEndian.PutUint64(buf[:8], uint64(fi.Size()))
h.Write(buf[:8])
for i := int64(0); i < numSamples; i++ {
if _, err := f.ReadAt(buf[:], i*fi.Size()/numSamples); err != nil {
return 0
}
h.Write(buf[:])
}
return h.Sum64()
}

View file

@ -0,0 +1,213 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package defval marshals and unmarshals textual forms of default values.
//
// This package handles both the form historically used in Go struct field tags
// and the form used by google.protobuf.FieldDescriptorProto.default_value,
// since the two differ in superficial ways.
package defval
import (
"fmt"
"math"
"strconv"
ptext "google.golang.org/protobuf/internal/encoding/text"
errors "google.golang.org/protobuf/internal/errors"
pref "google.golang.org/protobuf/reflect/protoreflect"
)
// Format is the serialization format used to represent the default value.
type Format int
const (
_ Format = iota
// Descriptor uses the serialization format that protoc uses with the
// google.protobuf.FieldDescriptorProto.default_value field.
Descriptor
// GoTag uses the historical serialization format in Go struct field tags.
GoTag
)
// Unmarshal deserializes the default string s according to the given kind k.
// When k is an enum, a list of enum value descriptors must be provided.
func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) (pref.Value, pref.EnumValueDescriptor, error) {
switch k {
case pref.BoolKind:
if f == GoTag {
switch s {
case "1":
return pref.ValueOfBool(true), nil, nil
case "0":
return pref.ValueOfBool(false), nil, nil
}
} else {
switch s {
case "true":
return pref.ValueOfBool(true), nil, nil
case "false":
return pref.ValueOfBool(false), nil, nil
}
}
case pref.EnumKind:
if f == GoTag {
// Go tags use the numeric form of the enum value.
if n, err := strconv.ParseInt(s, 10, 32); err == nil {
if ev := evs.ByNumber(pref.EnumNumber(n)); ev != nil {
return pref.ValueOfEnum(ev.Number()), ev, nil
}
}
} else {
// Descriptor default_value uses the enum identifier.
ev := evs.ByName(pref.Name(s))
if ev != nil {
return pref.ValueOfEnum(ev.Number()), ev, nil
}
}
case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
if v, err := strconv.ParseInt(s, 10, 32); err == nil {
return pref.ValueOfInt32(int32(v)), nil, nil
}
case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
if v, err := strconv.ParseInt(s, 10, 64); err == nil {
return pref.ValueOfInt64(int64(v)), nil, nil
}
case pref.Uint32Kind, pref.Fixed32Kind:
if v, err := strconv.ParseUint(s, 10, 32); err == nil {
return pref.ValueOfUint32(uint32(v)), nil, nil
}
case pref.Uint64Kind, pref.Fixed64Kind:
if v, err := strconv.ParseUint(s, 10, 64); err == nil {
return pref.ValueOfUint64(uint64(v)), nil, nil
}
case pref.FloatKind, pref.DoubleKind:
var v float64
var err error
switch s {
case "-inf":
v = math.Inf(-1)
case "inf":
v = math.Inf(+1)
case "nan":
v = math.NaN()
default:
v, err = strconv.ParseFloat(s, 64)
}
if err == nil {
if k == pref.FloatKind {
return pref.ValueOfFloat32(float32(v)), nil, nil
} else {
return pref.ValueOfFloat64(float64(v)), nil, nil
}
}
case pref.StringKind:
// String values are already unescaped and can be used as is.
return pref.ValueOfString(s), nil, nil
case pref.BytesKind:
if b, ok := unmarshalBytes(s); ok {
return pref.ValueOfBytes(b), nil, nil
}
}
return pref.Value{}, nil, errors.New("could not parse value for %v: %q", k, s)
}
// Marshal serializes v as the default string according to the given kind k.
// When specifying the Descriptor format for an enum kind, the associated
// enum value descriptor must be provided.
func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) (string, error) {
switch k {
case pref.BoolKind:
if f == GoTag {
if v.Bool() {
return "1", nil
} else {
return "0", nil
}
} else {
if v.Bool() {
return "true", nil
} else {
return "false", nil
}
}
case pref.EnumKind:
if f == GoTag {
return strconv.FormatInt(int64(v.Enum()), 10), nil
} else {
return string(ev.Name()), nil
}
case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind, pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
return strconv.FormatInt(v.Int(), 10), nil
case pref.Uint32Kind, pref.Fixed32Kind, pref.Uint64Kind, pref.Fixed64Kind:
return strconv.FormatUint(v.Uint(), 10), nil
case pref.FloatKind, pref.DoubleKind:
f := v.Float()
switch {
case math.IsInf(f, -1):
return "-inf", nil
case math.IsInf(f, +1):
return "inf", nil
case math.IsNaN(f):
return "nan", nil
default:
if k == pref.FloatKind {
return strconv.FormatFloat(f, 'g', -1, 32), nil
} else {
return strconv.FormatFloat(f, 'g', -1, 64), nil
}
}
case pref.StringKind:
// String values are serialized as is without any escaping.
return v.String(), nil
case pref.BytesKind:
if s, ok := marshalBytes(v.Bytes()); ok {
return s, nil
}
}
return "", errors.New("could not format value for %v: %v", k, v)
}
// unmarshalBytes deserializes bytes by applying C unescaping.
func unmarshalBytes(s string) ([]byte, bool) {
// Bytes values use the same escaping as the text format,
// however they lack the surrounding double quotes.
v, err := ptext.UnmarshalString(`"` + s + `"`)
if err != nil {
return nil, false
}
return []byte(v), true
}
// marshalBytes serializes bytes by using C escaping.
// To match the exact output of protoc, this is identical to the
// CEscape function in strutil.cc of the protoc source code.
func marshalBytes(b []byte) (string, bool) {
var s []byte
for _, c := range b {
switch c {
case '\n':
s = append(s, `\n`...)
case '\r':
s = append(s, `\r`...)
case '\t':
s = append(s, `\t`...)
case '"':
s = append(s, `\"`...)
case '\'':
s = append(s, `\'`...)
case '\\':
s = append(s, `\\`...)
default:
if printableASCII := c >= 0x20 && c <= 0x7e; printableASCII {
s = append(s, c)
} else {
s = append(s, fmt.Sprintf(`\%03o`, c)...)
}
}
}
return string(s), true
}
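// Example (added for illustration): marshalBytes([]byte("a\nb\x01")) returns
// `a\nb\001`; printable ASCII is kept as is, while '\n' and the non-printable
// 0x01 byte are escaped. unmarshalBytes of that string recovers the original
// bytes.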

View file

@ -0,0 +1,258 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package messageset encodes and decodes the obsolete MessageSet wire format.
package messageset
import (
"math"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/errors"
pref "google.golang.org/protobuf/reflect/protoreflect"
preg "google.golang.org/protobuf/reflect/protoregistry"
)
// The MessageSet wire format is equivalent to a message defined as follows,
// where each Item defines an extension field with a field number of 'type_id'
// and content of 'message'. MessageSet extensions must be non-repeated message
// fields.
//
// message MessageSet {
// repeated group Item = 1 {
// required int32 type_id = 2;
// required string message = 3;
// }
// }
const (
FieldItem = protowire.Number(1)
FieldTypeID = protowire.Number(2)
FieldMessage = protowire.Number(3)
)
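// Illustrative byte layout (added for clarity, not part of the original
// source): a MessageSet item for extension number 100 carrying a 2-byte
// message payload is encoded as
//
//	0x0b             field 1, start-group  (start of Item)
//	0x10 0x64        field 2, varint       (type_id = 100)
//	0x1a 0x02 <2B>   field 3, bytes        (length-prefixed message)
//	0x0c             field 1, end-group    (end of Item)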
// ExtensionName is the field name for extensions of MessageSet.
//
// A valid MessageSet extension must be of the form:
// message MyMessage {
// extend proto2.bridge.MessageSet {
// optional MyMessage message_set_extension = 1234;
// }
// ...
// }
const ExtensionName = "message_set_extension"
// IsMessageSet returns whether the message uses the MessageSet wire format.
func IsMessageSet(md pref.MessageDescriptor) bool {
xmd, ok := md.(interface{ IsMessageSet() bool })
return ok && xmd.IsMessageSet()
}
// IsMessageSetExtension reports whether this field extends a MessageSet.
func IsMessageSetExtension(fd pref.FieldDescriptor) bool {
if fd.Name() != ExtensionName {
return false
}
if fd.FullName().Parent() != fd.Message().FullName() {
return false
}
return IsMessageSet(fd.ContainingMessage())
}
// FindMessageSetExtension locates a MessageSet extension field by name.
// In text and JSON formats, the extension name used is the message itself.
// The extension field name is derived by appending ExtensionName.
func FindMessageSetExtension(r preg.ExtensionTypeResolver, s pref.FullName) (pref.ExtensionType, error) {
name := s.Append(ExtensionName)
xt, err := r.FindExtensionByName(name)
if err != nil {
if err == preg.NotFound {
return nil, err
}
return nil, errors.Wrap(err, "%q", name)
}
if !IsMessageSetExtension(xt.TypeDescriptor()) {
return nil, preg.NotFound
}
return xt, nil
}
// SizeField returns the size of a MessageSet item field containing an extension
// with the given field number, not counting the contents of the message subfield.
func SizeField(num protowire.Number) int {
return 2*protowire.SizeTag(FieldItem) + protowire.SizeTag(FieldTypeID) + protowire.SizeVarint(uint64(num))
}
// Unmarshal parses a MessageSet.
//
// It calls fn with the type ID and value of each item in the MessageSet.
// Unknown fields are discarded.
//
// If wantLen is true, the item values include the varint length prefix.
// This is ugly, but simplifies the fast-path decoder in internal/impl.
func Unmarshal(b []byte, wantLen bool, fn func(typeID protowire.Number, value []byte) error) error {
for len(b) > 0 {
num, wtyp, n := protowire.ConsumeTag(b)
if n < 0 {
return protowire.ParseError(n)
}
b = b[n:]
if num != FieldItem || wtyp != protowire.StartGroupType {
n := protowire.ConsumeFieldValue(num, wtyp, b)
if n < 0 {
return protowire.ParseError(n)
}
b = b[n:]
continue
}
typeID, value, n, err := ConsumeFieldValue(b, wantLen)
if err != nil {
return err
}
b = b[n:]
if typeID == 0 {
continue
}
if err := fn(typeID, value); err != nil {
return err
}
}
return nil
}
// ConsumeFieldValue parses b as a MessageSet item field value until and including
// the trailing end group marker. It assumes the start group tag has already been parsed.
// It returns the contents of the type_id and message subfields and the total
// item length.
//
// If wantLen is true, the returned message value includes the length prefix.
func ConsumeFieldValue(b []byte, wantLen bool) (typeid protowire.Number, message []byte, n int, err error) {
ilen := len(b)
for {
num, wtyp, n := protowire.ConsumeTag(b)
if n < 0 {
return 0, nil, 0, protowire.ParseError(n)
}
b = b[n:]
switch {
case num == FieldItem && wtyp == protowire.EndGroupType:
if wantLen && len(message) == 0 {
// The message field was missing, which should never happen.
// Be prepared for this case anyway.
message = protowire.AppendVarint(message, 0)
}
return typeid, message, ilen - len(b), nil
case num == FieldTypeID && wtyp == protowire.VarintType:
v, n := protowire.ConsumeVarint(b)
if n < 0 {
return 0, nil, 0, protowire.ParseError(n)
}
b = b[n:]
if v < 1 || v > math.MaxInt32 {
return 0, nil, 0, errors.New("invalid type_id in message set")
}
typeid = protowire.Number(v)
case num == FieldMessage && wtyp == protowire.BytesType:
m, n := protowire.ConsumeBytes(b)
if n < 0 {
return 0, nil, 0, protowire.ParseError(n)
}
if message == nil {
if wantLen {
message = b[:n:n]
} else {
message = m[:len(m):len(m)]
}
} else {
// This case should never happen in practice, but handle it for
// correctness: The MessageSet item contains multiple message
// fields, which need to be merged.
//
// In the case where we're returning the length, this becomes
// quite inefficient since we need to strip the length off
// the existing data and reconstruct it with the combined length.
if wantLen {
_, nn := protowire.ConsumeVarint(message)
m0 := message[nn:]
message = nil
message = protowire.AppendVarint(message, uint64(len(m0)+len(m)))
message = append(message, m0...)
message = append(message, m...)
} else {
message = append(message, m...)
}
}
b = b[n:]
default:
// We have no place to put it, so we just ignore unknown fields.
n := protowire.ConsumeFieldValue(num, wtyp, b)
if n < 0 {
return 0, nil, 0, protowire.ParseError(n)
}
b = b[n:]
}
}
}
// AppendFieldStart appends the start of a MessageSet item field containing
// an extension with the given number. The caller must add the message
// subfield (including the tag).
func AppendFieldStart(b []byte, num protowire.Number) []byte {
b = protowire.AppendTag(b, FieldItem, protowire.StartGroupType)
b = protowire.AppendTag(b, FieldTypeID, protowire.VarintType)
b = protowire.AppendVarint(b, uint64(num))
return b
}
// AppendFieldEnd appends the trailing end group marker for a MessageSet item field.
func AppendFieldEnd(b []byte) []byte {
return protowire.AppendTag(b, FieldItem, protowire.EndGroupType)
}
// SizeUnknown returns the size of an unknown fields section in MessageSet format.
//
// See AppendUnknown.
func SizeUnknown(unknown []byte) (size int) {
for len(unknown) > 0 {
num, typ, n := protowire.ConsumeTag(unknown)
if n < 0 || typ != protowire.BytesType {
return 0
}
unknown = unknown[n:]
_, n = protowire.ConsumeBytes(unknown)
if n < 0 {
return 0
}
unknown = unknown[n:]
size += SizeField(num) + protowire.SizeTag(FieldMessage) + n
}
return size
}
// AppendUnknown appends unknown fields to b in MessageSet format.
//
// For historic reasons, unresolved items in a MessageSet are stored in a
// message's unknown fields section in non-MessageSet format. That is, an
// unknown item with typeID T and value V appears in the unknown fields as
// a field with number T and value V.
//
// This function converts the unknown fields back into MessageSet form.
func AppendUnknown(b, unknown []byte) ([]byte, error) {
for len(unknown) > 0 {
num, typ, n := protowire.ConsumeTag(unknown)
if n < 0 || typ != protowire.BytesType {
return nil, errors.New("invalid data in message set unknown fields")
}
unknown = unknown[n:]
_, n = protowire.ConsumeBytes(unknown)
if n < 0 {
return nil, errors.New("invalid data in message set unknown fields")
}
b = AppendFieldStart(b, num)
b = protowire.AppendTag(b, FieldMessage, protowire.BytesType)
b = append(b, unknown[:n]...)
b = AppendFieldEnd(b)
unknown = unknown[n:]
}
return b, nil
}

View file

@ -0,0 +1,207 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package tag marshals and unmarshals the legacy struct tags as generated
// by historical versions of protoc-gen-go.
package tag
import (
"reflect"
"strconv"
"strings"
defval "google.golang.org/protobuf/internal/encoding/defval"
fdesc "google.golang.org/protobuf/internal/filedesc"
"google.golang.org/protobuf/internal/strs"
pref "google.golang.org/protobuf/reflect/protoreflect"
)
var byteType = reflect.TypeOf(byte(0))
// Unmarshal decodes the tag into a protoreflect.FieldDescriptor.
//
// The goType is needed to determine the original protoreflect.Kind since the
// tag does not record sufficient information to determine that.
// The type is the underlying field type (e.g., a repeated field may be
// represented by []T, but the Go type passed in is just T).
// A list of enum value descriptors must be provided for enum fields.
// This does not populate the Enum or Message (except for weak messages).
//
// This function is a best effort attempt; parsing errors are ignored.
func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) pref.FieldDescriptor {
f := new(fdesc.Field)
f.L0.ParentFile = fdesc.SurrogateProto2
for len(tag) > 0 {
i := strings.IndexByte(tag, ',')
if i < 0 {
i = len(tag)
}
switch s := tag[:i]; {
case strings.HasPrefix(s, "name="):
f.L0.FullName = pref.FullName(s[len("name="):])
case strings.Trim(s, "0123456789") == "":
n, _ := strconv.ParseUint(s, 10, 32)
f.L1.Number = pref.FieldNumber(n)
case s == "opt":
f.L1.Cardinality = pref.Optional
case s == "req":
f.L1.Cardinality = pref.Required
case s == "rep":
f.L1.Cardinality = pref.Repeated
case s == "varint":
switch goType.Kind() {
case reflect.Bool:
f.L1.Kind = pref.BoolKind
case reflect.Int32:
f.L1.Kind = pref.Int32Kind
case reflect.Int64:
f.L1.Kind = pref.Int64Kind
case reflect.Uint32:
f.L1.Kind = pref.Uint32Kind
case reflect.Uint64:
f.L1.Kind = pref.Uint64Kind
}
case s == "zigzag32":
if goType.Kind() == reflect.Int32 {
f.L1.Kind = pref.Sint32Kind
}
case s == "zigzag64":
if goType.Kind() == reflect.Int64 {
f.L1.Kind = pref.Sint64Kind
}
case s == "fixed32":
switch goType.Kind() {
case reflect.Int32:
f.L1.Kind = pref.Sfixed32Kind
case reflect.Uint32:
f.L1.Kind = pref.Fixed32Kind
case reflect.Float32:
f.L1.Kind = pref.FloatKind
}
case s == "fixed64":
switch goType.Kind() {
case reflect.Int64:
f.L1.Kind = pref.Sfixed64Kind
case reflect.Uint64:
f.L1.Kind = pref.Fixed64Kind
case reflect.Float64:
f.L1.Kind = pref.DoubleKind
}
case s == "bytes":
switch {
case goType.Kind() == reflect.String:
f.L1.Kind = pref.StringKind
case goType.Kind() == reflect.Slice && goType.Elem() == byteType:
f.L1.Kind = pref.BytesKind
default:
f.L1.Kind = pref.MessageKind
}
case s == "group":
f.L1.Kind = pref.GroupKind
case strings.HasPrefix(s, "enum="):
f.L1.Kind = pref.EnumKind
case strings.HasPrefix(s, "json="):
jsonName := s[len("json="):]
if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) {
f.L1.JSONName.Init(jsonName)
}
case s == "packed":
f.L1.HasPacked = true
f.L1.IsPacked = true
case strings.HasPrefix(s, "weak="):
f.L1.IsWeak = true
f.L1.Message = fdesc.PlaceholderMessage(pref.FullName(s[len("weak="):]))
case strings.HasPrefix(s, "def="):
// The default tag is special in that everything afterwards is the
// default regardless of the presence of commas.
s, i = tag[len("def="):], len(tag)
v, ev, _ := defval.Unmarshal(s, f.L1.Kind, evs, defval.GoTag)
f.L1.Default = fdesc.DefaultValue(v, ev)
case s == "proto3":
f.L0.ParentFile = fdesc.SurrogateProto3
}
tag = strings.TrimPrefix(tag[i:], ",")
}
// The generator uses the group message name instead of the field name.
// We obtain the real field name by lowercasing the group name.
if f.L1.Kind == pref.GroupKind {
f.L0.FullName = pref.FullName(strings.ToLower(string(f.L0.FullName)))
}
return f
}
// Marshal encodes the protoreflect.FieldDescriptor as a tag.
//
// The enumName must be provided if the kind is an enum.
// Historically, the formulation of the enum "name" was the proto package
// dot-concatenated with the generated Go identifier for the enum type.
// Depending on the context on how Marshal is called, there are different ways
// through which that information is determined. As such it is the caller's
// responsibility to provide a function to obtain that information.
func Marshal(fd pref.FieldDescriptor, enumName string) string {
var tag []string
switch fd.Kind() {
case pref.BoolKind, pref.EnumKind, pref.Int32Kind, pref.Uint32Kind, pref.Int64Kind, pref.Uint64Kind:
tag = append(tag, "varint")
case pref.Sint32Kind:
tag = append(tag, "zigzag32")
case pref.Sint64Kind:
tag = append(tag, "zigzag64")
case pref.Sfixed32Kind, pref.Fixed32Kind, pref.FloatKind:
tag = append(tag, "fixed32")
case pref.Sfixed64Kind, pref.Fixed64Kind, pref.DoubleKind:
tag = append(tag, "fixed64")
case pref.StringKind, pref.BytesKind, pref.MessageKind:
tag = append(tag, "bytes")
case pref.GroupKind:
tag = append(tag, "group")
}
tag = append(tag, strconv.Itoa(int(fd.Number())))
switch fd.Cardinality() {
case pref.Optional:
tag = append(tag, "opt")
case pref.Required:
tag = append(tag, "req")
case pref.Repeated:
tag = append(tag, "rep")
}
if fd.IsPacked() {
tag = append(tag, "packed")
}
name := string(fd.Name())
if fd.Kind() == pref.GroupKind {
// The name of the FieldDescriptor for a group field is
// lowercased. To find the original capitalization, we
// look in the field's MessageType.
name = string(fd.Message().Name())
}
tag = append(tag, "name="+name)
if jsonName := fd.JSONName(); jsonName != "" && jsonName != name && !fd.IsExtension() {
// NOTE: The jsonName != name condition is suspect, but it preserves
// the exact same semantics as the previous generator.
tag = append(tag, "json="+jsonName)
}
if fd.IsWeak() {
tag = append(tag, "weak="+string(fd.Message().FullName()))
}
// The previous implementation does not tag extension fields as proto3,
// even when the field is defined in a proto3 file. Match that behavior
// for consistency.
if fd.Syntax() == pref.Proto3 && !fd.IsExtension() {
tag = append(tag, "proto3")
}
if fd.Kind() == pref.EnumKind && enumName != "" {
tag = append(tag, "enum="+enumName)
}
if fd.ContainingOneof() != nil {
tag = append(tag, "oneof")
}
// This must appear last in the tag, since commas in strings aren't escaped.
if fd.HasDefault() {
def, _ := defval.Marshal(fd.Default(), fd.DefaultEnumValue(), fd.Kind(), defval.GoTag)
tag = append(tag, "def="+def)
}
return strings.Join(tag, ",")
}
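// A hedged illustration (the type and field below are made up, not taken from
// any generated file): the kind of struct tag that Unmarshal parses and that
// Marshal reproduces for a singular proto3 string field numbered 2.
type exampleTagged struct {
	Value string `protobuf:"bytes,2,opt,name=value,proto3"`
}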

View file

@ -0,0 +1,665 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package text
import (
"bytes"
"fmt"
"io"
"regexp"
"strconv"
"unicode/utf8"
"google.golang.org/protobuf/internal/errors"
)
// Decoder is a token-based textproto decoder.
type Decoder struct {
// lastCall is the last method called, either readCall or peekCall.
// Initial value is readCall.
lastCall call
// lastToken contains the last read token.
lastToken Token
// lastErr contains the last read error.
lastErr error
// openStack is a stack containing the byte characters for MessageOpen and
// ListOpen kinds. The top of the stack represents the message or the list that
// the current token is nested in. An empty stack means the current token is
// at the top-level message. The characters '{' and '<' both represent the
// MessageOpen kind.
openStack []byte
// orig is used in reporting line and column.
orig []byte
// in contains the unconsumed input.
in []byte
}
// NewDecoder returns a Decoder to read the given []byte.
func NewDecoder(b []byte) *Decoder {
return &Decoder{orig: b, in: b}
}
// ErrUnexpectedEOF means that EOF was encountered in the middle of the input.
var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF)
// call specifies which Decoder method was invoked.
type call uint8
const (
readCall call = iota
peekCall
)
// Peek looks ahead and returns the next token and error without advancing a read.
func (d *Decoder) Peek() (Token, error) {
defer func() { d.lastCall = peekCall }()
if d.lastCall == readCall {
d.lastToken, d.lastErr = d.Read()
}
return d.lastToken, d.lastErr
}
// Read returns the next token.
// It will return an error if there is no valid token.
func (d *Decoder) Read() (Token, error) {
defer func() { d.lastCall = readCall }()
if d.lastCall == peekCall {
return d.lastToken, d.lastErr
}
tok, err := d.parseNext(d.lastToken.kind)
if err != nil {
return Token{}, err
}
switch tok.kind {
case comma, semicolon:
tok, err = d.parseNext(tok.kind)
if err != nil {
return Token{}, err
}
}
d.lastToken = tok
return tok, nil
}
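// A minimal in-package usage sketch of the token API above (the helper name is
// illustrative): read every token until EOF is reached.
func exampleReadAll(b []byte) ([]Token, error) {
	d := NewDecoder(b)
	var toks []Token
	for {
		tok, err := d.Read()
		if err != nil {
			return nil, err
		}
		if tok.Kind() == EOF {
			return toks, nil
		}
		toks = append(toks, tok)
	}
}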
const (
mismatchedFmt = "mismatched close character %q"
unexpectedFmt = "unexpected character %q"
)
// parseNext parses the next Token based on given last kind.
func (d *Decoder) parseNext(lastKind Kind) (Token, error) {
// Trim leading spaces.
d.consume(0)
isEOF := false
if len(d.in) == 0 {
isEOF = true
}
switch lastKind {
case EOF:
return d.consumeToken(EOF, 0, 0), nil
case bof:
// Start of top level message. Next token can be EOF or Name.
if isEOF {
return d.consumeToken(EOF, 0, 0), nil
}
return d.parseFieldName()
case Name:
// Next token can be MessageOpen, ListOpen or Scalar.
if isEOF {
return Token{}, ErrUnexpectedEOF
}
switch ch := d.in[0]; ch {
case '{', '<':
d.pushOpenStack(ch)
return d.consumeToken(MessageOpen, 1, 0), nil
case '[':
d.pushOpenStack(ch)
return d.consumeToken(ListOpen, 1, 0), nil
default:
return d.parseScalar()
}
case Scalar:
openKind, closeCh := d.currentOpenKind()
switch openKind {
case bof:
// Top level message.
// Next token can be EOF, comma, semicolon or Name.
if isEOF {
return d.consumeToken(EOF, 0, 0), nil
}
switch d.in[0] {
case ',':
return d.consumeToken(comma, 1, 0), nil
case ';':
return d.consumeToken(semicolon, 1, 0), nil
default:
return d.parseFieldName()
}
case MessageOpen:
// Next token can be MessageClose, comma, semicolon or Name.
if isEOF {
return Token{}, ErrUnexpectedEOF
}
switch ch := d.in[0]; ch {
case closeCh:
d.popOpenStack()
return d.consumeToken(MessageClose, 1, 0), nil
case otherCloseChar[closeCh]:
return Token{}, d.newSyntaxError(mismatchedFmt, ch)
case ',':
return d.consumeToken(comma, 1, 0), nil
case ';':
return d.consumeToken(semicolon, 1, 0), nil
default:
return d.parseFieldName()
}
case ListOpen:
// Next token can be ListClose or comma.
if isEOF {
return Token{}, ErrUnexpectedEOF
}
switch ch := d.in[0]; ch {
case ']':
d.popOpenStack()
return d.consumeToken(ListClose, 1, 0), nil
case ',':
return d.consumeToken(comma, 1, 0), nil
default:
return Token{}, d.newSyntaxError(unexpectedFmt, ch)
}
}
case MessageOpen:
// Next token can be MessageClose or Name.
if isEOF {
return Token{}, ErrUnexpectedEOF
}
_, closeCh := d.currentOpenKind()
switch ch := d.in[0]; ch {
case closeCh:
d.popOpenStack()
return d.consumeToken(MessageClose, 1, 0), nil
case otherCloseChar[closeCh]:
return Token{}, d.newSyntaxError(mismatchedFmt, ch)
default:
return d.parseFieldName()
}
case MessageClose:
openKind, closeCh := d.currentOpenKind()
switch openKind {
case bof:
// Top level message.
// Next token can be EOF, comma, semicolon or Name.
if isEOF {
return d.consumeToken(EOF, 0, 0), nil
}
switch ch := d.in[0]; ch {
case ',':
return d.consumeToken(comma, 1, 0), nil
case ';':
return d.consumeToken(semicolon, 1, 0), nil
default:
return d.parseFieldName()
}
case MessageOpen:
// Next token can be MessageClose, comma, semicolon or Name.
if isEOF {
return Token{}, ErrUnexpectedEOF
}
switch ch := d.in[0]; ch {
case closeCh:
d.popOpenStack()
return d.consumeToken(MessageClose, 1, 0), nil
case otherCloseChar[closeCh]:
return Token{}, d.newSyntaxError(mismatchedFmt, ch)
case ',':
return d.consumeToken(comma, 1, 0), nil
case ';':
return d.consumeToken(semicolon, 1, 0), nil
default:
return d.parseFieldName()
}
case ListOpen:
// Next token can be ListClose or comma
if isEOF {
return Token{}, ErrUnexpectedEOF
}
switch ch := d.in[0]; ch {
case closeCh:
d.popOpenStack()
return d.consumeToken(ListClose, 1, 0), nil
case ',':
return d.consumeToken(comma, 1, 0), nil
default:
return Token{}, d.newSyntaxError(unexpectedFmt, ch)
}
}
case ListOpen:
// Next token can be ListClose, MessageStart or Scalar.
if isEOF {
return Token{}, ErrUnexpectedEOF
}
switch ch := d.in[0]; ch {
case ']':
d.popOpenStack()
return d.consumeToken(ListClose, 1, 0), nil
case '{', '<':
d.pushOpenStack(ch)
return d.consumeToken(MessageOpen, 1, 0), nil
default:
return d.parseScalar()
}
case ListClose:
openKind, closeCh := d.currentOpenKind()
switch openKind {
case bof:
// Top level message.
// Next token can be EOF, comma, semicolon or Name.
if isEOF {
return d.consumeToken(EOF, 0, 0), nil
}
switch ch := d.in[0]; ch {
case ',':
return d.consumeToken(comma, 1, 0), nil
case ';':
return d.consumeToken(semicolon, 1, 0), nil
default:
return d.parseFieldName()
}
case MessageOpen:
// Next token can be MessageClose, comma, semicolon or Name.
if isEOF {
return Token{}, ErrUnexpectedEOF
}
switch ch := d.in[0]; ch {
case closeCh:
d.popOpenStack()
return d.consumeToken(MessageClose, 1, 0), nil
case otherCloseChar[closeCh]:
return Token{}, d.newSyntaxError(mismatchedFmt, ch)
case ',':
return d.consumeToken(comma, 1, 0), nil
case ';':
return d.consumeToken(semicolon, 1, 0), nil
default:
return d.parseFieldName()
}
default:
// It is not possible to have this case. Let it panic below.
}
case comma, semicolon:
openKind, closeCh := d.currentOpenKind()
switch openKind {
case bof:
// Top level message. Next token can be EOF or Name.
if isEOF {
return d.consumeToken(EOF, 0, 0), nil
}
return d.parseFieldName()
case MessageOpen:
// Next token can be MessageClose or Name.
if isEOF {
return Token{}, ErrUnexpectedEOF
}
switch ch := d.in[0]; ch {
case closeCh:
d.popOpenStack()
return d.consumeToken(MessageClose, 1, 0), nil
case otherCloseChar[closeCh]:
return Token{}, d.newSyntaxError(mismatchedFmt, ch)
default:
return d.parseFieldName()
}
case ListOpen:
if lastKind == semicolon {
// It is not possible to have this case, as the logic here
// should not have produced a semicolon Token when inside a
// list. Let it panic below.
break
}
// Next token can be MessageOpen or Scalar.
if isEOF {
return Token{}, ErrUnexpectedEOF
}
switch ch := d.in[0]; ch {
case '{', '<':
d.pushOpenStack(ch)
return d.consumeToken(MessageOpen, 1, 0), nil
default:
return d.parseScalar()
}
}
}
line, column := d.Position(len(d.orig) - len(d.in))
panic(fmt.Sprintf("Decoder.parseNext: bug at handling line %d:%d with lastKind=%v", line, column, lastKind))
}
var otherCloseChar = map[byte]byte{
'}': '>',
'>': '}',
}
// currentOpenKind indicates whether current position is inside a message, list
// or top-level message by returning MessageOpen, ListOpen or bof respectively.
// If the returned kind is either a MessageOpen or ListOpen, it also returns the
// corresponding closing character.
func (d *Decoder) currentOpenKind() (Kind, byte) {
if len(d.openStack) == 0 {
return bof, 0
}
openCh := d.openStack[len(d.openStack)-1]
switch openCh {
case '{':
return MessageOpen, '}'
case '<':
return MessageOpen, '>'
case '[':
return ListOpen, ']'
}
panic(fmt.Sprintf("Decoder: openStack contains invalid byte %s", string(openCh)))
}
func (d *Decoder) pushOpenStack(ch byte) {
d.openStack = append(d.openStack, ch)
}
func (d *Decoder) popOpenStack() {
d.openStack = d.openStack[:len(d.openStack)-1]
}
// parseFieldName parses field name and separator.
func (d *Decoder) parseFieldName() (tok Token, err error) {
defer func() {
if err == nil && d.tryConsumeChar(':') {
tok.attrs |= hasSeparator
}
}()
// Extension or Any type URL.
if d.in[0] == '[' {
return d.parseTypeName()
}
// Identifier.
if size := parseIdent(d.in, false); size > 0 {
return d.consumeToken(Name, size, uint8(IdentName)), nil
}
// Field number. Identify if the input is a valid number that is not negative
// and is a decimal integer within the 32-bit range.
if num := parseNumber(d.in); num.size > 0 {
if !num.neg && num.kind == numDec {
if _, err := strconv.ParseInt(string(d.in[:num.size]), 10, 32); err == nil {
return d.consumeToken(Name, num.size, uint8(FieldNumber)), nil
}
}
return Token{}, d.newSyntaxError("invalid field number: %s", d.in[:num.size])
}
return Token{}, d.newSyntaxError("invalid field name: %s", errRegexp.Find(d.in))
}
// parseTypeName parses an Any type URL or extension field name. The name is
// enclosed in [ and ] characters. The C++ parser does not handle many legal URL
// strings. This implementation is more liberal and allows for the pattern
// `^[-_a-zA-Z0-9]+([./][-_a-zA-Z0-9]+)*`. Whitespace and comments are allowed
// in between [ ], '.', '/' and the sub names.
func (d *Decoder) parseTypeName() (Token, error) {
startPos := len(d.orig) - len(d.in)
// Use alias s to advance first in order to use d.in for error handling.
// Caller already checks for [ as first character.
s := consume(d.in[1:], 0)
if len(s) == 0 {
return Token{}, ErrUnexpectedEOF
}
var name []byte
for len(s) > 0 && isTypeNameChar(s[0]) {
name = append(name, s[0])
s = s[1:]
}
s = consume(s, 0)
var closed bool
for len(s) > 0 && !closed {
switch {
case s[0] == ']':
s = s[1:]
closed = true
case s[0] == '/', s[0] == '.':
if len(name) > 0 && (name[len(name)-1] == '/' || name[len(name)-1] == '.') {
return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s",
d.orig[startPos:len(d.orig)-len(s)+1])
}
name = append(name, s[0])
s = s[1:]
s = consume(s, 0)
for len(s) > 0 && isTypeNameChar(s[0]) {
name = append(name, s[0])
s = s[1:]
}
s = consume(s, 0)
default:
return Token{}, d.newSyntaxError(
"invalid type URL/extension field name: %s", d.orig[startPos:len(d.orig)-len(s)+1])
}
}
if !closed {
return Token{}, ErrUnexpectedEOF
}
// First character cannot be '.'. Last character cannot be '.' or '/'.
size := len(name)
if size == 0 || name[0] == '.' || name[size-1] == '.' || name[size-1] == '/' {
return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s",
d.orig[startPos:len(d.orig)-len(s)])
}
d.in = s
endPos := len(d.orig) - len(d.in)
d.consume(0)
return Token{
kind: Name,
attrs: uint8(TypeName),
pos: startPos,
raw: d.orig[startPos:endPos],
str: string(name),
}, nil
}
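// For illustration, textproto names of the forms that parseTypeName accepts:
//
//	[my.pkg.ext_field]                              // extension field name
//	[type.googleapis.com/google.protobuf.Duration]  // Any type URL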
func isTypeNameChar(b byte) bool {
return (b == '-' || b == '_' ||
('0' <= b && b <= '9') ||
('a' <= b && b <= 'z') ||
('A' <= b && b <= 'Z'))
}
func isWhiteSpace(b byte) bool {
switch b {
case ' ', '\n', '\r', '\t':
return true
default:
return false
}
}
// parseIdent parses an unquoted proto identifier and returns size.
// If allowNeg is true, it allows '-' to be the first character in the
// identifier. This is used when parsing literal values like -infinity, etc.
// Regular expression matches an identifier: `^[_a-zA-Z][_a-zA-Z0-9]*`
func parseIdent(input []byte, allowNeg bool) int {
var size int
s := input
if len(s) == 0 {
return 0
}
if allowNeg && s[0] == '-' {
s = s[1:]
size++
if len(s) == 0 {
return 0
}
}
switch {
case s[0] == '_',
'a' <= s[0] && s[0] <= 'z',
'A' <= s[0] && s[0] <= 'Z':
s = s[1:]
size++
default:
return 0
}
for len(s) > 0 && (s[0] == '_' ||
'a' <= s[0] && s[0] <= 'z' ||
'A' <= s[0] && s[0] <= 'Z' ||
'0' <= s[0] && s[0] <= '9') {
s = s[1:]
size++
}
if len(s) > 0 && !isDelim(s[0]) {
return 0
}
return size
}
// parseScalar parses for a string, literal or number value.
func (d *Decoder) parseScalar() (Token, error) {
if d.in[0] == '"' || d.in[0] == '\'' {
return d.parseStringValue()
}
if tok, ok := d.parseLiteralValue(); ok {
return tok, nil
}
if tok, ok := d.parseNumberValue(); ok {
return tok, nil
}
return Token{}, d.newSyntaxError("invalid scalar value: %s", errRegexp.Find(d.in))
}
// parseLiteralValue parses a literal value. A literal value is used for
// bools, special floats and enums. This function simply identifies that the
// field value is a literal.
func (d *Decoder) parseLiteralValue() (Token, bool) {
size := parseIdent(d.in, true)
if size == 0 {
return Token{}, false
}
return d.consumeToken(Scalar, size, literalValue), true
}
// consumeToken constructs a Token of the given Kind from d.in and consumes
// the given number of bytes (size) from it.
func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token {
// Important to compute raw and pos before consuming.
tok := Token{
kind: kind,
attrs: attrs,
pos: len(d.orig) - len(d.in),
raw: d.in[:size],
}
d.consume(size)
return tok
}
// newSyntaxError returns a syntax error with line and column information for
// current position.
func (d *Decoder) newSyntaxError(f string, x ...interface{}) error {
e := errors.New(f, x...)
line, column := d.Position(len(d.orig) - len(d.in))
return errors.New("syntax error (line %d:%d): %v", line, column, e)
}
// Position returns line and column number of given index of the original input.
// It will panic if index is out of range.
func (d *Decoder) Position(idx int) (line int, column int) {
b := d.orig[:idx]
line = bytes.Count(b, []byte("\n")) + 1
if i := bytes.LastIndexByte(b, '\n'); i >= 0 {
b = b[i+1:]
}
column = utf8.RuneCount(b) + 1 // ignore multi-rune characters
return line, column
}
func (d *Decoder) tryConsumeChar(c byte) bool {
if len(d.in) > 0 && d.in[0] == c {
d.consume(1)
return true
}
return false
}
// consume consumes n bytes of input and any subsequent whitespace or comments.
func (d *Decoder) consume(n int) {
d.in = consume(d.in, n)
return
}
// consume consumes n bytes of input and any subsequent whitespace or comments.
func consume(b []byte, n int) []byte {
b = b[n:]
for len(b) > 0 {
switch b[0] {
case ' ', '\n', '\r', '\t':
b = b[1:]
case '#':
if i := bytes.IndexByte(b, '\n'); i >= 0 {
b = b[i+len("\n"):]
} else {
b = nil
}
default:
return b
}
}
return b
}
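// For illustration, consume above skips whitespace and '#' comments up to the
// end of the line, so the following two inputs produce the same token stream:
//
//	foo: 1  # trailing comment
//	foo:1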
// Any sequence that looks like a non-delimiter (for error reporting).
var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9\/]+|.)`)
// isDelim returns true if given byte is a delimiter character.
func isDelim(c byte) bool {
return !(c == '-' || c == '+' || c == '.' || c == '_' ||
('a' <= c && c <= 'z') ||
('A' <= c && c <= 'Z') ||
('0' <= c && c <= '9'))
}

View file

@ -0,0 +1,190 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package text
// parseNumberValue parses a number from the input and returns a Token object.
func (d *Decoder) parseNumberValue() (Token, bool) {
in := d.in
num := parseNumber(in)
if num.size == 0 {
return Token{}, false
}
numAttrs := num.kind
if num.neg {
numAttrs |= isNegative
}
strSize := num.size
last := num.size - 1
if num.kind == numFloat && (d.in[last] == 'f' || d.in[last] == 'F') {
strSize = last
}
tok := Token{
kind: Scalar,
attrs: numberValue,
pos: len(d.orig) - len(d.in),
raw: d.in[:num.size],
str: string(d.in[:strSize]),
numAttrs: numAttrs,
}
d.consume(num.size)
return tok, true
}
const (
numDec uint8 = (1 << iota) / 2
numHex
numOct
numFloat
)
// number is the result of parsing out a valid number from parseNumber. It
// contains data for doing float or integer conversion via the strconv package
// in conjunction with the input bytes.
type number struct {
kind uint8
neg bool
size int
}
// parseNumber constructs a number object from given input. It allows for the
// following patterns:
// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*)
// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?)
// It also returns the number of parsed bytes for the given number, 0 if it is
// not a number.
func parseNumber(input []byte) number {
kind := numDec
var size int
var neg bool
s := input
if len(s) == 0 {
return number{}
}
// Optional -
if s[0] == '-' {
neg = true
s = s[1:]
size++
if len(s) == 0 {
return number{}
}
}
// C++ allows for whitespace and comments in between the negative sign and
// the rest of the number. This logic currently does not allow that, but it is
// consistent with v1.
switch {
case s[0] == '0':
if len(s) > 1 {
switch {
case s[1] == 'x' || s[1] == 'X':
// Parse as hex number.
kind = numHex
n := 2
s = s[2:]
for len(s) > 0 && (('0' <= s[0] && s[0] <= '9') ||
('a' <= s[0] && s[0] <= 'f') ||
('A' <= s[0] && s[0] <= 'F')) {
s = s[1:]
n++
}
if n == 2 {
return number{}
}
size += n
case '0' <= s[1] && s[1] <= '7':
// Parse as octal number.
kind = numOct
n := 2
s = s[2:]
for len(s) > 0 && '0' <= s[0] && s[0] <= '7' {
s = s[1:]
n++
}
size += n
}
if kind&(numHex|numOct) > 0 {
if len(s) > 0 && !isDelim(s[0]) {
return number{}
}
return number{kind: kind, neg: neg, size: size}
}
}
s = s[1:]
size++
case '1' <= s[0] && s[0] <= '9':
n := 1
s = s[1:]
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
s = s[1:]
n++
}
size += n
case s[0] == '.':
// Set kind to numFloat to signify the intent to parse as a float, and
// that the '.' must be followed by other digits.
kind = numFloat
default:
return number{}
}
// . followed by 0 or more digits.
if len(s) > 0 && s[0] == '.' {
n := 1
s = s[1:]
// If the decimal point was not preceded by any digits, it must be
// followed by other digits.
if len(s) == 0 && kind == numFloat {
return number{}
}
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
s = s[1:]
n++
}
size += n
kind = numFloat
}
// e or E followed by an optional - or + and 1 or more digits.
if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
kind = numFloat
s = s[1:]
n := 1
if s[0] == '+' || s[0] == '-' {
s = s[1:]
n++
if len(s) == 0 {
return number{}
}
}
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
s = s[1:]
n++
}
size += n
}
// Optional suffix f or F for floats.
if len(s) > 0 && (s[0] == 'f' || s[0] == 'F') {
kind = numFloat
s = s[1:]
size++
}
// Check that next byte is a delimiter or it is at the end.
if len(s) > 0 && !isDelim(s[0]) {
return number{}
}
return number{kind: kind, neg: neg, size: size}
}
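// Hedged worked examples of the grammar above (the helper name is
// illustrative); each example ends at end of input, so the whole literal is
// consumed:
func exampleNumberKinds() []number {
	return []number{
		parseNumber([]byte("-0x1f")),  // kind: numHex,   neg: true,  size: 5
		parseNumber([]byte("017")),    // kind: numOct,   neg: false, size: 3
		parseNumber([]byte("1.5e-3")), // kind: numFloat, neg: false, size: 6
	}
}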

View file

@ -0,0 +1,161 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package text
import (
"bytes"
"strconv"
"strings"
"unicode"
"unicode/utf16"
"unicode/utf8"
"google.golang.org/protobuf/internal/strs"
)
// parseStringValue parses a string field token.
// This differs from parseString since the text format allows
// multiple back-to-back string literals where they are semantically treated
// as a single large string with all values concatenated.
//
// E.g., `"foo" "bar" "baz"` => "foobarbaz"
func (d *Decoder) parseStringValue() (Token, error) {
// Note that the ending quote is sufficient to unambiguously mark the end
// of a string. Thus, the text grammar does not require intervening
// whitespace or control characters in-between strings.
// Thus, the following is valid:
// `"foo"'bar'"baz"` => "foobarbaz"
in0 := d.in
var ss []string
for len(d.in) > 0 && (d.in[0] == '"' || d.in[0] == '\'') {
s, err := d.parseString()
if err != nil {
return Token{}, err
}
ss = append(ss, s)
}
// d.in already points to the end of the value at this point.
return Token{
kind: Scalar,
attrs: stringValue,
pos: len(d.orig) - len(in0),
raw: in0[:len(in0)-len(d.in)],
str: strings.Join(ss, ""),
}, nil
}
// parseString parses a string value enclosed in " or '.
func (d *Decoder) parseString() (string, error) {
in := d.in
if len(in) == 0 {
return "", ErrUnexpectedEOF
}
quote := in[0]
in = in[1:]
i := indexNeedEscapeInBytes(in)
in, out := in[i:], in[:i:i] // set cap to prevent mutations
for len(in) > 0 {
switch r, n := utf8.DecodeRune(in); {
case r == utf8.RuneError && n == 1:
return "", d.newSyntaxError("invalid UTF-8 detected")
case r == 0 || r == '\n':
return "", d.newSyntaxError("invalid character %q in string", r)
case r == rune(quote):
in = in[1:]
d.consume(len(d.in) - len(in))
return string(out), nil
case r == '\\':
if len(in) < 2 {
return "", ErrUnexpectedEOF
}
switch r := in[1]; r {
case '"', '\'', '\\', '?':
in, out = in[2:], append(out, r)
case 'a':
in, out = in[2:], append(out, '\a')
case 'b':
in, out = in[2:], append(out, '\b')
case 'n':
in, out = in[2:], append(out, '\n')
case 'r':
in, out = in[2:], append(out, '\r')
case 't':
in, out = in[2:], append(out, '\t')
case 'v':
in, out = in[2:], append(out, '\v')
case 'f':
in, out = in[2:], append(out, '\f')
case '0', '1', '2', '3', '4', '5', '6', '7':
// One, two, or three octal characters.
n := len(in[1:]) - len(bytes.TrimLeft(in[1:], "01234567"))
if n > 3 {
n = 3
}
v, err := strconv.ParseUint(string(in[1:1+n]), 8, 8)
if err != nil {
return "", d.newSyntaxError("invalid octal escape code %q in string", in[:1+n])
}
in, out = in[1+n:], append(out, byte(v))
case 'x':
// One or two hexadecimal characters.
n := len(in[2:]) - len(bytes.TrimLeft(in[2:], "0123456789abcdefABCDEF"))
if n > 2 {
n = 2
}
v, err := strconv.ParseUint(string(in[2:2+n]), 16, 8)
if err != nil {
return "", d.newSyntaxError("invalid hex escape code %q in string", in[:2+n])
}
in, out = in[2+n:], append(out, byte(v))
case 'u', 'U':
// Four or eight hexadecimal characters
n := 6
if r == 'U' {
n = 10
}
if len(in) < n {
return "", ErrUnexpectedEOF
}
v, err := strconv.ParseUint(string(in[2:n]), 16, 32)
if utf8.MaxRune < v || err != nil {
return "", d.newSyntaxError("invalid Unicode escape code %q in string", in[:n])
}
in = in[n:]
r := rune(v)
if utf16.IsSurrogate(r) {
if len(in) < 6 {
return "", ErrUnexpectedEOF
}
v, err := strconv.ParseUint(string(in[2:6]), 16, 16)
r = utf16.DecodeRune(r, rune(v))
if in[0] != '\\' || in[1] != 'u' || r == unicode.ReplacementChar || err != nil {
return "", d.newSyntaxError("invalid Unicode escape code %q in string", in[:6])
}
in = in[6:]
}
out = append(out, string(r)...)
default:
return "", d.newSyntaxError("invalid escape code %q in string", in[:2])
}
default:
i := indexNeedEscapeInBytes(in[n:])
in, out = in[n+i:], append(out, in[:n+i]...)
}
}
return "", ErrUnexpectedEOF
}
// indexNeedEscapeInBytes returns the index of the character that needs
// escaping. If no characters need escaping, this returns the input length.
func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) }
// UnmarshalString returns an unescaped string given a textproto string value.
// The string value must be enclosed in single or double quotes. This is only
// used by the internal/encoding/defval package for unmarshaling bytes.
func UnmarshalString(s string) (string, error) {
d := NewDecoder([]byte(s))
return d.parseString()
}
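// A minimal sketch of UnmarshalString on escaped input (the helper name is
// illustrative): the returned string contains a real apostrophe and tab.
func exampleUnmarshalString() (string, error) {
	return UnmarshalString(`'it\'s\tfine'`) // yields "it's\tfine"
}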

View file

@ -0,0 +1,373 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package text
import (
"bytes"
"fmt"
"math"
"strconv"
"strings"
"google.golang.org/protobuf/internal/flags"
)
// Kind represents a token kind expressible in the textproto format.
type Kind uint8
// Kind values.
const (
Invalid Kind = iota
EOF
Name // Name indicates the field name.
Scalar // Scalar are scalar values, e.g. "string", 47, ENUM_LITERAL, true.
MessageOpen
MessageClose
ListOpen
ListClose
// comma and semi-colon are only for parsing in between values and should not be exposed.
comma
semicolon
// bof indicates beginning of file, which is the default token
// kind at the beginning of parsing.
bof = Invalid
)
func (t Kind) String() string {
switch t {
case Invalid:
return "<invalid>"
case EOF:
return "eof"
case Scalar:
return "scalar"
case Name:
return "name"
case MessageOpen:
return "{"
case MessageClose:
return "}"
case ListOpen:
return "["
case ListClose:
return "]"
case comma:
return ","
case semicolon:
return ";"
default:
return fmt.Sprintf("<invalid:%v>", uint8(t))
}
}
// NameKind represents different types of field names.
type NameKind uint8
// NameKind values.
const (
IdentName NameKind = iota + 1
TypeName
FieldNumber
)
func (t NameKind) String() string {
switch t {
case IdentName:
return "IdentName"
case TypeName:
return "TypeName"
case FieldNumber:
return "FieldNumber"
default:
return fmt.Sprintf("<invalid:%v>", uint8(t))
}
}
// Bit mask in Token.attrs to indicate if a Name token is followed by the
// separator char ':'. The field name separator char is optional for message
// fields and repeated message fields, but required for all other types. The
// Decoder simply indicates whether a Name token is followed by the separator;
// it is up to the prototext package to validate.
const hasSeparator = 1 << 7
// Scalar value types.
const (
numberValue = iota + 1
stringValue
literalValue
)
// Bit mask in Token.numAttrs to indicate that the number is negative.
const isNegative = 1 << 7
// Token provides a parsed token kind and value. Values are provided by the
// different accessor methods.
type Token struct {
// Kind of the Token object.
kind Kind
// attrs contains metadata for the following Kinds:
// Name: hasSeparator bit and one of NameKind.
// Scalar: one of numberValue, stringValue, literalValue.
attrs uint8
// numAttrs contains metadata for numberValue:
// - highest bit is whether negative or positive.
// - lower bits indicate one of numDec, numHex, numOct, numFloat.
numAttrs uint8
// pos provides the position of the token in the original input.
pos int
// raw bytes of the serialized token.
// This is a subslice into the original input.
raw []byte
// str contains parsed string for the following:
// - stringValue of Scalar kind
// - numberValue of Scalar kind
// - TypeName of Name kind
str string
}
// Kind returns the token kind.
func (t Token) Kind() Kind {
return t.kind
}
// RawString returns the read value in string.
func (t Token) RawString() string {
return string(t.raw)
}
// Pos returns the token position from the input.
func (t Token) Pos() int {
return t.pos
}
// NameKind returns IdentName, TypeName or FieldNumber.
// It panics if type is not Name.
func (t Token) NameKind() NameKind {
if t.kind == Name {
return NameKind(t.attrs &^ hasSeparator)
}
panic(fmt.Sprintf("Token is not a Name type: %s", t.kind))
}
// HasSeparator returns true if the field name is followed by the separator char
// ':', else false. It panics if type is not Name.
func (t Token) HasSeparator() bool {
if t.kind == Name {
return t.attrs&hasSeparator != 0
}
panic(fmt.Sprintf("Token is not a Name type: %s", t.kind))
}
// IdentName returns the value for IdentName type.
func (t Token) IdentName() string {
if t.kind == Name && t.attrs&uint8(IdentName) != 0 {
return string(t.raw)
}
panic(fmt.Sprintf("Token is not an IdentName: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator)))
}
// TypeName returns the value for TypeName type.
func (t Token) TypeName() string {
if t.kind == Name && t.attrs&uint8(TypeName) != 0 {
return t.str
}
panic(fmt.Sprintf("Token is not a TypeName: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator)))
}
// FieldNumber returns the value for FieldNumber type. It returns a
// non-negative int32 value. Caller will still need to validate for the correct
// field number range.
func (t Token) FieldNumber() int32 {
if t.kind != Name || t.attrs&uint8(FieldNumber) == 0 {
panic(fmt.Sprintf("Token is not a FieldNumber: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator)))
}
// The following should not return an error, as it was already called
// successfully right before this Token was constructed.
num, _ := strconv.ParseInt(string(t.raw), 10, 32)
return int32(num)
}
// String returns the string value for a Scalar type.
func (t Token) String() (string, bool) {
if t.kind != Scalar || t.attrs != stringValue {
return "", false
}
return t.str, true
}
// Enum returns the literal value for a Scalar type for use as enum literals.
func (t Token) Enum() (string, bool) {
if t.kind != Scalar || t.attrs != literalValue || (len(t.raw) > 0 && t.raw[0] == '-') {
return "", false
}
return string(t.raw), true
}
// Bool returns the bool value for a Scalar type.
func (t Token) Bool() (bool, bool) {
if t.kind != Scalar {
return false, false
}
switch t.attrs {
case literalValue:
if b, ok := boolLits[string(t.raw)]; ok {
return b, true
}
case numberValue:
// Unsigned integer representation of 0 or 1 is permitted: 00, 0x0, 01,
// 0x1, etc.
n, err := strconv.ParseUint(t.str, 0, 64)
if err == nil {
switch n {
case 0:
return false, true
case 1:
return true, true
}
}
}
return false, false
}
// These exact boolean literals are the ones supported in C++.
var boolLits = map[string]bool{
"t": true,
"true": true,
"True": true,
"f": false,
"false": false,
"False": false,
}
// Uint64 returns the uint64 value for a Scalar type.
func (t Token) Uint64() (uint64, bool) {
if t.kind != Scalar || t.attrs != numberValue ||
t.numAttrs&isNegative > 0 || t.numAttrs&numFloat > 0 {
return 0, false
}
n, err := strconv.ParseUint(t.str, 0, 64)
if err != nil {
return 0, false
}
return n, true
}
// Uint32 returns the uint32 value for a Scalar type.
func (t Token) Uint32() (uint32, bool) {
if t.kind != Scalar || t.attrs != numberValue ||
t.numAttrs&isNegative > 0 || t.numAttrs&numFloat > 0 {
return 0, false
}
n, err := strconv.ParseUint(t.str, 0, 32)
if err != nil {
return 0, false
}
return uint32(n), true
}
// Int64 returns the int64 value for a Scalar type.
func (t Token) Int64() (int64, bool) {
if t.kind != Scalar || t.attrs != numberValue || t.numAttrs&numFloat > 0 {
return 0, false
}
if n, err := strconv.ParseInt(t.str, 0, 64); err == nil {
return n, true
}
// C++ accepts large positive hex numbers as negative values.
// This feature is here for proto1 backwards compatibility purposes.
if flags.ProtoLegacy && (t.numAttrs == numHex) {
if n, err := strconv.ParseUint(t.str, 0, 64); err == nil {
return int64(n), true
}
}
return 0, false
}
// Int32 returns the int32 value for a Scalar type.
func (t Token) Int32() (int32, bool) {
if t.kind != Scalar || t.attrs != numberValue || t.numAttrs&numFloat > 0 {
return 0, false
}
if n, err := strconv.ParseInt(t.str, 0, 32); err == nil {
return int32(n), true
}
// C++ accepts large positive hex numbers as negative values.
// This feature is here for proto1 backwards compatibility purposes.
if flags.ProtoLegacy && (t.numAttrs == numHex) {
if n, err := strconv.ParseUint(t.str, 0, 32); err == nil {
return int32(n), true
}
}
return 0, false
}
// Float64 returns the float64 value for a Scalar type.
func (t Token) Float64() (float64, bool) {
if t.kind != Scalar {
return 0, false
}
switch t.attrs {
case literalValue:
if f, ok := floatLits[strings.ToLower(string(t.raw))]; ok {
return f, true
}
case numberValue:
n, err := strconv.ParseFloat(t.str, 64)
if err == nil {
return n, true
}
nerr := err.(*strconv.NumError)
if nerr.Err == strconv.ErrRange {
return n, true
}
}
return 0, false
}
// Float32 returns the float32 value for a Scalar type.
func (t Token) Float32() (float32, bool) {
if t.kind != Scalar {
return 0, false
}
switch t.attrs {
case literalValue:
if f, ok := floatLits[strings.ToLower(string(t.raw))]; ok {
return float32(f), true
}
case numberValue:
n, err := strconv.ParseFloat(t.str, 64)
if err == nil {
// Overflows are treated as (-)infinity.
return float32(n), true
}
nerr := err.(*strconv.NumError)
if nerr.Err == strconv.ErrRange {
return float32(n), true
}
}
return 0, false
}
// These are the supported float literals; C++ permits case-insensitive
// variants of these.
var floatLits = map[string]float64{
"nan": math.NaN(),
"inf": math.Inf(1),
"infinity": math.Inf(1),
"-inf": math.Inf(-1),
"-infinity": math.Inf(-1),
}
// TokenEquals returns true if given Tokens are equal, else false.
func TokenEquals(x, y Token) bool {
return x.kind == y.kind &&
x.attrs == y.attrs &&
x.numAttrs == y.numAttrs &&
x.pos == y.pos &&
bytes.Equal(x.raw, y.raw) &&
x.str == y.str
}
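// A minimal in-package sketch of the accessor pattern above (the helper name
// is illustrative): each accessor reports via its second result whether the
// Scalar token can be interpreted as that type.
func exampleScalarValue(tok Token) interface{} {
	if tok.Kind() != Scalar {
		return nil
	}
	if s, ok := tok.String(); ok {
		return s
	}
	if n, ok := tok.Int64(); ok {
		return n
	}
	if f, ok := tok.Float64(); ok {
		return f
	}
	if b, ok := tok.Bool(); ok {
		return b
	}
	return tok.RawString()
}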

View file

@ -0,0 +1,29 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package text implements the text format for protocol buffers.
// This package has no semantic understanding for protocol buffers and is only
// a parser and composer for the format.
//
// There is no formal specification for the protobuf text format; as such, the
// C++ implementation (see google::protobuf::TextFormat) is the reference
// implementation of the text format.
//
// This package is neither a superset nor a subset of the C++ implementation.
// This implementation permits a more liberal grammar in some cases to be
// backwards compatible with the historical Go implementation.
// New parsing behavior unique to Go should not be added.
// Some grammars allowed by the C++ implementation are deliberately
// not implemented here because they are considered a bug by the protobuf team
// and should not be replicated.
//
// The Go implementation should implement a sufficient amount of the C++
// grammar such that the default text serialization by C++ can be parsed by Go.
// However, just because the C++ parser accepts some input does not mean that
// the Go implementation should as well.
//
// The text format is almost a superset of JSON except:
// * message keys are not quoted strings, but identifiers
// * the top-level value must be a message without the delimiters
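// For example, the message encoded in JSON as {"name": "foo", "id": 1} is
// written in the text format as:
//	name: "foo" id: 1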
package text

View file

@ -0,0 +1,267 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package text
import (
"math"
"math/bits"
"strconv"
"strings"
"unicode/utf8"
"google.golang.org/protobuf/internal/detrand"
"google.golang.org/protobuf/internal/errors"
)
// encType represents an encoding type.
type encType uint8
const (
_ encType = (1 << iota) / 2
name
scalar
messageOpen
messageClose
)
// Encoder provides methods to write out textproto constructs and values. The user is
// responsible for producing valid sequences of constructs and values.
type Encoder struct {
encoderState
indent string
newline string // set to "\n" if len(indent) > 0
delims [2]byte
outputASCII bool
}
type encoderState struct {
lastType encType
indents []byte
out []byte
}
// NewEncoder returns an Encoder.
//
// If indent is a non-empty string, it causes every entry in a List or Message
// to be preceded by the indent and trailed by a newline.
//
// If delims is not the zero value, it controls the delimiter characters used
// for messages (e.g., "{}" vs "<>").
//
// If outputASCII is true, strings will be serialized in such a way that
// multi-byte UTF-8 sequences are escaped. This property ensures that the
// overall output is ASCII (as opposed to UTF-8).
func NewEncoder(indent string, delims [2]byte, outputASCII bool) (*Encoder, error) {
e := &Encoder{}
if len(indent) > 0 {
if strings.Trim(indent, " \t") != "" {
return nil, errors.New("indent may only be composed of space and tab characters")
}
e.indent = indent
e.newline = "\n"
}
switch delims {
case [2]byte{0, 0}:
e.delims = [2]byte{'{', '}'}
case [2]byte{'{', '}'}, [2]byte{'<', '>'}:
e.delims = delims
default:
return nil, errors.New("delimiters may only be \"{}\" or \"<>\"")
}
e.outputASCII = outputASCII
return e, nil
}
// Bytes returns the content of the written bytes.
func (e *Encoder) Bytes() []byte {
return e.out
}
// StartMessage writes out the '{' or '<' symbol.
func (e *Encoder) StartMessage() {
e.prepareNext(messageOpen)
e.out = append(e.out, e.delims[0])
}
// EndMessage writes out the '}' or '>' symbol.
func (e *Encoder) EndMessage() {
e.prepareNext(messageClose)
e.out = append(e.out, e.delims[1])
}
// WriteName writes out the field name and the separator ':'.
func (e *Encoder) WriteName(s string) {
e.prepareNext(name)
e.out = append(e.out, s...)
e.out = append(e.out, ':')
}
// WriteBool writes out the given boolean value.
func (e *Encoder) WriteBool(b bool) {
if b {
e.WriteLiteral("true")
} else {
e.WriteLiteral("false")
}
}
// WriteString writes out the given string value.
func (e *Encoder) WriteString(s string) {
e.prepareNext(scalar)
e.out = appendString(e.out, s, e.outputASCII)
}
func appendString(out []byte, in string, outputASCII bool) []byte {
out = append(out, '"')
i := indexNeedEscapeInString(in)
in, out = in[i:], append(out, in[:i]...)
for len(in) > 0 {
switch r, n := utf8.DecodeRuneInString(in); {
case r == utf8.RuneError && n == 1:
// We do not report invalid UTF-8 because strings in the text format
// are used to represent both the proto string and bytes type.
r = rune(in[0])
fallthrough
case r < ' ' || r == '"' || r == '\\':
out = append(out, '\\')
switch r {
case '"', '\\':
out = append(out, byte(r))
case '\n':
out = append(out, 'n')
case '\r':
out = append(out, 'r')
case '\t':
out = append(out, 't')
default:
out = append(out, 'x')
out = append(out, "00"[1+(bits.Len32(uint32(r))-1)/4:]...)
out = strconv.AppendUint(out, uint64(r), 16)
}
in = in[n:]
case outputASCII && r >= utf8.RuneSelf:
out = append(out, '\\')
if r <= math.MaxUint16 {
out = append(out, 'u')
out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...)
out = strconv.AppendUint(out, uint64(r), 16)
} else {
out = append(out, 'U')
out = append(out, "00000000"[1+(bits.Len32(uint32(r))-1)/4:]...)
out = strconv.AppendUint(out, uint64(r), 16)
}
in = in[n:]
default:
i := indexNeedEscapeInString(in[n:])
in, out = in[n+i:], append(out, in[:n+i]...)
}
}
out = append(out, '"')
return out
}
// indexNeedEscapeInString returns the index of the character that needs
// escaping. If no characters need escaping, this returns the input length.
func indexNeedEscapeInString(s string) int {
for i := 0; i < len(s); i++ {
if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= utf8.RuneSelf {
return i
}
}
return len(s)
}
// WriteFloat writes out the given float value for given bitSize.
func (e *Encoder) WriteFloat(n float64, bitSize int) {
e.prepareNext(scalar)
e.out = appendFloat(e.out, n, bitSize)
}
func appendFloat(out []byte, n float64, bitSize int) []byte {
switch {
case math.IsNaN(n):
return append(out, "nan"...)
case math.IsInf(n, +1):
return append(out, "inf"...)
case math.IsInf(n, -1):
return append(out, "-inf"...)
default:
return strconv.AppendFloat(out, n, 'g', -1, bitSize)
}
}
// WriteInt writes out the given signed integer value.
func (e *Encoder) WriteInt(n int64) {
e.prepareNext(scalar)
e.out = append(e.out, strconv.FormatInt(n, 10)...)
}
// WriteUint writes out the given unsigned integer value.
func (e *Encoder) WriteUint(n uint64) {
e.prepareNext(scalar)
e.out = append(e.out, strconv.FormatUint(n, 10)...)
}
// WriteLiteral writes out the given string as a literal value without quotes.
// This is used for writing enum literal strings.
func (e *Encoder) WriteLiteral(s string) {
e.prepareNext(scalar)
e.out = append(e.out, s...)
}
// prepareNext adds any necessary space and indentation for the next value,
// based on the last encType and the indent option. It also updates e.lastType
// to next.
func (e *Encoder) prepareNext(next encType) {
defer func() {
e.lastType = next
}()
// Single line.
if len(e.indent) == 0 {
// Add space after each field before the next one.
if e.lastType&(scalar|messageClose) != 0 && next == name {
e.out = append(e.out, ' ')
// Add a random extra space to make output unstable.
if detrand.Bool() {
e.out = append(e.out, ' ')
}
}
return
}
// Multi-line.
switch {
case e.lastType == name:
e.out = append(e.out, ' ')
// Add a random extra space after name: to make output unstable.
if detrand.Bool() {
e.out = append(e.out, ' ')
}
case e.lastType == messageOpen && next != messageClose:
e.indents = append(e.indents, e.indent...)
e.out = append(e.out, '\n')
e.out = append(e.out, e.indents...)
case e.lastType&(scalar|messageClose) != 0:
if next == messageClose {
e.indents = e.indents[:len(e.indents)-len(e.indent)]
}
e.out = append(e.out, '\n')
e.out = append(e.out, e.indents...)
}
}
// Snapshot returns the current snapshot for use in Reset.
func (e *Encoder) Snapshot() encoderState {
return e.encoderState
}
// Reset resets the Encoder to the given encoderState from a Snapshot.
func (e *Encoder) Reset(es encoderState) {
e.encoderState = es
}
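// A minimal in-package usage sketch of the Encoder (the helper name is
// illustrative). Exact spacing is deliberately unstable because prepareNext
// may insert a random extra space.
func exampleEncode() ([]byte, error) {
	e, err := NewEncoder("", [2]byte{'{', '}'}, false)
	if err != nil {
		return nil, err
	}
	e.WriteName("name")
	e.WriteString("hello")
	e.WriteName("count")
	e.WriteUint(42)
	// Result reads roughly: name:"hello" count:42
	return e.Bytes(), nil
}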

View file

@ -0,0 +1,89 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package errors implements functions to manipulate errors.
package errors
import (
"errors"
"fmt"
"google.golang.org/protobuf/internal/detrand"
)
// Error is a sentinel matching all errors produced by this package.
var Error = errors.New("protobuf error")
// New formats a string according to the format specifier and arguments and
// returns an error that has a "proto" prefix.
func New(f string, x ...interface{}) error {
return &prefixError{s: format(f, x...)}
}
type prefixError struct{ s string }
var prefix = func() string {
// Deliberately introduce instability into the error message string to
// discourage users from performing error string comparisons.
if detrand.Bool() {
return "proto: " // use non-breaking spaces (U+00a0)
} else {
return "proto: " // use regular spaces (U+0020)
}
}()
func (e *prefixError) Error() string {
return prefix + e.s
}
func (e *prefixError) Unwrap() error {
return Error
}
// Wrap returns an error that has a "proto" prefix, the formatted string described
// by the format specifier and arguments, and a suffix of err. The error wraps err.
func Wrap(err error, f string, x ...interface{}) error {
return &wrapError{
s: format(f, x...),
err: err,
}
}
type wrapError struct {
s string
err error
}
func (e *wrapError) Error() string {
return format("%v%v: %v", prefix, e.s, e.err)
}
func (e *wrapError) Unwrap() error {
return e.err
}
func (e *wrapError) Is(target error) bool {
return target == Error
}
func format(f string, x ...interface{}) string {
// avoid "proto: " prefix when chaining
for i := 0; i < len(x); i++ {
switch e := x[i].(type) {
case *prefixError:
x[i] = e.s
case *wrapError:
x[i] = format("%v: %v", e.s, e.err)
}
}
return fmt.Sprintf(f, x...)
}
func InvalidUTF8(name string) error {
return New("field %v contains invalid UTF-8", name)
}
func RequiredNotSet(name string) error {
return New("required field %v not set", name)
}
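// A minimal in-package sketch of how these helpers compose (the helper name is
// illustrative): the "proto" prefix is added only once, and both errors match
// the package sentinel.
func exampleWrap() bool {
	inner := InvalidUTF8("name")
	outer := Wrap(inner, "unable to unmarshal")
	// outer.Error() reads roughly:
	//   proto: unable to unmarshal: field name contains invalid UTF-8
	return Is(outer, Error) && Is(inner, Error)
}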

View file

@ -0,0 +1,39 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.13
package errors
import "reflect"
// Is is a copy of Go 1.13's errors.Is for use with older Go versions.
func Is(err, target error) bool {
if target == nil {
return err == target
}
isComparable := reflect.TypeOf(target).Comparable()
for {
if isComparable && err == target {
return true
}
if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) {
return true
}
if err = unwrap(err); err == nil {
return false
}
}
}
func unwrap(err error) error {
u, ok := err.(interface {
Unwrap() error
})
if !ok {
return nil
}
return u.Unwrap()
}

View file

@ -0,0 +1,12 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.13
package errors
import "errors"
// Is is errors.Is.
func Is(err, target error) bool { return errors.Is(err, target) }

View file

@ -0,0 +1,13 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by generate-protos. DO NOT EDIT.
package fieldnum
// Field numbers for google.protobuf.Any.
const (
Any_TypeUrl = 1 // optional string
Any_Value = 2 // optional bytes
)
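// A hedged sketch of how such constants are typically consumed when
// dispatching on a field number decoded from the wire (the decoding step
// itself is elided; the helper name is illustrative):
func exampleDispatchAny(num int32, payload []byte) {
	switch num {
	case Any_TypeUrl: // 1: the type URL string
		_ = payload
	case Any_Value: // 2: the embedded message bytes
		_ = payload
	}
}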

View file

@ -0,0 +1,35 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by generate-protos. DO NOT EDIT.
package fieldnum
// Field numbers for google.protobuf.Api.
const (
Api_Name = 1 // optional string
Api_Methods = 2 // repeated google.protobuf.Method
Api_Options = 3 // repeated google.protobuf.Option
Api_Version = 4 // optional string
Api_SourceContext = 5 // optional google.protobuf.SourceContext
Api_Mixins = 6 // repeated google.protobuf.Mixin
Api_Syntax = 7 // optional google.protobuf.Syntax
)
// Field numbers for google.protobuf.Method.
const (
Method_Name = 1 // optional string
Method_RequestTypeUrl = 2 // optional string
Method_RequestStreaming = 3 // optional bool
Method_ResponseTypeUrl = 4 // optional string
Method_ResponseStreaming = 5 // optional bool
Method_Options = 6 // repeated google.protobuf.Option
Method_Syntax = 7 // optional google.protobuf.Syntax
)
// Field numbers for google.protobuf.Mixin.
const (
Mixin_Name = 1 // optional string
Mixin_Root = 2 // optional string
)

View file

@ -0,0 +1,239 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by generate-protos. DO NOT EDIT.
package fieldnum
// Field numbers for google.protobuf.FileDescriptorSet.
const (
FileDescriptorSet_File = 1 // repeated google.protobuf.FileDescriptorProto
)
// Field numbers for google.protobuf.FileDescriptorProto.
const (
FileDescriptorProto_Name = 1 // optional string
FileDescriptorProto_Package = 2 // optional string
FileDescriptorProto_Dependency = 3 // repeated string
FileDescriptorProto_PublicDependency = 10 // repeated int32
FileDescriptorProto_WeakDependency = 11 // repeated int32
FileDescriptorProto_MessageType = 4 // repeated google.protobuf.DescriptorProto
FileDescriptorProto_EnumType = 5 // repeated google.protobuf.EnumDescriptorProto
FileDescriptorProto_Service = 6 // repeated google.protobuf.ServiceDescriptorProto
FileDescriptorProto_Extension = 7 // repeated google.protobuf.FieldDescriptorProto
FileDescriptorProto_Options = 8 // optional google.protobuf.FileOptions
FileDescriptorProto_SourceCodeInfo = 9 // optional google.protobuf.SourceCodeInfo
FileDescriptorProto_Syntax = 12 // optional string
)
// Field numbers for google.protobuf.DescriptorProto.
const (
DescriptorProto_Name = 1 // optional string
DescriptorProto_Field = 2 // repeated google.protobuf.FieldDescriptorProto
DescriptorProto_Extension = 6 // repeated google.protobuf.FieldDescriptorProto
DescriptorProto_NestedType = 3 // repeated google.protobuf.DescriptorProto
DescriptorProto_EnumType = 4 // repeated google.protobuf.EnumDescriptorProto
DescriptorProto_ExtensionRange = 5 // repeated google.protobuf.DescriptorProto.ExtensionRange
DescriptorProto_OneofDecl = 8 // repeated google.protobuf.OneofDescriptorProto
DescriptorProto_Options = 7 // optional google.protobuf.MessageOptions
DescriptorProto_ReservedRange = 9 // repeated google.protobuf.DescriptorProto.ReservedRange
DescriptorProto_ReservedName = 10 // repeated string
)
// Field numbers for google.protobuf.DescriptorProto.ExtensionRange.
const (
DescriptorProto_ExtensionRange_Start = 1 // optional int32
DescriptorProto_ExtensionRange_End = 2 // optional int32
DescriptorProto_ExtensionRange_Options = 3 // optional google.protobuf.ExtensionRangeOptions
)
// Field numbers for google.protobuf.DescriptorProto.ReservedRange.
const (
DescriptorProto_ReservedRange_Start = 1 // optional int32
DescriptorProto_ReservedRange_End = 2 // optional int32
)
// Field numbers for google.protobuf.ExtensionRangeOptions.
const (
ExtensionRangeOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
)
// Field numbers for google.protobuf.FieldDescriptorProto.
const (
FieldDescriptorProto_Name = 1 // optional string
FieldDescriptorProto_Number = 3 // optional int32
FieldDescriptorProto_Label = 4 // optional google.protobuf.FieldDescriptorProto.Label
FieldDescriptorProto_Type = 5 // optional google.protobuf.FieldDescriptorProto.Type
FieldDescriptorProto_TypeName = 6 // optional string
FieldDescriptorProto_Extendee = 2 // optional string
FieldDescriptorProto_DefaultValue = 7 // optional string
FieldDescriptorProto_OneofIndex = 9 // optional int32
FieldDescriptorProto_JsonName = 10 // optional string
FieldDescriptorProto_Options = 8 // optional google.protobuf.FieldOptions
)
// Field numbers for google.protobuf.OneofDescriptorProto.
const (
OneofDescriptorProto_Name = 1 // optional string
OneofDescriptorProto_Options = 2 // optional google.protobuf.OneofOptions
)
// Field numbers for google.protobuf.EnumDescriptorProto.
const (
EnumDescriptorProto_Name = 1 // optional string
EnumDescriptorProto_Value = 2 // repeated google.protobuf.EnumValueDescriptorProto
EnumDescriptorProto_Options = 3 // optional google.protobuf.EnumOptions
EnumDescriptorProto_ReservedRange = 4 // repeated google.protobuf.EnumDescriptorProto.EnumReservedRange
EnumDescriptorProto_ReservedName = 5 // repeated string
)
// Field numbers for google.protobuf.EnumDescriptorProto.EnumReservedRange.
const (
EnumDescriptorProto_EnumReservedRange_Start = 1 // optional int32
EnumDescriptorProto_EnumReservedRange_End = 2 // optional int32
)
// Field numbers for google.protobuf.EnumValueDescriptorProto.
const (
EnumValueDescriptorProto_Name = 1 // optional string
EnumValueDescriptorProto_Number = 2 // optional int32
EnumValueDescriptorProto_Options = 3 // optional google.protobuf.EnumValueOptions
)
// Field numbers for google.protobuf.ServiceDescriptorProto.
const (
ServiceDescriptorProto_Name = 1 // optional string
ServiceDescriptorProto_Method = 2 // repeated google.protobuf.MethodDescriptorProto
ServiceDescriptorProto_Options = 3 // optional google.protobuf.ServiceOptions
)
// Field numbers for google.protobuf.MethodDescriptorProto.
const (
MethodDescriptorProto_Name = 1 // optional string
MethodDescriptorProto_InputType = 2 // optional string
MethodDescriptorProto_OutputType = 3 // optional string
MethodDescriptorProto_Options = 4 // optional google.protobuf.MethodOptions
MethodDescriptorProto_ClientStreaming = 5 // optional bool
MethodDescriptorProto_ServerStreaming = 6 // optional bool
)
// Field numbers for google.protobuf.FileOptions.
const (
FileOptions_JavaPackage = 1 // optional string
FileOptions_JavaOuterClassname = 8 // optional string
FileOptions_JavaMultipleFiles = 10 // optional bool
FileOptions_JavaGenerateEqualsAndHash = 20 // optional bool
FileOptions_JavaStringCheckUtf8 = 27 // optional bool
FileOptions_OptimizeFor = 9 // optional google.protobuf.FileOptions.OptimizeMode
FileOptions_GoPackage = 11 // optional string
FileOptions_CcGenericServices = 16 // optional bool
FileOptions_JavaGenericServices = 17 // optional bool
FileOptions_PyGenericServices = 18 // optional bool
FileOptions_PhpGenericServices = 42 // optional bool
FileOptions_Deprecated = 23 // optional bool
FileOptions_CcEnableArenas = 31 // optional bool
FileOptions_ObjcClassPrefix = 36 // optional string
FileOptions_CsharpNamespace = 37 // optional string
FileOptions_SwiftPrefix = 39 // optional string
FileOptions_PhpClassPrefix = 40 // optional string
FileOptions_PhpNamespace = 41 // optional string
FileOptions_PhpMetadataNamespace = 44 // optional string
FileOptions_RubyPackage = 45 // optional string
FileOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
)
// Field numbers for google.protobuf.MessageOptions.
const (
MessageOptions_MessageSetWireFormat = 1 // optional bool
MessageOptions_NoStandardDescriptorAccessor = 2 // optional bool
MessageOptions_Deprecated = 3 // optional bool
MessageOptions_MapEntry = 7 // optional bool
MessageOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
)
// Field numbers for google.protobuf.FieldOptions.
const (
FieldOptions_Ctype = 1 // optional google.protobuf.FieldOptions.CType
FieldOptions_Packed = 2 // optional bool
FieldOptions_Jstype = 6 // optional google.protobuf.FieldOptions.JSType
FieldOptions_Lazy = 5 // optional bool
FieldOptions_Deprecated = 3 // optional bool
FieldOptions_Weak = 10 // optional bool
FieldOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
)
// Field numbers for google.protobuf.OneofOptions.
const (
OneofOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
)
// Field numbers for google.protobuf.EnumOptions.
const (
EnumOptions_AllowAlias = 2 // optional bool
EnumOptions_Deprecated = 3 // optional bool
EnumOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
)
// Field numbers for google.protobuf.EnumValueOptions.
const (
EnumValueOptions_Deprecated = 1 // optional bool
EnumValueOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
)
// Field numbers for google.protobuf.ServiceOptions.
const (
ServiceOptions_Deprecated = 33 // optional bool
ServiceOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
)
// Field numbers for google.protobuf.MethodOptions.
const (
MethodOptions_Deprecated = 33 // optional bool
MethodOptions_IdempotencyLevel = 34 // optional google.protobuf.MethodOptions.IdempotencyLevel
MethodOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
)
// Field numbers for google.protobuf.UninterpretedOption.
const (
UninterpretedOption_Name = 2 // repeated google.protobuf.UninterpretedOption.NamePart
UninterpretedOption_IdentifierValue = 3 // optional string
UninterpretedOption_PositiveIntValue = 4 // optional uint64
UninterpretedOption_NegativeIntValue = 5 // optional int64
UninterpretedOption_DoubleValue = 6 // optional double
UninterpretedOption_StringValue = 7 // optional bytes
UninterpretedOption_AggregateValue = 8 // optional string
)
// Field numbers for google.protobuf.UninterpretedOption.NamePart.
const (
UninterpretedOption_NamePart_NamePart = 1 // required string
UninterpretedOption_NamePart_IsExtension = 2 // required bool
)
// Field numbers for google.protobuf.SourceCodeInfo.
const (
SourceCodeInfo_Location = 1 // repeated google.protobuf.SourceCodeInfo.Location
)
// Field numbers for google.protobuf.SourceCodeInfo.Location.
const (
SourceCodeInfo_Location_Path = 1 // repeated int32
SourceCodeInfo_Location_Span = 2 // repeated int32
SourceCodeInfo_Location_LeadingComments = 3 // optional string
SourceCodeInfo_Location_TrailingComments = 4 // optional string
SourceCodeInfo_Location_LeadingDetachedComments = 6 // repeated string
)
// Field numbers for google.protobuf.GeneratedCodeInfo.
const (
GeneratedCodeInfo_Annotation = 1 // repeated google.protobuf.GeneratedCodeInfo.Annotation
)
// Field numbers for google.protobuf.GeneratedCodeInfo.Annotation.
const (
GeneratedCodeInfo_Annotation_Path = 1 // repeated int32
GeneratedCodeInfo_Annotation_SourceFile = 2 // optional string
GeneratedCodeInfo_Annotation_Begin = 3 // optional int32
GeneratedCodeInfo_Annotation_End = 4 // optional int32
)

View file

@ -0,0 +1,7 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package fieldnum contains constants for field numbers of fields in messages
// declared in descriptor.proto and any of the well-known types.
package fieldnum

View file

@ -0,0 +1,13 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by generate-protos. DO NOT EDIT.
package fieldnum
// Field numbers for google.protobuf.Duration.
const (
Duration_Seconds = 1 // optional int64
Duration_Nanos = 2 // optional int32
)
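These constants are meant to be compared against the field numbers that protowire yields while scanning a raw message. A minimal sketch of that pairing follows; it is not part of the vendored code, the package and function names are made up, error handling is elided, and since fieldnum is internal to the module only code inside google.golang.org/protobuf could actually import it.
package fieldnumexample // hypothetical package name for this sketch
import (
	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/internal/fieldnum"
)
// durationSeconds scans a wire-encoded google.protobuf.Duration and returns
// its seconds field, skipping everything else. A negative length from the
// Consume functions would indicate malformed input; that case is not handled.
func durationSeconds(b []byte) int64 {
	var secs int64
	for len(b) > 0 {
		num, typ, n := protowire.ConsumeTag(b)
		b = b[n:]
		if typ == protowire.VarintType && num == fieldnum.Duration_Seconds {
			v, m := protowire.ConsumeVarint(b)
			secs = int64(v)
			b = b[m:]
			continue
		}
		b = b[protowire.ConsumeFieldValue(num, typ, b):]
	}
	return secs
}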

View file

@ -0,0 +1,10 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by generate-protos. DO NOT EDIT.
package fieldnum
// Field numbers for google.protobuf.Empty.
const ()

View file

@ -0,0 +1,12 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by generate-protos. DO NOT EDIT.
package fieldnum
// Field numbers for google.protobuf.FieldMask.
const (
FieldMask_Paths = 1 // repeated string
)

View file

@ -0,0 +1,12 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by generate-protos. DO NOT EDIT.
package fieldnum
// Field numbers for google.protobuf.SourceContext.
const (
SourceContext_FileName = 1 // optional string
)

View file

@ -0,0 +1,33 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by generate-protos. DO NOT EDIT.
package fieldnum
// Field numbers for google.protobuf.Struct.
const (
Struct_Fields = 1 // repeated google.protobuf.Struct.FieldsEntry
)
// Field numbers for google.protobuf.Struct.FieldsEntry.
const (
Struct_FieldsEntry_Key = 1 // optional string
Struct_FieldsEntry_Value = 2 // optional google.protobuf.Value
)
// Field numbers for google.protobuf.Value.
const (
Value_NullValue = 1 // optional google.protobuf.NullValue
Value_NumberValue = 2 // optional double
Value_StringValue = 3 // optional string
Value_BoolValue = 4 // optional bool
Value_StructValue = 5 // optional google.protobuf.Struct
Value_ListValue = 6 // optional google.protobuf.ListValue
)
// Field numbers for google.protobuf.ListValue.
const (
ListValue_Values = 1 // repeated google.protobuf.Value
)

View file

@ -0,0 +1,13 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by generate-protos. DO NOT EDIT.
package fieldnum
// Field numbers for google.protobuf.Timestamp.
const (
Timestamp_Seconds = 1 // optional int64
Timestamp_Nanos = 2 // optional int32
)

View file

@ -0,0 +1,53 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by generate-protos. DO NOT EDIT.
package fieldnum
// Field numbers for google.protobuf.Type.
const (
Type_Name = 1 // optional string
Type_Fields = 2 // repeated google.protobuf.Field
Type_Oneofs = 3 // repeated string
Type_Options = 4 // repeated google.protobuf.Option
Type_SourceContext = 5 // optional google.protobuf.SourceContext
Type_Syntax = 6 // optional google.protobuf.Syntax
)
// Field numbers for google.protobuf.Field.
const (
Field_Kind = 1 // optional google.protobuf.Field.Kind
Field_Cardinality = 2 // optional google.protobuf.Field.Cardinality
Field_Number = 3 // optional int32
Field_Name = 4 // optional string
Field_TypeUrl = 6 // optional string
Field_OneofIndex = 7 // optional int32
Field_Packed = 8 // optional bool
Field_Options = 9 // repeated google.protobuf.Option
Field_JsonName = 10 // optional string
Field_DefaultValue = 11 // optional string
)
// Field numbers for google.protobuf.Enum.
const (
Enum_Name = 1 // optional string
Enum_Enumvalue = 2 // repeated google.protobuf.EnumValue
Enum_Options = 3 // repeated google.protobuf.Option
Enum_SourceContext = 4 // optional google.protobuf.SourceContext
Enum_Syntax = 5 // optional google.protobuf.Syntax
)
// Field numbers for google.protobuf.EnumValue.
const (
EnumValue_Name = 1 // optional string
EnumValue_Number = 2 // optional int32
EnumValue_Options = 3 // repeated google.protobuf.Option
)
// Field numbers for google.protobuf.Option.
const (
Option_Name = 1 // optional string
Option_Value = 2 // optional google.protobuf.Any
)

View file

@ -0,0 +1,52 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by generate-protos. DO NOT EDIT.
package fieldnum
// Field numbers for google.protobuf.DoubleValue.
const (
DoubleValue_Value = 1 // optional double
)
// Field numbers for google.protobuf.FloatValue.
const (
FloatValue_Value = 1 // optional float
)
// Field numbers for google.protobuf.Int64Value.
const (
Int64Value_Value = 1 // optional int64
)
// Field numbers for google.protobuf.UInt64Value.
const (
UInt64Value_Value = 1 // optional uint64
)
// Field numbers for google.protobuf.Int32Value.
const (
Int32Value_Value = 1 // optional int32
)
// Field numbers for google.protobuf.UInt32Value.
const (
UInt32Value_Value = 1 // optional uint32
)
// Field numbers for google.protobuf.BoolValue.
const (
BoolValue_Value = 1 // optional bool
)
// Field numbers for google.protobuf.StringValue.
const (
StringValue_Value = 1 // optional string
)
// Field numbers for google.protobuf.BytesValue.
const (
BytesValue_Value = 1 // optional bytes
)

View file

@ -0,0 +1,40 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package fieldsort defines an ordering of fields.
//
// The ordering defined by this package matches the historic behavior of the proto
// package, placing extensions first and oneofs last.
//
// There is no guarantee about stability of the wire encoding, and users should not
// depend on the order defined in this package as it is subject to change without
// notice.
package fieldsort
import (
"google.golang.org/protobuf/reflect/protoreflect"
)
// Less returns true if field a comes before field b in ordered wire marshal output.
func Less(a, b protoreflect.FieldDescriptor) bool {
ea := a.IsExtension()
eb := b.IsExtension()
oa := a.ContainingOneof()
ob := b.ContainingOneof()
switch {
case ea != eb:
return ea
case oa != nil && ob != nil:
if oa == ob {
return a.Number() < b.Number()
}
return oa.Index() < ob.Index()
case oa != nil:
return false
case ob != nil:
return true
default:
return a.Number() < b.Number()
}
}
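For illustration, Less can be plugged straight into sort.Slice to materialize this ordering for a message's declared fields. The sketch below is not part of the vendored code; the package and function names are hypothetical, and because fieldsort is an internal package only code within google.golang.org/protobuf could import it.
package fieldsortexample // hypothetical package name for this sketch
import (
	"sort"
	"google.golang.org/protobuf/internal/fieldsort"
	"google.golang.org/protobuf/reflect/protoreflect"
)
// sortedFields copies the declared fields of md and orders them with
// fieldsort.Less, i.e. the historic wire marshal ordering described above.
func sortedFields(md protoreflect.MessageDescriptor) []protoreflect.FieldDescriptor {
	fields := md.Fields()
	out := make([]protoreflect.FieldDescriptor, 0, fields.Len())
	for i := 0; i < fields.Len(); i++ {
		out = append(out, fields.Get(i))
	}
	sort.Slice(out, func(i, j int) bool { return fieldsort.Less(out[i], out[j]) })
	return out
}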

View file

@ -0,0 +1,155 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package filedesc provides functionality for constructing descriptors.
package filedesc
import (
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/fieldnum"
"google.golang.org/protobuf/reflect/protoreflect"
pref "google.golang.org/protobuf/reflect/protoreflect"
preg "google.golang.org/protobuf/reflect/protoregistry"
)
// Builder constructs a protoreflect.FileDescriptor from the raw descriptor.
type Builder struct {
// GoPackagePath is the Go package path that is invoking this builder.
GoPackagePath string
// RawDescriptor is the wire-encoded bytes of FileDescriptorProto
// and must be populated.
RawDescriptor []byte
// NumEnums is the total number of enums declared in the file.
NumEnums int32
// NumMessages is the total number of messages declared in the file.
// It includes the implicit message declarations for map entries.
NumMessages int32
// NumExtensions is the total number of extensions declared in the file.
NumExtensions int32
// NumServices is the total number of services declared in the file.
NumServices int32
// TypeResolver resolves extension field types for descriptor options.
// If nil, it uses protoregistry.GlobalTypes.
TypeResolver interface {
preg.ExtensionTypeResolver
}
// FileRegistry is used to look up file, enum, and message dependencies.
// Once constructed, the file descriptor is registered here.
// If nil, it uses protoregistry.GlobalFiles.
FileRegistry interface {
FindFileByPath(string) (protoreflect.FileDescriptor, error)
FindDescriptorByName(pref.FullName) (pref.Descriptor, error)
RegisterFile(pref.FileDescriptor) error
}
}
// resolverByIndex is an interface Builder.FileRegistry may implement.
// If so, it permits looking up an enum or message dependency based on the
// sub-list and element index into filetype.Builder.DependencyIndexes.
type resolverByIndex interface {
FindEnumByIndex(int32, int32, []Enum, []Message) pref.EnumDescriptor
FindMessageByIndex(int32, int32, []Enum, []Message) pref.MessageDescriptor
}
// Indexes of each sub-list in filetype.Builder.DependencyIndexes.
const (
listFieldDeps int32 = iota
listExtTargets
listExtDeps
listMethInDeps
listMethOutDeps
)
// Out is the output of the Builder.
type Out struct {
File pref.FileDescriptor
// Enums is all enum descriptors in "flattened ordering".
Enums []Enum
// Messages is all message descriptors in "flattened ordering".
// It includes the implicit message declarations for map entries.
Messages []Message
// Extensions is all extension descriptors in "flattened ordering".
Extensions []Extension
// Services is all service descriptors in "flattened ordering".
Services []Service
}
// Build constructs a FileDescriptor given the parameters set in Builder.
// It assumes that the inputs are well-formed and panics if any inconsistencies
// are encountered.
//
// If NumEnums+NumMessages+NumExtensions+NumServices is zero,
// then Build automatically derives them from the raw descriptor.
func (db Builder) Build() (out Out) {
// Populate the counts if uninitialized.
if db.NumEnums+db.NumMessages+db.NumExtensions+db.NumServices == 0 {
db.unmarshalCounts(db.RawDescriptor, true)
}
// Initialize resolvers and registries if unpopulated.
if db.TypeResolver == nil {
db.TypeResolver = preg.GlobalTypes
}
if db.FileRegistry == nil {
db.FileRegistry = preg.GlobalFiles
}
fd := newRawFile(db)
out.File = fd
out.Enums = fd.allEnums
out.Messages = fd.allMessages
out.Extensions = fd.allExtensions
out.Services = fd.allServices
if err := db.FileRegistry.RegisterFile(fd); err != nil {
panic(err)
}
return out
}
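As a rough usage sketch (the helper below is hypothetical, not part of the vendored code), generated file initializers essentially hand Build a raw FileDescriptorProto and let it derive everything else; with the defaults above the file is registered in protoregistry.GlobalFiles, and Build panics if that registration fails.
package filedescexample // hypothetical package name for this sketch
import "google.golang.org/protobuf/internal/filedesc"
// buildFile constructs a FileDescriptor from wire-encoded FileDescriptorProto
// bytes. The declaration counts are left zero so Build derives them from the
// raw descriptor, and the nil resolver/registry fields fall back to the
// global registries.
func buildFile(rawDesc []byte, goPkg string) filedesc.Out {
	return filedesc.Builder{
		GoPackagePath: goPkg,
		RawDescriptor: rawDesc,
	}.Build()
}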
// unmarshalCounts counts the number of enum, message, extension, and service
// declarations in the raw message, which is either a FileDescriptorProto
// or a MessageDescriptorProto depending on whether isFile is set.
func (db *Builder) unmarshalCounts(b []byte, isFile bool) {
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
if isFile {
switch num {
case fieldnum.FileDescriptorProto_EnumType:
db.NumEnums++
case fieldnum.FileDescriptorProto_MessageType:
db.unmarshalCounts(v, false)
db.NumMessages++
case fieldnum.FileDescriptorProto_Extension:
db.NumExtensions++
case fieldnum.FileDescriptorProto_Service:
db.NumServices++
}
} else {
switch num {
case fieldnum.DescriptorProto_EnumType:
db.NumEnums++
case fieldnum.DescriptorProto_NestedType:
db.unmarshalCounts(v, false)
db.NumMessages++
case fieldnum.DescriptorProto_Extension:
db.NumExtensions++
}
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
}

View file

@ -0,0 +1,598 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package filedesc
import (
"bytes"
"fmt"
"sync"
"sync/atomic"
"google.golang.org/protobuf/internal/descfmt"
"google.golang.org/protobuf/internal/descopts"
"google.golang.org/protobuf/internal/encoding/defval"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/internal/strs"
pref "google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
// The types in this file may have a suffix:
// • L0: Contains fields common to all descriptors (except File) and
// must be initialized up front.
// • L1: Contains fields specific to a descriptor and
// must be initialized up front.
// • L2: Contains fields that are lazily initialized when constructing
// from the raw file descriptor. When constructing as a literal, the L2
// fields must be initialized up front.
//
// The types are exported so that packages like reflect/protodesc can
// directly construct descriptors.
type (
File struct {
fileRaw
L1 FileL1
once uint32 // atomically set if L2 is valid
mu sync.Mutex // protects L2
L2 *FileL2
}
FileL1 struct {
Syntax pref.Syntax
Path string
Package pref.FullName
Enums Enums
Messages Messages
Extensions Extensions
Services Services
}
FileL2 struct {
Options func() pref.ProtoMessage
Imports FileImports
Locations SourceLocations
}
)
func (fd *File) ParentFile() pref.FileDescriptor { return fd }
func (fd *File) Parent() pref.Descriptor { return nil }
func (fd *File) Index() int { return 0 }
func (fd *File) Syntax() pref.Syntax { return fd.L1.Syntax }
func (fd *File) Name() pref.Name { return fd.L1.Package.Name() }
func (fd *File) FullName() pref.FullName { return fd.L1.Package }
func (fd *File) IsPlaceholder() bool { return false }
func (fd *File) Options() pref.ProtoMessage {
if f := fd.lazyInit().Options; f != nil {
return f()
}
return descopts.File
}
func (fd *File) Path() string { return fd.L1.Path }
func (fd *File) Package() pref.FullName { return fd.L1.Package }
func (fd *File) Imports() pref.FileImports { return &fd.lazyInit().Imports }
func (fd *File) Enums() pref.EnumDescriptors { return &fd.L1.Enums }
func (fd *File) Messages() pref.MessageDescriptors { return &fd.L1.Messages }
func (fd *File) Extensions() pref.ExtensionDescriptors { return &fd.L1.Extensions }
func (fd *File) Services() pref.ServiceDescriptors { return &fd.L1.Services }
func (fd *File) SourceLocations() pref.SourceLocations { return &fd.L2.Locations }
func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) }
func (fd *File) ProtoType(pref.FileDescriptor) {}
func (fd *File) ProtoInternal(pragma.DoNotImplement) {}
func (fd *File) lazyInit() *FileL2 {
if atomic.LoadUint32(&fd.once) == 0 {
fd.lazyInitOnce()
}
return fd.L2
}
func (fd *File) lazyInitOnce() {
fd.mu.Lock()
if fd.L2 == nil {
fd.lazyRawInit() // recursively initializes all L2 structures
}
atomic.StoreUint32(&fd.once, 1)
fd.mu.Unlock()
}
// ProtoLegacyRawDesc is a pseudo-internal API for allowing the v1 code
// to retrieve the raw descriptor.
//
// WARNING: This method is exempt from the compatibility promise and may be
// removed in the future without warning.
func (fd *File) ProtoLegacyRawDesc() []byte {
return fd.builder.RawDescriptor
}
// GoPackagePath is a pseudo-internal API for determining the Go package path
// that this file descriptor is declared in.
//
// WARNING: This method is exempt from the compatibility promise and may be
// removed in the future without warning.
func (fd *File) GoPackagePath() string {
return fd.builder.GoPackagePath
}
type (
Enum struct {
Base
L1 EnumL1
L2 *EnumL2 // protected by fileDesc.once
}
EnumL1 struct {
eagerValues bool // controls whether EnumL2.Values is already populated
}
EnumL2 struct {
Options func() pref.ProtoMessage
Values EnumValues
ReservedNames Names
ReservedRanges EnumRanges
}
EnumValue struct {
Base
L1 EnumValueL1
}
EnumValueL1 struct {
Options func() pref.ProtoMessage
Number pref.EnumNumber
}
)
func (ed *Enum) Options() pref.ProtoMessage {
if f := ed.lazyInit().Options; f != nil {
return f()
}
return descopts.Enum
}
func (ed *Enum) Values() pref.EnumValueDescriptors {
if ed.L1.eagerValues {
return &ed.L2.Values
}
return &ed.lazyInit().Values
}
func (ed *Enum) ReservedNames() pref.Names { return &ed.lazyInit().ReservedNames }
func (ed *Enum) ReservedRanges() pref.EnumRanges { return &ed.lazyInit().ReservedRanges }
func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) }
func (ed *Enum) ProtoType(pref.EnumDescriptor) {}
func (ed *Enum) lazyInit() *EnumL2 {
ed.L0.ParentFile.lazyInit() // implicitly initializes L2
return ed.L2
}
func (ed *EnumValue) Options() pref.ProtoMessage {
if f := ed.L1.Options; f != nil {
return f()
}
return descopts.EnumValue
}
func (ed *EnumValue) Number() pref.EnumNumber { return ed.L1.Number }
func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) }
func (ed *EnumValue) ProtoType(pref.EnumValueDescriptor) {}
type (
Message struct {
Base
L1 MessageL1
L2 *MessageL2 // protected by fileDesc.once
}
MessageL1 struct {
Enums Enums
Messages Messages
Extensions Extensions
IsMapEntry bool // promoted from google.protobuf.MessageOptions
IsMessageSet bool // promoted from google.protobuf.MessageOptions
}
MessageL2 struct {
Options func() pref.ProtoMessage
Fields Fields
Oneofs Oneofs
ReservedNames Names
ReservedRanges FieldRanges
RequiredNumbers FieldNumbers // must be consistent with Fields.Cardinality
ExtensionRanges FieldRanges
ExtensionRangeOptions []func() pref.ProtoMessage // must be same length as ExtensionRanges
}
Field struct {
Base
L1 FieldL1
}
FieldL1 struct {
Options func() pref.ProtoMessage
Number pref.FieldNumber
Cardinality pref.Cardinality // must be consistent with Message.RequiredNumbers
Kind pref.Kind
JSONName jsonName
IsWeak bool // promoted from google.protobuf.FieldOptions
HasPacked bool // promoted from google.protobuf.FieldOptions
IsPacked bool // promoted from google.protobuf.FieldOptions
HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions
EnforceUTF8 bool // promoted from google.protobuf.FieldOptions
Default defaultValue
ContainingOneof pref.OneofDescriptor // must be consistent with Message.Oneofs.Fields
Enum pref.EnumDescriptor
Message pref.MessageDescriptor
}
Oneof struct {
Base
L1 OneofL1
}
OneofL1 struct {
Options func() pref.ProtoMessage
Fields OneofFields // must be consistent with Message.Fields.ContainingOneof
}
)
func (md *Message) Options() pref.ProtoMessage {
if f := md.lazyInit().Options; f != nil {
return f()
}
return descopts.Message
}
func (md *Message) IsMapEntry() bool { return md.L1.IsMapEntry }
func (md *Message) Fields() pref.FieldDescriptors { return &md.lazyInit().Fields }
func (md *Message) Oneofs() pref.OneofDescriptors { return &md.lazyInit().Oneofs }
func (md *Message) ReservedNames() pref.Names { return &md.lazyInit().ReservedNames }
func (md *Message) ReservedRanges() pref.FieldRanges { return &md.lazyInit().ReservedRanges }
func (md *Message) RequiredNumbers() pref.FieldNumbers { return &md.lazyInit().RequiredNumbers }
func (md *Message) ExtensionRanges() pref.FieldRanges { return &md.lazyInit().ExtensionRanges }
func (md *Message) ExtensionRangeOptions(i int) pref.ProtoMessage {
if f := md.lazyInit().ExtensionRangeOptions[i]; f != nil {
return f()
}
return descopts.ExtensionRange
}
func (md *Message) Enums() pref.EnumDescriptors { return &md.L1.Enums }
func (md *Message) Messages() pref.MessageDescriptors { return &md.L1.Messages }
func (md *Message) Extensions() pref.ExtensionDescriptors { return &md.L1.Extensions }
func (md *Message) ProtoType(pref.MessageDescriptor) {}
func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) }
func (md *Message) lazyInit() *MessageL2 {
md.L0.ParentFile.lazyInit() // implicitly initializes L2
return md.L2
}
// IsMessageSet is a pseudo-internal API for checking whether a message
// should serialize in the proto1 message format.
//
// WARNING: This method is exempt from the compatibility promise and may be
// removed in the future without warning.
func (md *Message) IsMessageSet() bool {
return md.L1.IsMessageSet
}
func (fd *Field) Options() pref.ProtoMessage {
if f := fd.L1.Options; f != nil {
return f()
}
return descopts.Field
}
func (fd *Field) Number() pref.FieldNumber { return fd.L1.Number }
func (fd *Field) Cardinality() pref.Cardinality { return fd.L1.Cardinality }
func (fd *Field) Kind() pref.Kind { return fd.L1.Kind }
func (fd *Field) HasJSONName() bool { return fd.L1.JSONName.has }
func (fd *Field) JSONName() string { return fd.L1.JSONName.get(fd) }
func (fd *Field) IsPacked() bool {
if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != pref.Proto2 && fd.L1.Cardinality == pref.Repeated {
switch fd.L1.Kind {
case pref.StringKind, pref.BytesKind, pref.MessageKind, pref.GroupKind:
default:
return true
}
}
return fd.L1.IsPacked
}
func (fd *Field) IsExtension() bool { return false }
func (fd *Field) IsWeak() bool { return fd.L1.IsWeak }
func (fd *Field) IsList() bool { return fd.Cardinality() == pref.Repeated && !fd.IsMap() }
func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() }
func (fd *Field) MapKey() pref.FieldDescriptor {
if !fd.IsMap() {
return nil
}
return fd.Message().Fields().ByNumber(1)
}
func (fd *Field) MapValue() pref.FieldDescriptor {
if !fd.IsMap() {
return nil
}
return fd.Message().Fields().ByNumber(2)
}
func (fd *Field) HasDefault() bool { return fd.L1.Default.has }
func (fd *Field) Default() pref.Value { return fd.L1.Default.get(fd) }
func (fd *Field) DefaultEnumValue() pref.EnumValueDescriptor { return fd.L1.Default.enum }
func (fd *Field) ContainingOneof() pref.OneofDescriptor { return fd.L1.ContainingOneof }
func (fd *Field) ContainingMessage() pref.MessageDescriptor {
return fd.L0.Parent.(pref.MessageDescriptor)
}
func (fd *Field) Enum() pref.EnumDescriptor {
return fd.L1.Enum
}
func (fd *Field) Message() pref.MessageDescriptor {
if fd.L1.IsWeak {
if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil {
return d.(pref.MessageDescriptor)
}
}
return fd.L1.Message
}
func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) }
func (fd *Field) ProtoType(pref.FieldDescriptor) {}
// EnforceUTF8 is a pseudo-internal API to determine whether to enforce UTF-8
// validation for the string field. This exists for Google-internal use only
// since proto3 did not enforce UTF-8 validity prior to the open-source release.
// If this method does not exist, the default is to enforce valid UTF-8.
//
// WARNING: This method is exempt from the compatibility promise and may be
// removed in the future without warning.
func (fd *Field) EnforceUTF8() bool {
if fd.L1.HasEnforceUTF8 {
return fd.L1.EnforceUTF8
}
return fd.L0.ParentFile.L1.Syntax == pref.Proto3
}
func (od *Oneof) Options() pref.ProtoMessage {
if f := od.L1.Options; f != nil {
return f()
}
return descopts.Oneof
}
func (od *Oneof) Fields() pref.FieldDescriptors { return &od.L1.Fields }
func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) }
func (od *Oneof) ProtoType(pref.OneofDescriptor) {}
type (
Extension struct {
Base
L1 ExtensionL1
L2 *ExtensionL2 // protected by fileDesc.once
}
ExtensionL1 struct {
Number pref.FieldNumber
Extendee pref.MessageDescriptor
Cardinality pref.Cardinality
Kind pref.Kind
}
ExtensionL2 struct {
Options func() pref.ProtoMessage
JSONName jsonName
IsPacked bool // promoted from google.protobuf.FieldOptions
Default defaultValue
Enum pref.EnumDescriptor
Message pref.MessageDescriptor
}
)
func (xd *Extension) Options() pref.ProtoMessage {
if f := xd.lazyInit().Options; f != nil {
return f()
}
return descopts.Field
}
func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number }
func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality }
func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind }
func (xd *Extension) HasJSONName() bool { return xd.lazyInit().JSONName.has }
func (xd *Extension) JSONName() string { return xd.lazyInit().JSONName.get(xd) }
func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked }
func (xd *Extension) IsExtension() bool { return true }
func (xd *Extension) IsWeak() bool { return false }
func (xd *Extension) IsList() bool { return xd.Cardinality() == pref.Repeated }
func (xd *Extension) IsMap() bool { return false }
func (xd *Extension) MapKey() pref.FieldDescriptor { return nil }
func (xd *Extension) MapValue() pref.FieldDescriptor { return nil }
func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has }
func (xd *Extension) Default() pref.Value { return xd.lazyInit().Default.get(xd) }
func (xd *Extension) DefaultEnumValue() pref.EnumValueDescriptor { return xd.lazyInit().Default.enum }
func (xd *Extension) ContainingOneof() pref.OneofDescriptor { return nil }
func (xd *Extension) ContainingMessage() pref.MessageDescriptor { return xd.L1.Extendee }
func (xd *Extension) Enum() pref.EnumDescriptor { return xd.lazyInit().Enum }
func (xd *Extension) Message() pref.MessageDescriptor { return xd.lazyInit().Message }
func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) }
func (xd *Extension) ProtoType(pref.FieldDescriptor) {}
func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {}
func (xd *Extension) lazyInit() *ExtensionL2 {
xd.L0.ParentFile.lazyInit() // implicitly initializes L2
return xd.L2
}
type (
Service struct {
Base
L1 ServiceL1
L2 *ServiceL2 // protected by fileDesc.once
}
ServiceL1 struct{}
ServiceL2 struct {
Options func() pref.ProtoMessage
Methods Methods
}
Method struct {
Base
L1 MethodL1
}
MethodL1 struct {
Options func() pref.ProtoMessage
Input pref.MessageDescriptor
Output pref.MessageDescriptor
IsStreamingClient bool
IsStreamingServer bool
}
)
func (sd *Service) Options() pref.ProtoMessage {
if f := sd.lazyInit().Options; f != nil {
return f()
}
return descopts.Service
}
func (sd *Service) Methods() pref.MethodDescriptors { return &sd.lazyInit().Methods }
func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) }
func (sd *Service) ProtoType(pref.ServiceDescriptor) {}
func (sd *Service) ProtoInternal(pragma.DoNotImplement) {}
func (sd *Service) lazyInit() *ServiceL2 {
sd.L0.ParentFile.lazyInit() // implicitly initializes L2
return sd.L2
}
func (md *Method) Options() pref.ProtoMessage {
if f := md.L1.Options; f != nil {
return f()
}
return descopts.Method
}
func (md *Method) Input() pref.MessageDescriptor { return md.L1.Input }
func (md *Method) Output() pref.MessageDescriptor { return md.L1.Output }
func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient }
func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer }
func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) }
func (md *Method) ProtoType(pref.MethodDescriptor) {}
func (md *Method) ProtoInternal(pragma.DoNotImplement) {}
// Surrogate files can be used to create standalone descriptors
// where the syntax is the only information derived from the parent file.
var (
SurrogateProto2 = &File{L1: FileL1{Syntax: pref.Proto2}, L2: &FileL2{}}
SurrogateProto3 = &File{L1: FileL1{Syntax: pref.Proto3}, L2: &FileL2{}}
)
type (
Base struct {
L0 BaseL0
}
BaseL0 struct {
FullName pref.FullName // must be populated
ParentFile *File // must be populated
Parent pref.Descriptor
Index int
}
)
func (d *Base) Name() pref.Name { return d.L0.FullName.Name() }
func (d *Base) FullName() pref.FullName { return d.L0.FullName }
func (d *Base) ParentFile() pref.FileDescriptor {
if d.L0.ParentFile == SurrogateProto2 || d.L0.ParentFile == SurrogateProto3 {
return nil // surrogate files are not real parents
}
return d.L0.ParentFile
}
func (d *Base) Parent() pref.Descriptor { return d.L0.Parent }
func (d *Base) Index() int { return d.L0.Index }
func (d *Base) Syntax() pref.Syntax { return d.L0.ParentFile.Syntax() }
func (d *Base) IsPlaceholder() bool { return false }
func (d *Base) ProtoInternal(pragma.DoNotImplement) {}
type jsonName struct {
has bool
once sync.Once
name string
}
// Init initializes the name. It is exported for use by other internal packages.
func (js *jsonName) Init(s string) {
js.has = true
js.name = s
}
func (js *jsonName) get(fd pref.FieldDescriptor) string {
if !js.has {
js.once.Do(func() {
js.name = strs.JSONCamelCase(string(fd.Name()))
})
}
return js.name
}
func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue {
dv := defaultValue{has: v.IsValid(), val: v, enum: ev}
if b, ok := v.Interface().([]byte); ok {
// Store a copy of the default bytes, so that we can detect
// accidental mutations of the original value.
dv.bytes = append([]byte(nil), b...)
}
return dv
}
func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) defaultValue {
var evs pref.EnumValueDescriptors
if k == pref.EnumKind {
// If the enum is declared within the same file, be careful not to
// blindly call the Values method, lest we bind ourselves in a deadlock.
if e, ok := ed.(*Enum); ok && e.L0.ParentFile == pf {
evs = &e.L2.Values
} else {
evs = ed.Values()
}
// If we are unable to resolve the enum dependency, use a placeholder
// enum value since we will not be able to parse the default value.
if ed.IsPlaceholder() && pref.Name(b).IsValid() {
v := pref.ValueOfEnum(0)
ev := PlaceholderEnumValue(ed.FullName().Parent().Append(pref.Name(b)))
return DefaultValue(v, ev)
}
}
v, ev, err := defval.Unmarshal(string(b), k, evs, defval.Descriptor)
if err != nil {
panic(err)
}
return DefaultValue(v, ev)
}
type defaultValue struct {
has bool
val pref.Value
enum pref.EnumValueDescriptor
bytes []byte
}
func (dv *defaultValue) get(fd pref.FieldDescriptor) pref.Value {
// Return the zero value as the default if unpopulated.
if !dv.has {
if fd.Cardinality() == pref.Repeated {
return pref.Value{}
}
switch fd.Kind() {
case pref.BoolKind:
return pref.ValueOfBool(false)
case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
return pref.ValueOfInt32(0)
case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
return pref.ValueOfInt64(0)
case pref.Uint32Kind, pref.Fixed32Kind:
return pref.ValueOfUint32(0)
case pref.Uint64Kind, pref.Fixed64Kind:
return pref.ValueOfUint64(0)
case pref.FloatKind:
return pref.ValueOfFloat32(0)
case pref.DoubleKind:
return pref.ValueOfFloat64(0)
case pref.StringKind:
return pref.ValueOfString("")
case pref.BytesKind:
return pref.ValueOfBytes(nil)
case pref.EnumKind:
if evs := fd.Enum().Values(); evs.Len() > 0 {
return pref.ValueOfEnum(evs.Get(0).Number())
}
return pref.ValueOfEnum(0)
}
}
if len(dv.bytes) > 0 && !bytes.Equal(dv.bytes, dv.val.Bytes()) {
// TODO: Avoid panic if we're running with the race detector
// and instead spawn a goroutine that periodically resets
// this value back to the original to induce a race.
panic("detected mutation on the default bytes")
}
return dv.val
}

View file

@ -0,0 +1,471 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package filedesc
import (
"sync"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/fieldnum"
"google.golang.org/protobuf/internal/strs"
pref "google.golang.org/protobuf/reflect/protoreflect"
)
// fileRaw is a data struct used when initializing a file descriptor from
// a raw FileDescriptorProto.
type fileRaw struct {
builder Builder
allEnums []Enum
allMessages []Message
allExtensions []Extension
allServices []Service
}
func newRawFile(db Builder) *File {
fd := &File{fileRaw: fileRaw{builder: db}}
fd.initDecls(db.NumEnums, db.NumMessages, db.NumExtensions, db.NumServices)
fd.unmarshalSeed(db.RawDescriptor)
// Extended message targets are eagerly resolved since registration
// needs this information at program init time.
for i := range fd.allExtensions {
xd := &fd.allExtensions[i]
xd.L1.Extendee = fd.resolveMessageDependency(xd.L1.Extendee, listExtTargets, int32(i))
}
fd.checkDecls()
return fd
}
// initDecls pre-allocates slices for the exact number of enums, messages
// (including map entries), extensions, and services declared in the proto file.
// This is done to avoid regrowing the slice, which would change the address
// for any previously seen declaration.
//
// The alloc methods "allocate" slices by pulling from the capacity.
func (fd *File) initDecls(numEnums, numMessages, numExtensions, numServices int32) {
fd.allEnums = make([]Enum, 0, numEnums)
fd.allMessages = make([]Message, 0, numMessages)
fd.allExtensions = make([]Extension, 0, numExtensions)
fd.allServices = make([]Service, 0, numServices)
}
func (fd *File) allocEnums(n int) []Enum {
total := len(fd.allEnums)
es := fd.allEnums[total : total+n]
fd.allEnums = fd.allEnums[:total+n]
return es
}
func (fd *File) allocMessages(n int) []Message {
total := len(fd.allMessages)
ms := fd.allMessages[total : total+n]
fd.allMessages = fd.allMessages[:total+n]
return ms
}
func (fd *File) allocExtensions(n int) []Extension {
total := len(fd.allExtensions)
xs := fd.allExtensions[total : total+n]
fd.allExtensions = fd.allExtensions[:total+n]
return xs
}
func (fd *File) allocServices(n int) []Service {
total := len(fd.allServices)
xs := fd.allServices[total : total+n]
fd.allServices = fd.allServices[:total+n]
return xs
}
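The alloc helpers above rely on a general slice property: reslicing within an already-reserved capacity never reallocates the backing array, so pointers taken to earlier declarations stay valid as later ones are handed out. A standalone sketch of the same pattern, with made-up names and detached from the descriptor types:
package allocexample // hypothetical package name for this sketch
type node struct{ id int }
// pool hands out contiguous sub-slices of one preallocated backing array.
// Because the backing array never grows, pointers into earlier allocations
// remain stable for the lifetime of the pool.
type pool struct{ all []node }
func newPool(total int) *pool { return &pool{all: make([]node, 0, total)} }
// alloc pulls n elements from the reserved capacity; reslicing past that
// capacity would panic, so the pool must be sized correctly up front.
func (p *pool) alloc(n int) []node {
	cur := len(p.all)
	out := p.all[cur : cur+n]
	p.all = p.all[:cur+n]
	return out
}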
// checkDecls performs a sanity check that the expected number of declarations
// matches the number found in the descriptor proto.
func (fd *File) checkDecls() {
switch {
case len(fd.allEnums) != cap(fd.allEnums):
case len(fd.allMessages) != cap(fd.allMessages):
case len(fd.allExtensions) != cap(fd.allExtensions):
case len(fd.allServices) != cap(fd.allServices):
default:
return
}
panic("mismatching cardinality")
}
func (fd *File) unmarshalSeed(b []byte) {
sb := getBuilder()
defer putBuilder(sb)
var prevField pref.FieldNumber
var numEnums, numMessages, numExtensions, numServices int
var posEnums, posMessages, posExtensions, posServices int
b0 := b
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case fieldnum.FileDescriptorProto_Syntax:
switch string(v) {
case "proto2":
fd.L1.Syntax = pref.Proto2
case "proto3":
fd.L1.Syntax = pref.Proto3
default:
panic("invalid syntax")
}
case fieldnum.FileDescriptorProto_Name:
fd.L1.Path = sb.MakeString(v)
case fieldnum.FileDescriptorProto_Package:
fd.L1.Package = pref.FullName(sb.MakeString(v))
case fieldnum.FileDescriptorProto_EnumType:
if prevField != fieldnum.FileDescriptorProto_EnumType {
if numEnums > 0 {
panic("non-contiguous repeated field")
}
posEnums = len(b0) - len(b) - n - m
}
numEnums++
case fieldnum.FileDescriptorProto_MessageType:
if prevField != fieldnum.FileDescriptorProto_MessageType {
if numMessages > 0 {
panic("non-contiguous repeated field")
}
posMessages = len(b0) - len(b) - n - m
}
numMessages++
case fieldnum.FileDescriptorProto_Extension:
if prevField != fieldnum.FileDescriptorProto_Extension {
if numExtensions > 0 {
panic("non-contiguous repeated field")
}
posExtensions = len(b0) - len(b) - n - m
}
numExtensions++
case fieldnum.FileDescriptorProto_Service:
if prevField != fieldnum.FileDescriptorProto_Service {
if numServices > 0 {
panic("non-contiguous repeated field")
}
posServices = len(b0) - len(b) - n - m
}
numServices++
}
prevField = num
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
prevField = -1 // ignore known field numbers of unknown wire type
}
}
// If syntax is missing, it is assumed to be proto2.
if fd.L1.Syntax == 0 {
fd.L1.Syntax = pref.Proto2
}
// Must allocate all declarations before parsing each descriptor type
// to ensure we handle all descriptors in "flattened ordering".
if numEnums > 0 {
fd.L1.Enums.List = fd.allocEnums(numEnums)
}
if numMessages > 0 {
fd.L1.Messages.List = fd.allocMessages(numMessages)
}
if numExtensions > 0 {
fd.L1.Extensions.List = fd.allocExtensions(numExtensions)
}
if numServices > 0 {
fd.L1.Services.List = fd.allocServices(numServices)
}
if numEnums > 0 {
b := b0[posEnums:]
for i := range fd.L1.Enums.List {
_, n := protowire.ConsumeVarint(b)
v, m := protowire.ConsumeBytes(b[n:])
fd.L1.Enums.List[i].unmarshalSeed(v, sb, fd, fd, i)
b = b[n+m:]
}
}
if numMessages > 0 {
b := b0[posMessages:]
for i := range fd.L1.Messages.List {
_, n := protowire.ConsumeVarint(b)
v, m := protowire.ConsumeBytes(b[n:])
fd.L1.Messages.List[i].unmarshalSeed(v, sb, fd, fd, i)
b = b[n+m:]
}
}
if numExtensions > 0 {
b := b0[posExtensions:]
for i := range fd.L1.Extensions.List {
_, n := protowire.ConsumeVarint(b)
v, m := protowire.ConsumeBytes(b[n:])
fd.L1.Extensions.List[i].unmarshalSeed(v, sb, fd, fd, i)
b = b[n+m:]
}
}
if numServices > 0 {
b := b0[posServices:]
for i := range fd.L1.Services.List {
_, n := protowire.ConsumeVarint(b)
v, m := protowire.ConsumeBytes(b[n:])
fd.L1.Services.List[i].unmarshalSeed(v, sb, fd, fd, i)
b = b[n+m:]
}
}
}
func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
ed.L0.ParentFile = pf
ed.L0.Parent = pd
ed.L0.Index = i
var numValues int
for b := b; len(b) > 0; {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case fieldnum.EnumDescriptorProto_Name:
ed.L0.FullName = appendFullName(sb, pd.FullName(), v)
case fieldnum.EnumDescriptorProto_Value:
numValues++
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
// Only construct enum value descriptors for top-level enums since
// they are needed for registration.
if pd != pf {
return
}
ed.L1.eagerValues = true
ed.L2 = new(EnumL2)
ed.L2.Values.List = make([]EnumValue, numValues)
for i := 0; len(b) > 0; {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case fieldnum.EnumDescriptorProto_Value:
ed.L2.Values.List[i].unmarshalFull(v, sb, pf, ed, i)
i++
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
}
func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
md.L0.ParentFile = pf
md.L0.Parent = pd
md.L0.Index = i
var prevField pref.FieldNumber
var numEnums, numMessages, numExtensions int
var posEnums, posMessages, posExtensions int
b0 := b
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case fieldnum.DescriptorProto_Name:
md.L0.FullName = appendFullName(sb, pd.FullName(), v)
case fieldnum.DescriptorProto_EnumType:
if prevField != fieldnum.DescriptorProto_EnumType {
if numEnums > 0 {
panic("non-contiguous repeated field")
}
posEnums = len(b0) - len(b) - n - m
}
numEnums++
case fieldnum.DescriptorProto_NestedType:
if prevField != fieldnum.DescriptorProto_NestedType {
if numMessages > 0 {
panic("non-contiguous repeated field")
}
posMessages = len(b0) - len(b) - n - m
}
numMessages++
case fieldnum.DescriptorProto_Extension:
if prevField != fieldnum.DescriptorProto_Extension {
if numExtensions > 0 {
panic("non-contiguous repeated field")
}
posExtensions = len(b0) - len(b) - n - m
}
numExtensions++
case fieldnum.DescriptorProto_Options:
md.unmarshalSeedOptions(v)
}
prevField = num
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
prevField = -1 // ignore known field numbers of unknown wire type
}
}
// Must allocate all declarations before parsing each descriptor type
// to ensure we handle all descriptors in "flattened ordering".
if numEnums > 0 {
md.L1.Enums.List = pf.allocEnums(numEnums)
}
if numMessages > 0 {
md.L1.Messages.List = pf.allocMessages(numMessages)
}
if numExtensions > 0 {
md.L1.Extensions.List = pf.allocExtensions(numExtensions)
}
if numEnums > 0 {
b := b0[posEnums:]
for i := range md.L1.Enums.List {
_, n := protowire.ConsumeVarint(b)
v, m := protowire.ConsumeBytes(b[n:])
md.L1.Enums.List[i].unmarshalSeed(v, sb, pf, md, i)
b = b[n+m:]
}
}
if numMessages > 0 {
b := b0[posMessages:]
for i := range md.L1.Messages.List {
_, n := protowire.ConsumeVarint(b)
v, m := protowire.ConsumeBytes(b[n:])
md.L1.Messages.List[i].unmarshalSeed(v, sb, pf, md, i)
b = b[n+m:]
}
}
if numExtensions > 0 {
b := b0[posExtensions:]
for i := range md.L1.Extensions.List {
_, n := protowire.ConsumeVarint(b)
v, m := protowire.ConsumeBytes(b[n:])
md.L1.Extensions.List[i].unmarshalSeed(v, sb, pf, md, i)
b = b[n+m:]
}
}
}
func (md *Message) unmarshalSeedOptions(b []byte) {
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case fieldnum.MessageOptions_MapEntry:
md.L1.IsMapEntry = protowire.DecodeBool(v)
case fieldnum.MessageOptions_MessageSetWireFormat:
md.L1.IsMessageSet = protowire.DecodeBool(v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
}
func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
xd.L0.ParentFile = pf
xd.L0.Parent = pd
xd.L0.Index = i
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case fieldnum.FieldDescriptorProto_Number:
xd.L1.Number = pref.FieldNumber(v)
case fieldnum.FieldDescriptorProto_Label:
xd.L1.Cardinality = pref.Cardinality(v)
case fieldnum.FieldDescriptorProto_Type:
xd.L1.Kind = pref.Kind(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case fieldnum.FieldDescriptorProto_Name:
xd.L0.FullName = appendFullName(sb, pd.FullName(), v)
case fieldnum.FieldDescriptorProto_Extendee:
xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v))
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
}
func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
sd.L0.ParentFile = pf
sd.L0.Parent = pd
sd.L0.Index = i
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case fieldnum.ServiceDescriptorProto_Name:
sd.L0.FullName = appendFullName(sb, pd.FullName(), v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
}
var nameBuilderPool = sync.Pool{
New: func() interface{} { return new(strs.Builder) },
}
func getBuilder() *strs.Builder {
return nameBuilderPool.Get().(*strs.Builder)
}
func putBuilder(b *strs.Builder) {
nameBuilderPool.Put(b)
}
// makeFullName converts b to a protoreflect.FullName,
// where b must start with a leading dot.
func makeFullName(sb *strs.Builder, b []byte) pref.FullName {
if len(b) == 0 || b[0] != '.' {
panic("name reference must be fully qualified")
}
return pref.FullName(sb.MakeString(b[1:]))
}
func appendFullName(sb *strs.Builder, prefix pref.FullName, suffix []byte) pref.FullName {
return sb.AppendFullName(prefix, pref.Name(strs.UnsafeString(suffix)))
}

View file

@ -0,0 +1,695 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package filedesc
import (
"reflect"
"sync"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/descopts"
"google.golang.org/protobuf/internal/fieldnum"
"google.golang.org/protobuf/internal/strs"
"google.golang.org/protobuf/proto"
pref "google.golang.org/protobuf/reflect/protoreflect"
)
func (fd *File) lazyRawInit() {
fd.unmarshalFull(fd.builder.RawDescriptor)
fd.resolveMessages()
fd.resolveExtensions()
fd.resolveServices()
}
func (file *File) resolveMessages() {
var depIdx int32
for i := range file.allMessages {
md := &file.allMessages[i]
// Resolve message field dependencies.
for j := range md.L2.Fields.List {
fd := &md.L2.Fields.List[j]
// Weak fields are resolved upon actual use.
if fd.L1.IsWeak {
continue
}
// Resolve message field dependency.
switch fd.L1.Kind {
case pref.EnumKind:
fd.L1.Enum = file.resolveEnumDependency(fd.L1.Enum, listFieldDeps, depIdx)
depIdx++
case pref.MessageKind, pref.GroupKind:
fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx)
depIdx++
}
// Default is resolved here since it depends on Enum being resolved.
if v := fd.L1.Default.val; v.IsValid() {
fd.L1.Default = unmarshalDefault(v.Bytes(), fd.L1.Kind, file, fd.L1.Enum)
}
}
}
}
func (file *File) resolveExtensions() {
var depIdx int32
for i := range file.allExtensions {
xd := &file.allExtensions[i]
// Resolve extension field dependency.
switch xd.L1.Kind {
case pref.EnumKind:
xd.L2.Enum = file.resolveEnumDependency(xd.L2.Enum, listExtDeps, depIdx)
depIdx++
case pref.MessageKind, pref.GroupKind:
xd.L2.Message = file.resolveMessageDependency(xd.L2.Message, listExtDeps, depIdx)
depIdx++
}
// Default is resolved here since it depends on Enum being resolved.
if v := xd.L2.Default.val; v.IsValid() {
xd.L2.Default = unmarshalDefault(v.Bytes(), xd.L1.Kind, file, xd.L2.Enum)
}
}
}
func (file *File) resolveServices() {
var depIdx int32
for i := range file.allServices {
sd := &file.allServices[i]
// Resolve method dependencies.
for j := range sd.L2.Methods.List {
md := &sd.L2.Methods.List[j]
md.L1.Input = file.resolveMessageDependency(md.L1.Input, listMethInDeps, depIdx)
md.L1.Output = file.resolveMessageDependency(md.L1.Output, listMethOutDeps, depIdx)
depIdx++
}
}
}
func (file *File) resolveEnumDependency(ed pref.EnumDescriptor, i, j int32) pref.EnumDescriptor {
r := file.builder.FileRegistry
if r, ok := r.(resolverByIndex); ok {
if ed2 := r.FindEnumByIndex(i, j, file.allEnums, file.allMessages); ed2 != nil {
return ed2
}
}
for i := range file.allEnums {
if ed2 := &file.allEnums[i]; ed2.L0.FullName == ed.FullName() {
return ed2
}
}
if d, _ := r.FindDescriptorByName(ed.FullName()); d != nil {
return d.(pref.EnumDescriptor)
}
return ed
}
func (file *File) resolveMessageDependency(md pref.MessageDescriptor, i, j int32) pref.MessageDescriptor {
r := file.builder.FileRegistry
if r, ok := r.(resolverByIndex); ok {
if md2 := r.FindMessageByIndex(i, j, file.allEnums, file.allMessages); md2 != nil {
return md2
}
}
for i := range file.allMessages {
if md2 := &file.allMessages[i]; md2.L0.FullName == md.FullName() {
return md2
}
}
if d, _ := r.FindDescriptorByName(md.FullName()); d != nil {
return d.(pref.MessageDescriptor)
}
return md
}
func (fd *File) unmarshalFull(b []byte) {
sb := getBuilder()
defer putBuilder(sb)
var enumIdx, messageIdx, extensionIdx, serviceIdx int
var rawOptions []byte
fd.L2 = new(FileL2)
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case fieldnum.FileDescriptorProto_PublicDependency:
fd.L2.Imports[v].IsPublic = true
case fieldnum.FileDescriptorProto_WeakDependency:
fd.L2.Imports[v].IsWeak = true
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case fieldnum.FileDescriptorProto_Dependency:
path := sb.MakeString(v)
imp, _ := fd.builder.FileRegistry.FindFileByPath(path)
if imp == nil {
imp = PlaceholderFile(path)
}
fd.L2.Imports = append(fd.L2.Imports, pref.FileImport{FileDescriptor: imp})
case fieldnum.FileDescriptorProto_EnumType:
fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb)
enumIdx++
case fieldnum.FileDescriptorProto_MessageType:
fd.L1.Messages.List[messageIdx].unmarshalFull(v, sb)
messageIdx++
case fieldnum.FileDescriptorProto_Extension:
fd.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb)
extensionIdx++
case fieldnum.FileDescriptorProto_Service:
fd.L1.Services.List[serviceIdx].unmarshalFull(v, sb)
serviceIdx++
case fieldnum.FileDescriptorProto_Options:
rawOptions = appendOptions(rawOptions, v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
fd.L2.Options = fd.builder.optionsUnmarshaler(&descopts.File, rawOptions)
}
func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) {
var rawValues [][]byte
var rawOptions []byte
if !ed.L1.eagerValues {
ed.L2 = new(EnumL2)
}
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case fieldnum.EnumDescriptorProto_Value:
rawValues = append(rawValues, v)
case fieldnum.EnumDescriptorProto_ReservedName:
ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, pref.Name(sb.MakeString(v)))
case fieldnum.EnumDescriptorProto_ReservedRange:
ed.L2.ReservedRanges.List = append(ed.L2.ReservedRanges.List, unmarshalEnumReservedRange(v))
case fieldnum.EnumDescriptorProto_Options:
rawOptions = appendOptions(rawOptions, v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
if !ed.L1.eagerValues && len(rawValues) > 0 {
ed.L2.Values.List = make([]EnumValue, len(rawValues))
for i, b := range rawValues {
ed.L2.Values.List[i].unmarshalFull(b, sb, ed.L0.ParentFile, ed, i)
}
}
ed.L2.Options = ed.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Enum, rawOptions)
}
func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) {
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case fieldnum.EnumDescriptorProto_EnumReservedRange_Start:
r[0] = pref.EnumNumber(v)
case fieldnum.EnumDescriptorProto_EnumReservedRange_End:
r[1] = pref.EnumNumber(v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
return r
}
func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
vd.L0.ParentFile = pf
vd.L0.Parent = pd
vd.L0.Index = i
var rawOptions []byte
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case fieldnum.EnumValueDescriptorProto_Number:
vd.L1.Number = pref.EnumNumber(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case fieldnum.EnumValueDescriptorProto_Name:
// NOTE: Enum values are in the same scope as the enum parent.
vd.L0.FullName = appendFullName(sb, pd.Parent().FullName(), v)
case fieldnum.EnumValueDescriptorProto_Options:
rawOptions = appendOptions(rawOptions, v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
vd.L1.Options = pf.builder.optionsUnmarshaler(&descopts.EnumValue, rawOptions)
}
func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) {
var rawFields, rawOneofs [][]byte
var enumIdx, messageIdx, extensionIdx int
var rawOptions []byte
md.L2 = new(MessageL2)
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case fieldnum.DescriptorProto_Field:
rawFields = append(rawFields, v)
case fieldnum.DescriptorProto_OneofDecl:
rawOneofs = append(rawOneofs, v)
case fieldnum.DescriptorProto_ReservedName:
md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, pref.Name(sb.MakeString(v)))
case fieldnum.DescriptorProto_ReservedRange:
md.L2.ReservedRanges.List = append(md.L2.ReservedRanges.List, unmarshalMessageReservedRange(v))
case fieldnum.DescriptorProto_ExtensionRange:
r, rawOptions := unmarshalMessageExtensionRange(v)
opts := md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.ExtensionRange, rawOptions)
md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, r)
md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, opts)
case fieldnum.DescriptorProto_EnumType:
md.L1.Enums.List[enumIdx].unmarshalFull(v, sb)
enumIdx++
case fieldnum.DescriptorProto_NestedType:
md.L1.Messages.List[messageIdx].unmarshalFull(v, sb)
messageIdx++
case fieldnum.DescriptorProto_Extension:
md.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb)
extensionIdx++
case fieldnum.DescriptorProto_Options:
md.unmarshalOptions(v)
rawOptions = appendOptions(rawOptions, v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
if len(rawFields) > 0 || len(rawOneofs) > 0 {
md.L2.Fields.List = make([]Field, len(rawFields))
md.L2.Oneofs.List = make([]Oneof, len(rawOneofs))
for i, b := range rawFields {
fd := &md.L2.Fields.List[i]
fd.unmarshalFull(b, sb, md.L0.ParentFile, md, i)
if fd.L1.Cardinality == pref.Required {
md.L2.RequiredNumbers.List = append(md.L2.RequiredNumbers.List, fd.L1.Number)
}
}
for i, b := range rawOneofs {
od := &md.L2.Oneofs.List[i]
od.unmarshalFull(b, sb, md.L0.ParentFile, md, i)
}
}
md.L2.Options = md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Message, rawOptions)
}
func (md *Message) unmarshalOptions(b []byte) {
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case fieldnum.MessageOptions_MapEntry:
md.L1.IsMapEntry = protowire.DecodeBool(v)
case fieldnum.MessageOptions_MessageSetWireFormat:
md.L1.IsMessageSet = protowire.DecodeBool(v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
}
func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) {
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case fieldnum.DescriptorProto_ReservedRange_Start:
r[0] = pref.FieldNumber(v)
case fieldnum.DescriptorProto_ReservedRange_End:
r[1] = pref.FieldNumber(v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
return r
}
func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions []byte) {
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case fieldnum.DescriptorProto_ExtensionRange_Start:
r[0] = pref.FieldNumber(v)
case fieldnum.DescriptorProto_ExtensionRange_End:
r[1] = pref.FieldNumber(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case fieldnum.DescriptorProto_ExtensionRange_Options:
rawOptions = appendOptions(rawOptions, v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
return r, rawOptions
}
func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
fd.L0.ParentFile = pf
fd.L0.Parent = pd
fd.L0.Index = i
var rawTypeName []byte
var rawOptions []byte
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case fieldnum.FieldDescriptorProto_Number:
fd.L1.Number = pref.FieldNumber(v)
case fieldnum.FieldDescriptorProto_Label:
fd.L1.Cardinality = pref.Cardinality(v)
case fieldnum.FieldDescriptorProto_Type:
fd.L1.Kind = pref.Kind(v)
case fieldnum.FieldDescriptorProto_OneofIndex:
// In Message.unmarshalFull, we allocate slices for both
// the field and oneof descriptors before unmarshaling either
// of them. This ensures pointers to slice elements are stable.
od := &pd.(*Message).L2.Oneofs.List[v]
od.L1.Fields.List = append(od.L1.Fields.List, fd)
if fd.L1.ContainingOneof != nil {
panic("oneof type already set")
}
fd.L1.ContainingOneof = od
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case fieldnum.FieldDescriptorProto_Name:
fd.L0.FullName = appendFullName(sb, pd.FullName(), v)
case fieldnum.FieldDescriptorProto_JsonName:
fd.L1.JSONName.Init(sb.MakeString(v))
case fieldnum.FieldDescriptorProto_DefaultValue:
fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages
case fieldnum.FieldDescriptorProto_TypeName:
rawTypeName = v
case fieldnum.FieldDescriptorProto_Options:
fd.unmarshalOptions(v)
rawOptions = appendOptions(rawOptions, v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
if rawTypeName != nil {
name := makeFullName(sb, rawTypeName)
switch fd.L1.Kind {
case pref.EnumKind:
fd.L1.Enum = PlaceholderEnum(name)
case pref.MessageKind, pref.GroupKind:
fd.L1.Message = PlaceholderMessage(name)
}
}
fd.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Field, rawOptions)
}
func (fd *Field) unmarshalOptions(b []byte) {
const FieldOptions_EnforceUTF8 = 13
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case fieldnum.FieldOptions_Packed:
fd.L1.HasPacked = true
fd.L1.IsPacked = protowire.DecodeBool(v)
case fieldnum.FieldOptions_Weak:
fd.L1.IsWeak = protowire.DecodeBool(v)
case FieldOptions_EnforceUTF8:
fd.L1.HasEnforceUTF8 = true
fd.L1.EnforceUTF8 = protowire.DecodeBool(v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
}
func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
od.L0.ParentFile = pf
od.L0.Parent = pd
od.L0.Index = i
var rawOptions []byte
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case fieldnum.OneofDescriptorProto_Name:
od.L0.FullName = appendFullName(sb, pd.FullName(), v)
case fieldnum.OneofDescriptorProto_Options:
rawOptions = appendOptions(rawOptions, v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
od.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Oneof, rawOptions)
}
func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
var rawTypeName []byte
var rawOptions []byte
xd.L2 = new(ExtensionL2)
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case fieldnum.FieldDescriptorProto_JsonName:
xd.L2.JSONName.Init(sb.MakeString(v))
case fieldnum.FieldDescriptorProto_DefaultValue:
xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions
case fieldnum.FieldDescriptorProto_TypeName:
rawTypeName = v
case fieldnum.FieldDescriptorProto_Options:
xd.unmarshalOptions(v)
rawOptions = appendOptions(rawOptions, v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
if rawTypeName != nil {
name := makeFullName(sb, rawTypeName)
switch xd.L1.Kind {
case pref.EnumKind:
xd.L2.Enum = PlaceholderEnum(name)
case pref.MessageKind, pref.GroupKind:
xd.L2.Message = PlaceholderMessage(name)
}
}
xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions)
}
func (xd *Extension) unmarshalOptions(b []byte) {
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case fieldnum.FieldOptions_Packed:
xd.L2.IsPacked = protowire.DecodeBool(v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
}
func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) {
var rawMethods [][]byte
var rawOptions []byte
sd.L2 = new(ServiceL2)
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case fieldnum.ServiceDescriptorProto_Method:
rawMethods = append(rawMethods, v)
case fieldnum.ServiceDescriptorProto_Options:
rawOptions = appendOptions(rawOptions, v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
if len(rawMethods) > 0 {
sd.L2.Methods.List = make([]Method, len(rawMethods))
for i, b := range rawMethods {
sd.L2.Methods.List[i].unmarshalFull(b, sb, sd.L0.ParentFile, sd, i)
}
}
sd.L2.Options = sd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Service, rawOptions)
}
func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
md.L0.ParentFile = pf
md.L0.Parent = pd
md.L0.Index = i
var rawOptions []byte
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
b = b[n:]
switch typ {
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case fieldnum.MethodDescriptorProto_ClientStreaming:
md.L1.IsStreamingClient = protowire.DecodeBool(v)
case fieldnum.MethodDescriptorProto_ServerStreaming:
md.L1.IsStreamingServer = protowire.DecodeBool(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case fieldnum.MethodDescriptorProto_Name:
md.L0.FullName = appendFullName(sb, pd.FullName(), v)
case fieldnum.MethodDescriptorProto_InputType:
md.L1.Input = PlaceholderMessage(makeFullName(sb, v))
case fieldnum.MethodDescriptorProto_OutputType:
md.L1.Output = PlaceholderMessage(makeFullName(sb, v))
case fieldnum.MethodDescriptorProto_Options:
rawOptions = appendOptions(rawOptions, v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
}
}
md.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Method, rawOptions)
}
// appendOptions appends src to dst, where the returned slice is never nil.
// This is necessary to distinguish between empty and unpopulated options.
func appendOptions(dst, src []byte) []byte {
if dst == nil {
dst = []byte{}
}
return append(dst, src...)
}
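The nil/non-nil distinction appendOptions preserves is what lets optionsUnmarshaler below return nil when no options field was seen at all. A small sketch of that behaviour (illustrative only; appendOptionsSketch is a local copy of the helper, not an exported API):

package main

import "fmt"

// appendOptionsSketch copies the helper above: the result is never nil, so a
// later nil check can tell "no options field on the wire" apart from "an
// empty options message was present".
func appendOptionsSketch(dst, src []byte) []byte {
    if dst == nil {
        dst = []byte{}
    }
    return append(dst, src...)
}

func main() {
    var raw []byte                      // no options field seen yet
    fmt.Println(raw == nil)             // true
    raw = appendOptionsSketch(raw, nil) // an empty options message was seen
    fmt.Println(raw == nil, len(raw))   // false 0
}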
// optionsUnmarshaler constructs a lazy unmarshal function for an options message.
//
// The type of message to unmarshal to is passed as a pointer since the
// vars in descopts may not yet be populated at the time this function is called.
func (db *Builder) optionsUnmarshaler(p *pref.ProtoMessage, b []byte) func() pref.ProtoMessage {
if b == nil {
return nil
}
var opts pref.ProtoMessage
var once sync.Once
return func() pref.ProtoMessage {
once.Do(func() {
if *p == nil {
panic("Descriptor.Options called without importing the descriptor package")
}
opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(pref.ProtoMessage)
if err := (proto.UnmarshalOptions{
AllowPartial: true,
Resolver: db.TypeResolver,
}).Unmarshal(b, opts); err != nil {
panic(err)
}
})
return opts
}
}
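optionsUnmarshaler defers the actual proto.Unmarshal until the options are first requested, guarding the work with sync.Once so it runs at most once. A stripped-down sketch of the same lazy-closure pattern (illustrative only; the decode step is a stand-in, not the real descriptor options type):

package main

import (
    "fmt"
    "sync"
)

// lazyDecode mirrors the closure built by optionsUnmarshaler above: the raw
// bytes are decoded at most once, on first use, and the result is cached.
// The decode step here is a stand-in for proto.Unmarshal.
func lazyDecode(raw []byte) func() string {
    if raw == nil {
        return nil // no options were present on the wire
    }
    var (
        once   sync.Once
        result string
    )
    return func() string {
        once.Do(func() {
            result = fmt.Sprintf("decoded %d bytes", len(raw))
        })
        return result
    }
}

func main() {
    get := lazyDecode([]byte{0x08, 0x01})
    fmt.Println(get()) // decoded 2 bytes
    fmt.Println(get()) // cached: the decode ran only once
}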

View file

@ -0,0 +1,286 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package filedesc
import (
"fmt"
"math"
"sort"
"sync"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/descfmt"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/reflect/protoreflect"
pref "google.golang.org/protobuf/reflect/protoreflect"
)
type FileImports []pref.FileImport
func (p *FileImports) Len() int { return len(*p) }
func (p *FileImports) Get(i int) pref.FileImport { return (*p)[i] }
func (p *FileImports) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
func (p *FileImports) ProtoInternal(pragma.DoNotImplement) {}
type Names struct {
List []pref.Name
once sync.Once
has map[pref.Name]int // protected by once
}
func (p *Names) Len() int { return len(p.List) }
func (p *Names) Get(i int) pref.Name { return p.List[i] }
func (p *Names) Has(s pref.Name) bool { return p.lazyInit().has[s] > 0 }
func (p *Names) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
func (p *Names) ProtoInternal(pragma.DoNotImplement) {}
func (p *Names) lazyInit() *Names {
p.once.Do(func() {
if len(p.List) > 0 {
p.has = make(map[pref.Name]int, len(p.List))
for _, s := range p.List {
p.has[s] = p.has[s] + 1
}
}
})
return p
}
// CheckValid reports any errors with the set of names with an error message
// that completes the sentence: "names is invalid because it has ..."
func (p *Names) CheckValid() error {
for s, n := range p.lazyInit().has {
switch {
case n > 1:
return errors.New("duplicate name: %q", s)
case false && !s.IsValid():
// NOTE: The C++ implementation does not validate the identifier.
// See https://github.com/protocolbuffers/protobuf/issues/6335.
return errors.New("invalid name: %q", s)
}
}
return nil
}
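CheckValid relies on the count map built in lazyInit: any name counted more than once is reported as a duplicate. A self-contained sketch of the same duplicate check using plain strings and the standard library (illustrative only):

package main

import "fmt"

// checkDuplicates mirrors Names.CheckValid above: count each name once, then
// report any name that occurred more than once.
func checkDuplicates(names []string) error {
    has := make(map[string]int, len(names))
    for _, s := range names {
        has[s]++
    }
    for s, n := range has {
        if n > 1 {
            return fmt.Errorf("duplicate name: %q", s)
        }
    }
    return nil
}

func main() {
    fmt.Println(checkDuplicates([]string{"foo", "bar"}))        // <nil>
    fmt.Println(checkDuplicates([]string{"foo", "bar", "foo"})) // duplicate name: "foo"
}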
type EnumRanges struct {
List [][2]pref.EnumNumber // start inclusive; end inclusive
once sync.Once
sorted [][2]pref.EnumNumber // protected by once
}
func (p *EnumRanges) Len() int { return len(p.List) }
func (p *EnumRanges) Get(i int) [2]pref.EnumNumber { return p.List[i] }
func (p *EnumRanges) Has(n pref.EnumNumber) bool {
for ls := p.lazyInit().sorted; len(ls) > 0; {
i := len(ls) / 2
switch r := enumRange(ls[i]); {
case n < r.Start():
ls = ls[:i] // search lower
case n > r.End():
ls = ls[i+1:] // search upper
default:
return true
}
}
return false
}
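Has performs a binary-search-style walk over ranges sorted by start, where both ends are inclusive. A standalone sketch of that search over plain int32 ranges (illustrative only):

package main

import "fmt"

// hasInRanges mirrors EnumRanges.Has above: repeatedly halve a list of ranges
// sorted by start, where both ends are inclusive.
func hasInRanges(sorted [][2]int32, n int32) bool {
    ls := sorted
    for len(ls) > 0 {
        i := len(ls) / 2
        r := ls[i]
        switch {
        case n < r[0]:
            ls = ls[:i] // search the lower half
        case n > r[1]:
            ls = ls[i+1:] // search the upper half
        default:
            return true
        }
    }
    return false
}

func main() {
    ranges := [][2]int32{{1, 1}, {5, 9}, {100, 200}} // sorted by start
    fmt.Println(hasInRanges(ranges, 7))   // true
    fmt.Println(hasInRanges(ranges, 10))  // false
    fmt.Println(hasInRanges(ranges, 200)) // true
}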
func (p *EnumRanges) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
func (p *EnumRanges) ProtoInternal(pragma.DoNotImplement) {}
func (p *EnumRanges) lazyInit() *EnumRanges {
p.once.Do(func() {
p.sorted = append(p.sorted, p.List...)
sort.Slice(p.sorted, func(i, j int) bool {
return p.sorted[i][0] < p.sorted[j][0]
})
})
return p
}
// CheckValid reports any errors with the set of ranges with an error message
// that completes the sentence: "ranges is invalid because it has ..."
func (p *EnumRanges) CheckValid() error {
var rp enumRange
for i, r := range p.lazyInit().sorted {
r := enumRange(r)
switch {
case !(r.Start() <= r.End()):
return errors.New("invalid range: %v", r)
case !(rp.End() < r.Start()) && i > 0:
return errors.New("overlapping ranges: %v with %v", rp, r)
}
rp = r
}
return nil
}
type enumRange [2]protoreflect.EnumNumber
func (r enumRange) Start() protoreflect.EnumNumber { return r[0] } // inclusive
func (r enumRange) End() protoreflect.EnumNumber { return r[1] } // inclusive
func (r enumRange) String() string {
if r.Start() == r.End() {
return fmt.Sprintf("%d", r.Start())
}
return fmt.Sprintf("%d to %d", r.Start(), r.End())
}
type FieldRanges struct {
List [][2]pref.FieldNumber // start inclusive; end exclusive
once sync.Once
sorted [][2]pref.FieldNumber // protected by once
}
func (p *FieldRanges) Len() int { return len(p.List) }
func (p *FieldRanges) Get(i int) [2]pref.FieldNumber { return p.List[i] }
func (p *FieldRanges) Has(n pref.FieldNumber) bool {
for ls := p.lazyInit().sorted; len(ls) > 0; {
i := len(ls) / 2
switch r := fieldRange(ls[i]); {
case n < r.Start():
ls = ls[:i] // search lower
case n > r.End():
ls = ls[i+1:] // search upper
default:
return true
}
}
return false
}
func (p *FieldRanges) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
func (p *FieldRanges) ProtoInternal(pragma.DoNotImplement) {}
func (p *FieldRanges) lazyInit() *FieldRanges {
p.once.Do(func() {
p.sorted = append(p.sorted, p.List...)
sort.Slice(p.sorted, func(i, j int) bool {
return p.sorted[i][0] < p.sorted[j][0]
})
})
return p
}
// CheckValid reports any errors with the set of ranges with an error message
// that completes the sentence: "ranges is invalid because it has ..."
func (p *FieldRanges) CheckValid(isMessageSet bool) error {
var rp fieldRange
for i, r := range p.lazyInit().sorted {
r := fieldRange(r)
switch {
case !isValidFieldNumber(r.Start(), isMessageSet):
return errors.New("invalid field number: %d", r.Start())
case !isValidFieldNumber(r.End(), isMessageSet):
return errors.New("invalid field number: %d", r.End())
case !(r.Start() <= r.End()):
return errors.New("invalid range: %v", r)
case !(rp.End() < r.Start()) && i > 0:
return errors.New("overlapping ranges: %v with %v", rp, r)
}
rp = r
}
return nil
}
// isValidFieldNumber reports whether the field number is valid.
// Unlike the FieldNumber.IsValid method, it allows ranges that cover the
// reserved number range.
func isValidFieldNumber(n protoreflect.FieldNumber, isMessageSet bool) bool {
if isMessageSet {
return protowire.MinValidNumber <= n && n <= math.MaxInt32
}
return protowire.MinValidNumber <= n && n <= protowire.MaxValidNumber
}
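The distinction isValidFieldNumber draws is that MessageSet extension ranges may use numbers beyond the usual protobuf ceiling, as long as they fit in an int32. A tiny sketch of that check using the protowire constants (illustrative only; assumes the protowire package is importable as shown):

package main

import (
    "fmt"
    "math"

    "google.golang.org/protobuf/encoding/protowire"
)

func main() {
    // One past the ordinary protobuf ceiling: rejected for normal messages,
    // but still acceptable inside a MessageSet extension range, which only
    // needs to stay within MaxInt32.
    n := protowire.MaxValidNumber + 1
    fmt.Println(protowire.MinValidNumber <= n && n <= protowire.MaxValidNumber) // false
    fmt.Println(protowire.MinValidNumber <= n && n <= math.MaxInt32)            // true
}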
// CheckOverlap reports an error if p and q overlap.
func (p *FieldRanges) CheckOverlap(q *FieldRanges) error {
rps := p.lazyInit().sorted
rqs := q.lazyInit().sorted
for pi, qi := 0, 0; pi < len(rps) && qi < len(rqs); {
rp := fieldRange(rps[pi])
rq := fieldRange(rqs[qi])
if !(rp.End() < rq.Start() || rq.End() < rp.Start()) {
return errors.New("overlapping ranges: %v with %v", rp, rq)
}
if rp.Start() < rq.Start() {
pi++
} else {
qi++
}
}
return nil
}
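CheckOverlap walks two start-sorted range lists with a pair of cursors, converting the exclusive ends stored in FieldRanges.List to inclusive ones before testing for intersection. A self-contained sketch of that walk over plain int32 ranges (illustrative only):

package main

import "fmt"

// overlaps mirrors FieldRanges.CheckOverlap above: walk two start-sorted lists
// of ranges with exclusive ends (as stored in FieldRanges.List), convert the
// ends to inclusive ones, and report whether any pair intersects.
func overlaps(p, q [][2]int32) bool {
    pi, qi := 0, 0
    for pi < len(p) && qi < len(q) {
        rp, rq := p[pi], q[qi]
        pEnd, qEnd := rp[1]-1, rq[1]-1 // like fieldRange.End
        if !(pEnd < rq[0] || qEnd < rp[0]) {
            return true
        }
        if rp[0] < rq[0] {
            pi++
        } else {
            qi++
        }
    }
    return false
}

func main() {
    reserved := [][2]int32{{1, 10}}                       // covers 1..9
    fmt.Println(overlaps(reserved, [][2]int32{{10, 20}})) // false: 10..19 is disjoint
    fmt.Println(overlaps(reserved, [][2]int32{{9, 20}}))  // true: 9..19 touches 1..9
}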
type fieldRange [2]protoreflect.FieldNumber
func (r fieldRange) Start() protoreflect.FieldNumber { return r[0] } // inclusive
func (r fieldRange) End() protoreflect.FieldNumber { return r[1] - 1 } // inclusive
func (r fieldRange) String() string {
if r.Start() == r.End() {
return fmt.Sprintf("%d", r.Start())
}
return fmt.Sprintf("%d to %d", r.Start(), r.End())
}
type FieldNumbers struct {
List []pref.FieldNumber
once sync.Once
has map[pref.FieldNumber]struct{} // protected by once
}
func (p *FieldNumbers) Len() int { return len(p.List) }
func (p *FieldNumbers) Get(i int) pref.FieldNumber { return p.List[i] }
func (p *FieldNumbers) Has(n pref.FieldNumber) bool {
p.once.Do(func() {
if len(p.List) > 0 {
p.has = make(map[pref.FieldNumber]struct{}, len(p.List))
for _, n := range p.List {
p.has[n] = struct{}{}
}
}
})
_, ok := p.has[n]
return ok
}
func (p *FieldNumbers) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
func (p *FieldNumbers) ProtoInternal(pragma.DoNotImplement) {}
type OneofFields struct {
List []pref.FieldDescriptor
once sync.Once
byName map[pref.Name]pref.FieldDescriptor // protected by once
byJSON map[string]pref.FieldDescriptor // protected by once
byNum map[pref.FieldNumber]pref.FieldDescriptor // protected by once
}
func (p *OneofFields) Len() int { return len(p.List) }
func (p *OneofFields) Get(i int) pref.FieldDescriptor { return p.List[i] }
func (p *OneofFields) ByName(s pref.Name) pref.FieldDescriptor { return p.lazyInit().byName[s] }
func (p *OneofFields) ByJSONName(s string) pref.FieldDescriptor { return p.lazyInit().byJSON[s] }
func (p *OneofFields) ByNumber(n pref.FieldNumber) pref.FieldDescriptor { return p.lazyInit().byNum[n] }
func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {}
func (p *OneofFields) lazyInit() *OneofFields {
p.once.Do(func() {
if len(p.List) > 0 {
p.byName = make(map[pref.Name]pref.FieldDescriptor, len(p.List))
p.byJSON = make(map[string]pref.FieldDescriptor, len(p.List))
p.byNum = make(map[pref.FieldNumber]pref.FieldDescriptor, len(p.List))
for _, f := range p.List {
// Field names and numbers are guaranteed to be unique.
p.byName[f.Name()] = f
p.byJSON[f.JSONName()] = f
p.byNum[f.Number()] = f
}
}
})
return p
}
type SourceLocations struct {
List []pref.SourceLocation
}
func (p *SourceLocations) Len() int { return len(p.List) }
func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.List[i] }
func (p *SourceLocations) ProtoInternal(pragma.DoNotImplement) {}

View file

@ -0,0 +1,345 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by generate-types. DO NOT EDIT.
package filedesc
import (
"fmt"
"sync"
"google.golang.org/protobuf/internal/descfmt"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/reflect/protoreflect"
)
type Enums struct {
List []Enum
once sync.Once
byName map[protoreflect.Name]*Enum // protected by once
}
func (p *Enums) Len() int {
return len(p.List)
}
func (p *Enums) Get(i int) protoreflect.EnumDescriptor {
return &p.List[i]
}
func (p *Enums) ByName(s protoreflect.Name) protoreflect.EnumDescriptor {
if d := p.lazyInit().byName[s]; d != nil {
return d
}
return nil
}
func (p *Enums) Format(s fmt.State, r rune) {
descfmt.FormatList(s, r, p)
}
func (p *Enums) ProtoInternal(pragma.DoNotImplement) {}
func (p *Enums) lazyInit() *Enums {
p.once.Do(func() {
if len(p.List) > 0 {
p.byName = make(map[protoreflect.Name]*Enum, len(p.List))
for i := range p.List {
d := &p.List[i]
if _, ok := p.byName[d.Name()]; !ok {
p.byName[d.Name()] = d
}
}
}
})
return p
}
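Each generated container in this file follows the same lazy-index pattern Enums uses: the byName map is built once, on first lookup, and the first entry wins when a name repeats. A minimal standalone sketch of that pattern (illustrative only; lazyIndex and its fields are invented for the example):

package main

import (
    "fmt"
    "sync"
)

// lazyIndex mirrors the generated lazyInit methods above: the name index is
// built once, on first lookup, and the first entry wins if a name repeats.
type lazyIndex struct {
    list   []string
    once   sync.Once
    byName map[string]int // name -> index into list, protected by once
}

func (p *lazyIndex) indexByName(s string) (int, bool) {
    p.once.Do(func() {
        p.byName = make(map[string]int, len(p.list))
        for i, name := range p.list {
            if _, ok := p.byName[name]; !ok {
                p.byName[name] = i
            }
        }
    })
    i, ok := p.byName[s]
    return i, ok
}

func main() {
    idx := &lazyIndex{list: []string{"Color", "Shape", "Color"}}
    fmt.Println(idx.indexByName("Shape")) // 1 true
    fmt.Println(idx.indexByName("Color")) // 0 true: first definition wins
    fmt.Println(idx.indexByName("Size"))  // 0 false
}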
type EnumValues struct {
List []EnumValue
once sync.Once
byName map[protoreflect.Name]*EnumValue // protected by once
byNum map[protoreflect.EnumNumber]*EnumValue // protected by once
}
func (p *EnumValues) Len() int {
return len(p.List)
}
func (p *EnumValues) Get(i int) protoreflect.EnumValueDescriptor {
return &p.List[i]
}
func (p *EnumValues) ByName(s protoreflect.Name) protoreflect.EnumValueDescriptor {
if d := p.lazyInit().byName[s]; d != nil {
return d
}
return nil
}
func (p *EnumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor {
if d := p.lazyInit().byNum[n]; d != nil {
return d
}
return nil
}
func (p *EnumValues) Format(s fmt.State, r rune) {
descfmt.FormatList(s, r, p)
}
func (p *EnumValues) ProtoInternal(pragma.DoNotImplement) {}
func (p *EnumValues) lazyInit() *EnumValues {
p.once.Do(func() {
if len(p.List) > 0 {
p.byName = make(map[protoreflect.Name]*EnumValue, len(p.List))
p.byNum = make(map[protoreflect.EnumNumber]*EnumValue, len(p.List))
for i := range p.List {
d := &p.List[i]
if _, ok := p.byName[d.Name()]; !ok {
p.byName[d.Name()] = d
}
if _, ok := p.byNum[d.Number()]; !ok {
p.byNum[d.Number()] = d
}
}
}
})
return p
}
type Messages struct {
List []Message
once sync.Once
byName map[protoreflect.Name]*Message // protected by once
}
func (p *Messages) Len() int {
return len(p.List)
}
func (p *Messages) Get(i int) protoreflect.MessageDescriptor {
return &p.List[i]
}
func (p *Messages) ByName(s protoreflect.Name) protoreflect.MessageDescriptor {
if d := p.lazyInit().byName[s]; d != nil {
return d
}
return nil
}
func (p *Messages) Format(s fmt.State, r rune) {
descfmt.FormatList(s, r, p)
}
func (p *Messages) ProtoInternal(pragma.DoNotImplement) {}
func (p *Messages) lazyInit() *Messages {
p.once.Do(func() {
if len(p.List) > 0 {
p.byName = make(map[protoreflect.Name]*Message, len(p.List))
for i := range p.List {
d := &p.List[i]
if _, ok := p.byName[d.Name()]; !ok {
p.byName[d.Name()] = d
}
}
}
})
return p
}
type Fields struct {
List []Field
once sync.Once
byName map[protoreflect.Name]*Field // protected by once
byJSON map[string]*Field // protected by once
byNum map[protoreflect.FieldNumber]*Field // protected by once
}
func (p *Fields) Len() int {
return len(p.List)
}
func (p *Fields) Get(i int) protoreflect.FieldDescriptor {
return &p.List[i]
}
func (p *Fields) ByName(s protoreflect.Name) protoreflect.FieldDescriptor {
if d := p.lazyInit().byName[s]; d != nil {
return d
}
return nil
}
func (p *Fields) ByJSONName(s string) protoreflect.FieldDescriptor {
if d := p.lazyInit().byJSON[s]; d != nil {
return d
}
return nil
}
func (p *Fields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor {
if d := p.lazyInit().byNum[n]; d != nil {
return d
}
return nil
}
func (p *Fields) Format(s fmt.State, r rune) {
descfmt.FormatList(s, r, p)
}
func (p *Fields) ProtoInternal(pragma.DoNotImplement) {}
func (p *Fields) lazyInit() *Fields {
p.once.Do(func() {
if len(p.List) > 0 {
p.byName = make(map[protoreflect.Name]*Field, len(p.List))
p.byJSON = make(map[string]*Field, len(p.List))
p.byNum = make(map[protoreflect.FieldNumber]*Field, len(p.List))
for i := range p.List {
d := &p.List[i]
if _, ok := p.byName[d.Name()]; !ok {
p.byName[d.Name()] = d
}
if _, ok := p.byJSON[d.JSONName()]; !ok {
p.byJSON[d.JSONName()] = d
}
if _, ok := p.byNum[d.Number()]; !ok {
p.byNum[d.Number()] = d
}
}
}
})
return p
}
type Oneofs struct {
List []Oneof
once sync.Once
byName map[protoreflect.Name]*Oneof // protected by once
}
func (p *Oneofs) Len() int {
return len(p.List)
}
func (p *Oneofs) Get(i int) protoreflect.OneofDescriptor {
return &p.List[i]
}
func (p *Oneofs) ByName(s protoreflect.Name) protoreflect.OneofDescriptor {
if d := p.lazyInit().byName[s]; d != nil {
return d
}
return nil
}
func (p *Oneofs) Format(s fmt.State, r rune) {
descfmt.FormatList(s, r, p)
}
func (p *Oneofs) ProtoInternal(pragma.DoNotImplement) {}
func (p *Oneofs) lazyInit() *Oneofs {
p.once.Do(func() {
if len(p.List) > 0 {
p.byName = make(map[protoreflect.Name]*Oneof, len(p.List))
for i := range p.List {
d := &p.List[i]
if _, ok := p.byName[d.Name()]; !ok {
p.byName[d.Name()] = d
}
}
}
})
return p
}
type Extensions struct {
List []Extension
once sync.Once
byName map[protoreflect.Name]*Extension // protected by once
}
func (p *Extensions) Len() int {
return len(p.List)
}
func (p *Extensions) Get(i int) protoreflect.ExtensionDescriptor {
return &p.List[i]
}
func (p *Extensions) ByName(s protoreflect.Name) protoreflect.ExtensionDescriptor {
if d := p.lazyInit().byName[s]; d != nil {
return d
}
return nil
}
func (p *Extensions) Format(s fmt.State, r rune) {
descfmt.FormatList(s, r, p)
}
func (p *Extensions) ProtoInternal(pragma.DoNotImplement) {}
func (p *Extensions) lazyInit() *Extensions {
p.once.Do(func() {
if len(p.List) > 0 {
p.byName = make(map[protoreflect.Name]*Extension, len(p.List))
for i := range p.List {
d := &p.List[i]
if _, ok := p.byName[d.Name()]; !ok {
p.byName[d.Name()] = d
}
}
}
})
return p
}
type Services struct {
List []Service
once sync.Once
byName map[protoreflect.Name]*Service // protected by once
}
func (p *Services) Len() int {
return len(p.List)
}
func (p *Services) Get(i int) protoreflect.ServiceDescriptor {
return &p.List[i]
}
func (p *Services) ByName(s protoreflect.Name) protoreflect.ServiceDescriptor {
if d := p.lazyInit().byName[s]; d != nil {
return d
}
return nil
}
func (p *Services) Format(s fmt.State, r rune) {
descfmt.FormatList(s, r, p)
}
func (p *Services) ProtoInternal(pragma.DoNotImplement) {}
func (p *Services) lazyInit() *Services {
p.once.Do(func() {
if len(p.List) > 0 {
p.byName = make(map[protoreflect.Name]*Service, len(p.List))
for i := range p.List {
d := &p.List[i]
if _, ok := p.byName[d.Name()]; !ok {
p.byName[d.Name()] = d
}
}
}
})
return p
}
type Methods struct {
List []Method
once sync.Once
byName map[protoreflect.Name]*Method // protected by once
}
func (p *Methods) Len() int {
return len(p.List)
}
func (p *Methods) Get(i int) protoreflect.MethodDescriptor {
return &p.List[i]
}
func (p *Methods) ByName(s protoreflect.Name) protoreflect.MethodDescriptor {
if d := p.lazyInit().byName[s]; d != nil {
return d
}
return nil
}
func (p *Methods) Format(s fmt.State, r rune) {
descfmt.FormatList(s, r, p)
}
func (p *Methods) ProtoInternal(pragma.DoNotImplement) {}
func (p *Methods) lazyInit() *Methods {
p.once.Do(func() {
if len(p.List) > 0 {
p.byName = make(map[protoreflect.Name]*Method, len(p.List))
for i := range p.List {
d := &p.List[i]
if _, ok := p.byName[d.Name()]; !ok {
p.byName[d.Name()] = d
}
}
}
})
return p
}

View file

@ -0,0 +1,107 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package filedesc
import (
"google.golang.org/protobuf/internal/descopts"
"google.golang.org/protobuf/internal/pragma"
pref "google.golang.org/protobuf/reflect/protoreflect"
)
var (
emptyNames = new(Names)
emptyEnumRanges = new(EnumRanges)
emptyFieldRanges = new(FieldRanges)
emptyFieldNumbers = new(FieldNumbers)
emptySourceLocations = new(SourceLocations)
emptyFiles = new(FileImports)
emptyMessages = new(Messages)
emptyFields = new(Fields)
emptyOneofs = new(Oneofs)
emptyEnums = new(Enums)
emptyEnumValues = new(EnumValues)
emptyExtensions = new(Extensions)
emptyServices = new(Services)
)
// PlaceholderFile is a placeholder, representing only the file path.
type PlaceholderFile string
func (f PlaceholderFile) ParentFile() pref.FileDescriptor { return f }
func (f PlaceholderFile) Parent() pref.Descriptor { return nil }
func (f PlaceholderFile) Index() int { return 0 }
func (f PlaceholderFile) Syntax() pref.Syntax { return 0 }
func (f PlaceholderFile) Name() pref.Name { return "" }
func (f PlaceholderFile) FullName() pref.FullName { return "" }
func (f PlaceholderFile) IsPlaceholder() bool { return true }
func (f PlaceholderFile) Options() pref.ProtoMessage { return descopts.File }
func (f PlaceholderFile) Path() string { return string(f) }
func (f PlaceholderFile) Package() pref.FullName { return "" }
func (f PlaceholderFile) Imports() pref.FileImports { return emptyFiles }
func (f PlaceholderFile) Messages() pref.MessageDescriptors { return emptyMessages }
func (f PlaceholderFile) Enums() pref.EnumDescriptors { return emptyEnums }
func (f PlaceholderFile) Extensions() pref.ExtensionDescriptors { return emptyExtensions }
func (f PlaceholderFile) Services() pref.ServiceDescriptors { return emptyServices }
func (f PlaceholderFile) SourceLocations() pref.SourceLocations { return emptySourceLocations }
func (f PlaceholderFile) ProtoType(pref.FileDescriptor) { return }
func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return }
// PlaceholderEnum is a placeholder, representing only the full name.
type PlaceholderEnum pref.FullName
func (e PlaceholderEnum) ParentFile() pref.FileDescriptor { return nil }
func (e PlaceholderEnum) Parent() pref.Descriptor { return nil }
func (e PlaceholderEnum) Index() int { return 0 }
func (e PlaceholderEnum) Syntax() pref.Syntax { return 0 }
func (e PlaceholderEnum) Name() pref.Name { return pref.FullName(e).Name() }
func (e PlaceholderEnum) FullName() pref.FullName { return pref.FullName(e) }
func (e PlaceholderEnum) IsPlaceholder() bool { return true }
func (e PlaceholderEnum) Options() pref.ProtoMessage { return descopts.Enum }
func (e PlaceholderEnum) Values() pref.EnumValueDescriptors { return emptyEnumValues }
func (e PlaceholderEnum) ReservedNames() pref.Names { return emptyNames }
func (e PlaceholderEnum) ReservedRanges() pref.EnumRanges { return emptyEnumRanges }
func (e PlaceholderEnum) ProtoType(pref.EnumDescriptor) { return }
func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return }
// PlaceholderEnumValue is a placeholder, representing only the full name.
type PlaceholderEnumValue pref.FullName
func (e PlaceholderEnumValue) ParentFile() pref.FileDescriptor { return nil }
func (e PlaceholderEnumValue) Parent() pref.Descriptor { return nil }
func (e PlaceholderEnumValue) Index() int { return 0 }
func (e PlaceholderEnumValue) Syntax() pref.Syntax { return 0 }
func (e PlaceholderEnumValue) Name() pref.Name { return pref.FullName(e).Name() }
func (e PlaceholderEnumValue) FullName() pref.FullName { return pref.FullName(e) }
func (e PlaceholderEnumValue) IsPlaceholder() bool { return true }
func (e PlaceholderEnumValue) Options() pref.ProtoMessage { return descopts.EnumValue }
func (e PlaceholderEnumValue) Number() pref.EnumNumber { return 0 }
func (e PlaceholderEnumValue) ProtoType(pref.EnumValueDescriptor) { return }
func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return }
// PlaceholderMessage is a placeholder, representing only the full name.
type PlaceholderMessage pref.FullName
func (m PlaceholderMessage) ParentFile() pref.FileDescriptor { return nil }
func (m PlaceholderMessage) Parent() pref.Descriptor { return nil }
func (m PlaceholderMessage) Index() int { return 0 }
func (m PlaceholderMessage) Syntax() pref.Syntax { return 0 }
func (m PlaceholderMessage) Name() pref.Name { return pref.FullName(m).Name() }
func (m PlaceholderMessage) FullName() pref.FullName { return pref.FullName(m) }
func (m PlaceholderMessage) IsPlaceholder() bool { return true }
func (m PlaceholderMessage) Options() pref.ProtoMessage { return descopts.Message }
func (m PlaceholderMessage) IsMapEntry() bool { return false }
func (m PlaceholderMessage) Fields() pref.FieldDescriptors { return emptyFields }
func (m PlaceholderMessage) Oneofs() pref.OneofDescriptors { return emptyOneofs }
func (m PlaceholderMessage) ReservedNames() pref.Names { return emptyNames }
func (m PlaceholderMessage) ReservedRanges() pref.FieldRanges { return emptyFieldRanges }
func (m PlaceholderMessage) RequiredNumbers() pref.FieldNumbers { return emptyFieldNumbers }
func (m PlaceholderMessage) ExtensionRanges() pref.FieldRanges { return emptyFieldRanges }
func (m PlaceholderMessage) ExtensionRangeOptions(int) pref.ProtoMessage { panic("index out of range") }
func (m PlaceholderMessage) Messages() pref.MessageDescriptors { return emptyMessages }
func (m PlaceholderMessage) Enums() pref.EnumDescriptors { return emptyEnums }
func (m PlaceholderMessage) Extensions() pref.ExtensionDescriptors { return emptyExtensions }
func (m PlaceholderMessage) ProtoType(pref.MessageDescriptor) { return }
func (m PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return }
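PlaceholderMessage, like the other placeholders above, is just a full name pretending to be a descriptor: lookups it cannot answer return empty values, and IsPlaceholder reports true so callers know the real descriptor has not been resolved yet. A reduced sketch of the idea using the public protoreflect.FullName type (illustrative only; it implements just three methods, not the full MessageDescriptor interface):

package main

import (
    "fmt"

    "google.golang.org/protobuf/reflect/protoreflect"
)

// placeholderMessage keeps only the full name, as PlaceholderMessage above
// does; anything more detailed must wait until the real descriptor resolves.
type placeholderMessage protoreflect.FullName

func (m placeholderMessage) FullName() protoreflect.FullName { return protoreflect.FullName(m) }
func (m placeholderMessage) Name() protoreflect.Name         { return protoreflect.FullName(m).Name() }
func (m placeholderMessage) IsPlaceholder() bool             { return true }

func main() {
    m := placeholderMessage("google.protobuf.Timestamp")
    fmt.Println(m.FullName())      // google.protobuf.Timestamp
    fmt.Println(m.Name())          // Timestamp
    fmt.Println(m.IsPlaceholder()) // true
}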

Some files were not shown because too many files have changed in this diff.