diff --git a/go.mod b/go.mod
index ed8750a15..c09e22212 100644
--- a/go.mod
+++ b/go.mod
@@ -1,27 +1,27 @@
 module github.com/VictoriaMetrics/VictoriaMetrics
 
 require (
+	cloud.google.com/go v0.80.0 // indirect
 	cloud.google.com/go/storage v1.14.0
 	github.com/VictoriaMetrics/fastcache v1.5.8
 
 	// Do not use the original github.com/valyala/fasthttp because of issues
 	// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
 	github.com/VictoriaMetrics/fasthttp v1.0.14
-	github.com/VictoriaMetrics/metrics v1.17.1
+	github.com/VictoriaMetrics/metrics v1.17.2
 	github.com/VictoriaMetrics/metricsql v0.14.0
-	github.com/aws/aws-sdk-go v1.37.31
+	github.com/aws/aws-sdk-go v1.38.4
 	github.com/cespare/xxhash/v2 v2.1.1
-	github.com/cheggaaa/pb/v3 v3.0.6
+	github.com/cheggaaa/pb/v3 v3.0.7
 	github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
 	github.com/fatih/color v1.10.0 // indirect
 	github.com/golang/snappy v0.0.3
 	github.com/influxdata/influxdb v1.8.4
 	github.com/klauspost/compress v1.11.12
-	github.com/lithammer/go-jump-consistent-hash v1.0.1
+	github.com/lithammer/go-jump-consistent-hash v1.0.2
 	github.com/mattn/go-runewidth v0.0.10 // indirect
-	github.com/prometheus/client_golang v1.9.0 // indirect
-	github.com/prometheus/common v0.19.0 // indirect
-	github.com/prometheus/procfs v0.6.0 // indirect
+	github.com/prometheus/client_golang v1.10.0 // indirect
+	github.com/prometheus/common v0.20.0 // indirect
 	github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
 	github.com/rivo/uniseg v0.2.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
@@ -33,11 +33,11 @@ require (
 	github.com/valyala/histogram v1.1.2
 	github.com/valyala/quicktemplate v1.6.3
 	golang.org/x/mod v0.4.2 // indirect
-	golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4 // indirect
-	golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84
-	golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa
-	google.golang.org/api v0.42.0
-	google.golang.org/genproto v0.0.0-20210315173758-2651cd453018 // indirect
+	golang.org/x/net v0.0.0-20210324205630-d1beb07c2056 // indirect
+	golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558
+	golang.org/x/sys v0.0.0-20210324051608-47abb6519492
+	google.golang.org/api v0.43.0
+	google.golang.org/genproto v0.0.0-20210325141258-5636347f2b14 // indirect
 	gopkg.in/yaml.v2 v2.4.0
 )
 
diff --git a/go.sum b/go.sum
index 7da02b112..2533d3e88 100644
--- a/go.sum
+++ b/go.sum
@@ -19,8 +19,9 @@ cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKP
 cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
 cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
 cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
-cloud.google.com/go v0.79.0 h1:oqqswrt4x6b9OGBnNqdssxBl1xf0rSUNjU2BR4BZar0=
 cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.80.0 h1:kAdyAMrj9CjqOSGiluseVjIgAyQ3uxADYtUYR6MwYeY=
+cloud.google.com/go v0.80.0/go.mod h1:fqpb6QRi1CFGAMXDoE72G+b+Ybv7dMB/T1tbExDHktI=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -86,8 +87,8 @@ github.com/VictoriaMetrics/fastcache v1.5.8/go.mod h1:SiMZNgwEPJ9qWLshu9tyuE6bKc
 github.com/VictoriaMetrics/fasthttp v1.0.14 h1:iWCdHg7JQ1SO0xvPAgw3QFpFT3he+Ugdshg+1clN6CQ=
 github.com/VictoriaMetrics/fasthttp v1.0.14/go.mod h1:eDVgYyGts3xXpYpVGDxQ3ZlQKW5TSvOqfc9FryjH1JA=
 github.com/VictoriaMetrics/metrics v1.12.2/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
-github.com/VictoriaMetrics/metrics v1.17.1 h1:hQCM4mUIrFWe0ZY8czGbiGdMRu7W491MAkH9INFruHI=
-github.com/VictoriaMetrics/metrics v1.17.1/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
+github.com/VictoriaMetrics/metrics v1.17.2 h1:9zPJ7DPfxdJWshOGLPLpAtPL0ZZ9AeUyQC3fIqG6Lvo=
+github.com/VictoriaMetrics/metrics v1.17.2/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
 github.com/VictoriaMetrics/metricsql v0.14.0 h1:XGbpZJVskUPJFo2C7vG6ATxXBwkBFPe7EWZXB2HZt2U=
 github.com/VictoriaMetrics/metricsql v0.14.0/go.mod h1:ylO7YITho/Iw6P71oEaGyHbO94bGoGtzWfLGqFhMIg8=
 github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
@@ -124,8 +125,8 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ
 github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
 github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.37.31 h1:eK7hgg1H4xivwopAbnzfQ7ZBbDb9cEkGDivd9rUMnJs=
-github.com/aws/aws-sdk-go v1.37.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
+github.com/aws/aws-sdk-go v1.38.4 h1:ifewiUUfuB6LrOR6PDqjlld3IIoWskrTVEGrzF2Q/v4=
+github.com/aws/aws-sdk-go v1.38.4/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -143,8 +144,8 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cheggaaa/pb/v3 v3.0.6 h1:ULPm1wpzvj60FvmCrX7bIaB80UgbhI+zSaQJKRfCbAs=
-github.com/cheggaaa/pb/v3 v3.0.6/go.mod h1:X1L61/+36nz9bjIsrDU52qHKOQukUQe2Ge+YvGuquCw=
+github.com/cheggaaa/pb/v3 v3.0.7 h1:58wduqwoqpsklsCZ7NZ5dsrCqPWv2t0+p85FcBds6XY=
+github.com/cheggaaa/pb/v3 v3.0.7/go.mod h1:X1L61/+36nz9bjIsrDU52qHKOQukUQe2Ge+YvGuquCw=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -364,8 +365,10 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
 github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
 github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
 github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
 github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1 h1:jAbXjIeW2ZSW2AwFxlGTDoc2CjI2XujLkV3ArsZFCvc=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
 github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -532,8 +535,8 @@ github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+
 github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
 github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
-github.com/lithammer/go-jump-consistent-hash v1.0.1 h1:RUuBvVsOT4MGzV80RfO7u3YYivqOLbWWz4WpCBemq3Y=
-github.com/lithammer/go-jump-consistent-hash v1.0.1/go.mod h1:Snz99O1UkmvgsOV76Jm7Zu4sokENziqvUCbPztFABIU=
+github.com/lithammer/go-jump-consistent-hash v1.0.2 h1:w74N9XiMa4dWZdoVnfLbnDhfpGOMCxlrudzt2e7wtyk=
+github.com/lithammer/go-jump-consistent-hash v1.0.2/go.mod h1:4MD1WDikNGnb9D56hAtscaZaOWOiCG+lLbRR5ZN9JL0=
 github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
 github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -661,8 +664,8 @@ github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O
 github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM=
-github.com/prometheus/client_golang v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU=
-github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
+github.com/prometheus/client_golang v1.10.0 h1:/o0BDeWzLWXNZ+4q5gXltUvaMpJqckTa+jTNoB+z4cg=
+github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -678,8 +681,9 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
 github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
-github.com/prometheus/common v0.19.0 h1:Itb4+NjG9wRdkAWgVucbM/adyIXxEhbw0866e0uZE6A=
-github.com/prometheus/common v0.19.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
+github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
+github.com/prometheus/common v0.20.0 h1:pfeDeUdQcIxOMutNjCejsEFp7qeP+/iltHSSmLpE+hU=
+github.com/prometheus/common v0.20.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -919,8 +923,9 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v
 golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4 h1:b0LrWgu8+q7z4J+0Y3Umo5q1dL7NXBkKBWkaVkAq17E=
 golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210324205630-d1beb07c2056 h1:sANdAef76Ioam9aQUUdcAqricwY/WUaMc4+7LY4eGg8=
+golang.org/x/net v0.0.0-20210324205630-d1beb07c2056/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -931,8 +936,9 @@ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ
 golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84 h1:duBc5zuJsmJXYOVVE/6PxejI+N3AaCqKjtsoLn1Je5Q=
 golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558 h1:D7nTwh4J0i+5mW4Zjzn5omvlr6YBcWywE6KOcatyNxY=
+golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1007,17 +1013,18 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210314195730-07df6a141424/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa h1:ZYxPR6aca/uhfRJyaOAtflSHjJYiktO7QnJC5ut7iY4=
-golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210324051608-47abb6519492 h1:Paq34FxTluEPvVyayQqMPgHm+vTOrIifmcYxFBx9TLg=
+golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1135,8 +1142,9 @@ google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ
 google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
 google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
 google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
-google.golang.org/api v0.42.0 h1:uqATLkpxiBrhrvFoebXUjvyzE9nQf+pVyy0Z0IHE+fc=
 google.golang.org/api v0.42.0/go.mod h1:+Oj4s6ch2SEGtPjGqfUfZonBH0GjQH89gTeKKAEGZKI=
+google.golang.org/api v0.43.0 h1:4sAyIHT6ZohtAQDoxws+ez7bROYmUlOVvsUscYCDTqA=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1190,8 +1198,10 @@ google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6D
 google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210312152112-fc591d9ea70f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210315173758-2651cd453018 h1:ZH67MZWSz0sEGHWoYAO1ysjL0yWEXTQ7gOxytol4KAk=
-google.golang.org/genproto v0.0.0-20210315173758-2651cd453018/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210323160006-e668133fea6a/go.mod h1:f2Bd7+2PlaVKmvKQ52aspJZXIDaRQBVdOOBfJ5i8OEs=
+google.golang.org/genproto v0.0.0-20210325141258-5636347f2b14 h1:0VNRpy5TroA/6mYt3pPEq+E3oomxLJ+FUit3+oIsUy4=
+google.golang.org/genproto v0.0.0-20210325141258-5636347f2b14/go.mod h1:f2Bd7+2PlaVKmvKQ52aspJZXIDaRQBVdOOBfJ5i8OEs=
 google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@@ -1225,8 +1235,10 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md
index 4b704c46e..d38b3a4ea 100644
--- a/vendor/cloud.google.com/go/CHANGES.md
+++ b/vendor/cloud.google.com/go/CHANGES.md
@@ -1,6 +1,35 @@
 # Changes
 
 
+## [0.80.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.79.0...v0.80.0) (2021-03-23)
+
+
+### ⚠ BREAKING CHANGES
+
+* **all:** This is a breaking change in dialogflow
+
+### Features
+
+* **appengine:** added vm_liveness, search_api_available, network_settings, service_account, build_env_variables, kms_key_reference to v1 API ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493))
+* **assuredworkloads:** Add 'resource_settings' field to provide custom properties (ids) for the provisioned projects. ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524))
+* **assuredworkloads:** add HIPAA and HITRUST compliance regimes ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524))
+* **dialogflow/cx:** added fallback option when restoring an agent docs: clarified experiment length ([cd70aa9](https://www.github.com/googleapis/google-cloud-go/commit/cd70aa9cc1a5dccfe4e49d2d6ca6db2119553c86))
+* **dialogflow/cx:** start generating apiv3 ([#3850](https://www.github.com/googleapis/google-cloud-go/issues/3850)) ([febbdcf](https://www.github.com/googleapis/google-cloud-go/commit/febbdcf13fcea3f5d8186c3d3dface1c0d27ef9e)), refs [#3634](https://www.github.com/googleapis/google-cloud-go/issues/3634)
+* **documentai:** add EVAL_SKIPPED value to the Provenance.OperationType enum in document.proto. ([cb43066](https://www.github.com/googleapis/google-cloud-go/commit/cb4306683926843f6e977f207fa6070bb9242a61))
+* **documentai:** start generating apiv1 ([#3853](https://www.github.com/googleapis/google-cloud-go/issues/3853)) ([d68e604](https://www.github.com/googleapis/google-cloud-go/commit/d68e604c953eea90489f6134e71849b24dd0fcbf))
+* **internal/godocfx:** add prettyprint class to code blocks ([#3819](https://www.github.com/googleapis/google-cloud-go/issues/3819)) ([6e49f21](https://www.github.com/googleapis/google-cloud-go/commit/6e49f2148b116ee439c8a882dcfeefb6e7647c57))
+* **internal/godocfx:** handle Markdown content ([#3816](https://www.github.com/googleapis/google-cloud-go/issues/3816)) ([56d5d0a](https://www.github.com/googleapis/google-cloud-go/commit/56d5d0a900197fb2de46120a0eda649f2c17448f))
+* **kms:** Add maxAttempts to retry policy for KMS gRPC service config feat: Add Bazel exports_files entry for KMS gRPC service config ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493))
+* **resourcesettings:** start generating apiv1 ([#3854](https://www.github.com/googleapis/google-cloud-go/issues/3854)) ([3b288b4](https://www.github.com/googleapis/google-cloud-go/commit/3b288b4fa593c6cb418f696b5b26768967c20b9e))
+* **speech:** Support output transcript to GCS for LongRunningRecognize. ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493))
+* **speech:** Support output transcript to GCS for LongRunningRecognize. ([cd70aa9](https://www.github.com/googleapis/google-cloud-go/commit/cd70aa9cc1a5dccfe4e49d2d6ca6db2119553c86))
+* **speech:** Support output transcript to GCS for LongRunningRecognize. ([35a8706](https://www.github.com/googleapis/google-cloud-go/commit/35a870662df8bf63c4ec10a0233d1d7a708007ee))
+
+
+### Miscellaneous Chores
+
+* **all:** auto-regenerate gapics ([#3837](https://www.github.com/googleapis/google-cloud-go/issues/3837)) ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524))
+
 ## [0.79.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.78.0...v0.79.0) (2021-03-10)
 
 
diff --git a/vendor/cloud.google.com/go/go.mod b/vendor/cloud.google.com/go/go.mod
index 8b114535b..24ebd1a85 100644
--- a/vendor/cloud.google.com/go/go.mod
+++ b/vendor/cloud.google.com/go/go.mod
@@ -5,7 +5,7 @@ go 1.11
 require (
 	cloud.google.com/go/storage v1.10.0
 	github.com/golang/mock v1.5.0
-	github.com/golang/protobuf v1.4.3
+	github.com/golang/protobuf v1.5.1
 	github.com/google/go-cmp v0.5.5
 	github.com/google/martian/v3 v3.1.0
 	github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5
@@ -13,11 +13,11 @@ require (
 	github.com/jstemmer/go-junit-report v0.9.1
 	go.opencensus.io v0.23.0
 	golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5
-	golang.org/x/net v0.0.0-20210226172049-e18ecbb05110
-	golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93
+	golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4
+	golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84
 	golang.org/x/text v0.3.5
 	golang.org/x/tools v0.1.0
-	google.golang.org/api v0.41.0
-	google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e
+	google.golang.org/api v0.42.0
+	google.golang.org/genproto v0.0.0-20210323160006-e668133fea6a
 	google.golang.org/grpc v1.36.0
 )
diff --git a/vendor/cloud.google.com/go/go.sum b/vendor/cloud.google.com/go/go.sum
index c494b49d3..6e00313a1 100644
--- a/vendor/cloud.google.com/go/go.sum
+++ b/vendor/cloud.google.com/go/go.sum
@@ -16,6 +16,7 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb
 cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
 cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
 cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -82,8 +83,10 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
 github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
 github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
 github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
 github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1 h1:jAbXjIeW2ZSW2AwFxlGTDoc2CjI2XujLkV3ArsZFCvc=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -221,8 +224,9 @@ golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwY
 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4 h1:b0LrWgu8+q7z4J+0Y3Umo5q1dL7NXBkKBWkaVkAq17E=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -232,8 +236,9 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ
 golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93 h1:alLDrZkL34Y2bnGHfvC1CYBRBXCXgx8AC2vY4MRtYX4=
 golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84 h1:duBc5zuJsmJXYOVVE/6PxejI+N3AaCqKjtsoLn1Je5Q=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -278,8 +283,11 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b h1:ggRgirZABFolTmi3sn6Ivd9SipZwLedQ5wR0aAKnFxU=
 golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210314195730-07df6a141424/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4 h1:EZ2mChiOa8udjfp6rRmswTbtZN/QzUQp4ptM4rnjHvc=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -363,8 +371,9 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513
 google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
 google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
 google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
-google.golang.org/api v0.41.0 h1:12aHIhhQCpWtd3Rcp2WwbboB5W72tJHcjzyA9MCoHAw=
 google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.42.0 h1:uqATLkpxiBrhrvFoebXUjvyzE9nQf+pVyy0Z0IHE+fc=
+google.golang.org/api v0.42.0/go.mod h1:+Oj4s6ch2SEGtPjGqfUfZonBH0GjQH89gTeKKAEGZKI=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -409,8 +418,10 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D
 google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e h1:ajd1UAja5y1pRx7xOU6R6faEHLKigztzPRvZ+mpE1Fo=
 google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210312152112-fc591d9ea70f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210323160006-e668133fea6a h1:XVaQ1+BDKvrRcgppHhtAaniHCKyV5xJAvymwsPHHFaE=
+google.golang.org/genproto v0.0.0-20210323160006-e668133fea6a/go.mod h1:f2Bd7+2PlaVKmvKQ52aspJZXIDaRQBVdOOBfJ5i8OEs=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -438,8 +449,10 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
index 09f3ccd65..ffe045e4e 100644
--- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
+++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
@@ -29,11 +29,11 @@
     "language": "Go",
     "client_library_type": "generated",
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/apigateway/apiv1",
-    "release_level": "beta"
+    "release_level": "ga"
   },
   "cloud.google.com/go/appengine/apiv1": {
     "distribution_name": "cloud.google.com/go/appengine/apiv1",
-    "description": "App Engine Audit Data",
+    "description": "App Engine Admin API",
     "language": "Go",
     "client_library_type": "generated",
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/appengine/apiv1",
@@ -359,6 +359,14 @@
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/dialogflow/apiv2",
     "release_level": "ga"
   },
+  "cloud.google.com/go/dialogflow/cx/apiv3": {
+    "distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3",
+    "description": "Dialogflow API",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/dialogflow/cx/apiv3",
+    "release_level": "beta"
+  },
   "cloud.google.com/go/dialogflow/cx/apiv3beta1": {
     "distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3beta1",
     "description": "Dialogflow API",
@@ -375,6 +383,14 @@
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/dlp/apiv2",
     "release_level": "ga"
   },
+  "cloud.google.com/go/documentai/apiv1": {
+    "distribution_name": "cloud.google.com/go/documentai/apiv1",
+    "description": "Cloud Document AI API",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/documentai/apiv1",
+    "release_level": "beta"
+  },
   "cloud.google.com/go/documentai/apiv1beta3": {
     "distribution_name": "cloud.google.com/go/documentai/apiv1beta3",
     "description": "Cloud Document AI API",
@@ -557,7 +573,7 @@
     "language": "Go",
     "client_library_type": "generated",
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/memcache/apiv1",
-    "release_level": "beta"
+    "release_level": "ga"
   },
   "cloud.google.com/go/memcache/apiv1beta2": {
     "distribution_name": "cloud.google.com/go/memcache/apiv1beta2",
@@ -621,7 +637,7 @@
     "language": "Go",
     "client_library_type": "generated",
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/orgpolicy/apiv2",
-    "release_level": "beta"
+    "release_level": "ga"
   },
   "cloud.google.com/go/osconfig/agentendpoint/apiv1": {
     "distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1",
@@ -781,6 +797,14 @@
     "language": "Go",
     "client_library_type": "generated",
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/resourcemanager/apiv2",
+    "release_level": "ga"
+  },
+  "cloud.google.com/go/resourcesettings/apiv1": {
+    "distribution_name": "cloud.google.com/go/resourcesettings/apiv1",
+    "description": "Resource Settings API",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/resourcesettings/apiv1",
     "release_level": "beta"
   },
   "cloud.google.com/go/retail/apiv2": {
@@ -877,7 +901,7 @@
     "language": "Go",
     "client_library_type": "generated",
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/servicecontrol/apiv1",
-    "release_level": "beta"
+    "release_level": "ga"
   },
   "cloud.google.com/go/servicedirectory/apiv1": {
     "distribution_name": "cloud.google.com/go/servicedirectory/apiv1",
@@ -901,7 +925,7 @@
     "language": "Go",
     "client_library_type": "generated",
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/servicemanagement/apiv1",
-    "release_level": "beta"
+    "release_level": "ga"
   },
   "cloud.google.com/go/spanner": {
     "distribution_name": "cloud.google.com/go/spanner",
diff --git a/vendor/github.com/VictoriaMetrics/metrics/metrics.go b/vendor/github.com/VictoriaMetrics/metrics/metrics.go
index a6ef87965..c28c03613 100644
--- a/vendor/github.com/VictoriaMetrics/metrics/metrics.go
+++ b/vendor/github.com/VictoriaMetrics/metrics/metrics.go
@@ -72,6 +72,21 @@ func WritePrometheus(w io.Writer, exposeProcessMetrics bool) {
 //     - process_io_storage_read_bytes_total - the number of bytes actually read from disk
 //     - process_io_storage_written_bytes_total - the number of bytes actually written to disk
 //
+//     - go_memstats_alloc_bytes - memory usage for Go objects in the heap
+//     - go_memstats_alloc_bytes_total - the cumulative counter for total size of allocated Go objects
+//     - go_memstats_frees_total - the cumulative counter for number of freed Go objects
+//     - go_memstats_gc_cpu_fraction - the fraction of CPU spent in Go garbage collector
+//     - go_memstats_gc_sys_bytes - the size of Go garbage collector metadata
+//     - go_memstats_heap_alloc_bytes - the same as go_memstats_alloc_bytes
+//     - go_memstats_heap_idle_bytes - idle memory ready for new Go object allocations
+//     - go_memstats_heap_objects - the number of Go objects in the heap
+//     - go_memstats_heap_sys_bytes - memory requested for Go objects from the OS
+//     - go_memstats_mallocs_total - the number of allocations for Go objects
+//     - go_memstats_next_gc_bytes - the target heap size when the next garbage collection should start
+//     - go_memstats_stack_inuse_bytes - memory used for goroutine stacks
+//     - go_memstats_stack_sys_bytes - memory requested from the OS for goroutine stacks
+//     - go_memstats_sys_bytes - memory requested by Go runtime from the OS
+//
 // The WriteProcessMetrics func is usually called in combination with writing Set metrics
 // inside "/metrics" handler:
 //
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
index 6f4c32fa0..05cbff629 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -1004,9 +1004,11 @@ var awsPartition = partition{
 		"batch": service{
 
 			Endpoints: endpoints{
+				"af-south-1":     endpoint{},
 				"ap-east-1":      endpoint{},
 				"ap-northeast-1": endpoint{},
 				"ap-northeast-2": endpoint{},
+				"ap-northeast-3": endpoint{},
 				"ap-south-1":     endpoint{},
 				"ap-southeast-1": endpoint{},
 				"ap-southeast-2": endpoint{},
@@ -5019,12 +5021,42 @@ var awsPartition = partition{
 				"eu-west-1":      endpoint{},
 				"eu-west-2":      endpoint{},
 				"eu-west-3":      endpoint{},
-				"me-south-1":     endpoint{},
-				"sa-east-1":      endpoint{},
-				"us-east-1":      endpoint{},
-				"us-east-2":      endpoint{},
-				"us-west-1":      endpoint{},
-				"us-west-2":      endpoint{},
+				"fips-ca-central-1": endpoint{
+					Hostname: "ram-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+				},
+				"fips-us-east-1": endpoint{
+					Hostname: "ram-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				"fips-us-east-2": endpoint{
+					Hostname: "ram-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				"fips-us-west-1": endpoint{
+					Hostname: "ram-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				"fips-us-west-2": endpoint{
+					Hostname: "ram-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+				"me-south-1": endpoint{},
+				"sa-east-1":  endpoint{},
+				"us-east-1":  endpoint{},
+				"us-east-2":  endpoint{},
+				"us-west-1":  endpoint{},
+				"us-west-2":  endpoint{},
 			},
 		},
 		"rds": service{
@@ -9214,8 +9246,18 @@ var awsusgovPartition = partition{
 		"ram": service{
 
 			Endpoints: endpoints{
-				"us-gov-east-1": endpoint{},
-				"us-gov-west-1": endpoint{},
+				"us-gov-east-1": endpoint{
+					Hostname: "ram.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+				},
+				"us-gov-west-1": endpoint{
+					Hostname: "ram.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+				},
 			},
 		},
 		"rds": service{
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
index d71f7b3f4..1737c2686 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -689,9 +689,12 @@ func (ctx *signingCtx) buildBodyDigest() error {
 	if hash == "" {
 		includeSHA256Header := ctx.unsignedPayload ||
 			ctx.ServiceName == "s3" ||
+			ctx.ServiceName == "s3-object-lambda" ||
 			ctx.ServiceName == "glacier"
 
-		s3Presign := ctx.isPresign && ctx.ServiceName == "s3"
+		s3Presign := ctx.isPresign &&
+			(ctx.ServiceName == "s3" ||
+				ctx.ServiceName == "s3-object-lambda")
 
 		if ctx.unsignedPayload || s3Presign {
 			hash = "UNSIGNED-PAYLOAD"
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
index cf16ba68e..88f5dc06d 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.37.31"
+const SDKVersion = "1.38.4"
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go
index 7a8e46fbd..3079e4ab0 100644
--- a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go
+++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go
@@ -7,6 +7,21 @@ import (
 	"github.com/aws/aws-sdk-go/aws/arn"
 )
 
+var supportedServiceARN = []string{
+	"s3",
+	"s3-outposts",
+	"s3-object-lambda",
+}
+
+func isSupportedServiceARN(service string) bool {
+	for _, name := range supportedServiceARN {
+		if name == service {
+			return true
+		}
+	}
+	return false
+}
+
 // Resource provides the interfaces abstracting ARNs of specific resource
 // types.
 type Resource interface {
@@ -29,9 +44,10 @@ func ParseResource(s string, resParser ResourceParser) (resARN Resource, err err
 		return nil, InvalidARNError{ARN: a, Reason: "partition not set"}
 	}
 
-	if a.Service != "s3" && a.Service != "s3-outposts" {
+	if !isSupportedServiceARN(a.Service) {
 		return nil, InvalidARNError{ARN: a, Reason: "service is not supported"}
 	}
+
 	if len(a.Resource) == 0 {
 		return nil, InvalidARNError{ARN: a, Reason: "resource not set"}
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go
new file mode 100644
index 000000000..513154cc0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go
@@ -0,0 +1,15 @@
+package arn
+
+// S3ObjectLambdaARN represents an ARN for the s3-object-lambda service
+type S3ObjectLambdaARN interface {
+	Resource
+
+	isS3ObjectLambdasARN()
+}
+
+// S3ObjectLambdaAccessPointARN is an S3ObjectLambdaARN for the Access Point resource type
+type S3ObjectLambdaAccessPointARN struct {
+	AccessPointARN
+}
+
+func (s S3ObjectLambdaAccessPointARN) isS3ObjectLambdasARN() {}
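For context on what the new S3ObjectLambdaAccessPointARN type covers: an s3-object-lambda access point ARN parses like any other ARN via the SDK's aws/arn package, and its Service component is what the updated ParseResource now checks against supportedServiceARN. A rough sketch (not from this diff; the account ID and access point name are placeholders):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/arn"
)

func main() {
	// Placeholder account ID and access point name, purely illustrative.
	a, err := arn.Parse("arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint/my-olap")
	if err != nil {
		panic(err)
	}
	// Service is "s3-object-lambda", which isSupportedServiceARN now accepts
	// alongside "s3" and "s3-outposts".
	fmt.Println(a.Service)  // s3-object-lambda
	fmt.Println(a.Resource) // accesspoint/my-olap
}
```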
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
index 07bf5d5f4..6d15bad28 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
@@ -14,6 +14,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws/awsutil"
 	"github.com/aws/aws-sdk-go/aws/client"
 	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
 	"github.com/aws/aws-sdk-go/internal/s3shared/arn"
 	"github.com/aws/aws-sdk-go/private/checksum"
 	"github.com/aws/aws-sdk-go/private/protocol"
@@ -404,7 +405,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou
 //
 // If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the
 // object. For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 //
 // Access Control List (ACL)-Specific Request Headers
 //
@@ -989,8 +990,8 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt
 // To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration
 // action. The bucket owner has this permission by default. The bucket owner
 // can grant this permission to others. For more information about permissions,
-// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
 //
 // For information about the Amazon S3 analytics feature, see Amazon S3 Analytics
 // – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html).
@@ -1083,7 +1084,7 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request
 // permission to others.
 //
 // For information about cors, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 //
 // Related Resources:
 //
@@ -1167,14 +1168,14 @@ func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) (
 // This implementation of the DELETE action removes default encryption from
 // the bucket. For information about the Amazon S3 default encryption feature,
 // see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 //
 // To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration
 // action. The bucket owner has this permission by default. The bucket owner
 // can grant this permission to others. For more information about permissions,
-// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
-// in the Amazon Simple Storage Service User Guide.
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
+// in the Amazon S3 User Guide.
 //
 // Related Resources
 //
@@ -1361,8 +1362,8 @@ func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInvent
 // To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration
 // action. The bucket owner has this permission by default. The bucket owner
 // can grant this permission to others. For more information about permissions,
-// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
 //
 // For information about the Amazon S3 inventory feature, see Amazon S3 Inventory
 // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html).
@@ -1550,8 +1551,8 @@ func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsC
 // To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration
 // action. The bucket owner has this permission by default. The bucket owner
 // can grant this permission to others. For more information about permissions,
-// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
 //
 // For information about CloudWatch request metrics for Amazon S3, see Monitoring
 // Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html).
@@ -1827,8 +1828,8 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput)
 // To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration
 // action. The bucket owner has these permissions by default and can grant it
 // to others. For more information about permissions, see Permissions Related
-// to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
 //
 // It can take a while for the deletion of a replication configuration to fully
 // propagate.
@@ -2095,7 +2096,8 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request
 //
 // Removes the null version (if there is one) of an object and inserts a delete
 // marker, which becomes the latest version of the object. If there isn't a
-// null version, Amazon S3 does not remove any objects.
+// null version, Amazon S3 does not remove any objects but will still respond
+// that the command was successful.
 //
 // To remove a specific version, you must be the bucket owner and you must use
 // the version Id subresource. Using this subresource permanently deletes the
@@ -2404,8 +2406,8 @@ func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput)
 // Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use
 // this operation, you must have the s3:PutBucketPublicAccessBlock permission.
 // For more information about permissions, see Permissions Related to Bucket
-// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
 //
 // The following operations are related to DeletePublicAccessBlock:
 //
@@ -2497,9 +2499,9 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC
 // To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration
 // action. The bucket owner has this permission by default. The bucket owner
 // can grant this permission to others. For more information about permissions,
-// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
-// in the Amazon Simple Storage Service User Guide.
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
+// in the Amazon S3 User Guide.
 //
 // You set the Transfer Acceleration state of an existing bucket to Enabled
 // or Suspended by using the PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html)
@@ -2511,7 +2513,7 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC
 //
 // For more information about transfer acceleration, see Transfer Acceleration
 // (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 //
 // Related Resources
 //
@@ -2677,13 +2679,13 @@ func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsCon
 // To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration
 // action. The bucket owner has this permission by default. The bucket owner
 // can grant this permission to others. For more information about permissions,
-// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
-// in the Amazon Simple Storage Service User Guide.
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
+// in the Amazon S3 User Guide.
 //
 // For information about Amazon S3 analytics feature, see Amazon S3 Analytics
 // – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 //
 // Related Resources
 //
@@ -2862,8 +2864,8 @@ func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *r
 // To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration
 // action. The bucket owner has this permission by default. The bucket owner
 // can grant this permission to others. For more information about permissions,
-// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
 //
 // The following operations are related to GetBucketEncryption:
 //
@@ -3048,8 +3050,8 @@ func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryCon
 // To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration
 // action. The bucket owner has this permission by default and can grant this
 // permission to others. For more information about permissions, see Permissions
-// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
 //
 // For information about the Amazon S3 inventory feature, see Amazon S3 Inventory
 // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html).
@@ -3151,8 +3153,8 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req
 // To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration
 // action. The bucket owner has this permission by default. The bucket owner
 // can grant this permission to others. For more information about permissions,
-// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
 //
 // GetBucketLifecycle has the following special error:
 //
@@ -3259,8 +3261,8 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon
 // To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration
 // action. The bucket owner has this permission, by default. The bucket owner
 // can grant this permission to others. For more information about permissions,
-// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
 //
 // GetBucketLifecycleConfiguration has the following special error:
 //
@@ -3519,8 +3521,8 @@ func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigu
 // To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration
 // action. The bucket owner has this permission by default. The bucket owner
 // can grant this permission to others. For more information about permissions,
-// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
 //
 // For information about CloudWatch request metrics for Amazon S3, see Monitoring
 // Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html).
@@ -4055,7 +4057,7 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req
 // can return a wrong result.
 //
 // For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 //
 // This action requires permissions for the s3:GetReplicationConfiguration action.
 // For more information about permissions, see Using Bucket Policies and User
@@ -5317,8 +5319,8 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou
 // To use this operation, you must have permissions to perform the s3:ListBucket
 // action. The bucket owner has this permission by default and can grant this
 // permission to others. For more information about permissions, see Permissions
-// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -5552,8 +5554,8 @@ func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalytics
 // To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration
 // action. The bucket owner has this permission by default. The bucket owner
 // can grant this permission to others. For more information about permissions,
-// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
 //
 // For information about Amazon S3 analytics feature, see Amazon S3 Analytics
 // – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html).
@@ -5751,8 +5753,8 @@ func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventory
 // To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration
 // action. The bucket owner has this permission by default. The bucket owner
 // can grant this permission to others. For more information about permissions,
-// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
 //
 // For information about the Amazon S3 inventory feature, see Amazon S3 Inventory
 // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html)
@@ -5852,8 +5854,8 @@ func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConf
 // To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration
 // action. The bucket owner has this permission by default. The bucket owner
 // can grant this permission to others. For more information about permissions,
-// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
 //
 // For more information about metrics configurations and CloudWatch request
 // metrics, see Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html).
@@ -6187,6 +6189,9 @@ func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *req
 // use request parameters as selection criteria to return metadata about a subset
 // of all the object versions.
 //
+// To use this operation, you must have permissions to perform the s3:ListBucketVersions
+// action; note that the permission name differs from this operation's name.
+//
 // A 200 OK response can contain valid or invalid XML. Make sure to design your
 // application to parse the contents of the response and handle it appropriately.
 //
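// A minimal sketch of listing object versions with the aws-sdk-go v1 client
// vendored here; the bucket name and prefix are hypothetical placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	// Requires the s3:ListBucketVersions permission on the bucket.
	err := svc.ListObjectVersionsPages(&s3.ListObjectVersionsInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Prefix: aws.String("logs/"),          // optional selection criterion
	}, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
		for _, v := range page.Versions {
			fmt.Printf("%s (version %s)\n", aws.StringValue(v.Key), aws.StringValue(v.VersionId))
		}
		return true // keep paging
	})
	if err != nil {
		log.Fatal(err)
	}
}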
@@ -6505,8 +6510,8 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque
 // you must have permissions to perform the s3:ListBucket action. The bucket
 // owner has this permission by default and can grant this permission to others.
 // For more information about permissions, see Permissions Related to Bucket
-// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
 //
 // This section describes the latest revision of this action. We recommend that
 // you use this revised API for application development. For backward compatibility,
@@ -6819,8 +6824,8 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC
 // To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration
 // action. The bucket owner has this permission by default. The bucket owner
 // can grant this permission to others. For more information about permissions,
-// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
 //
 // The Transfer Acceleration state of a bucket can be set to one of the following
 // two values:
@@ -7107,8 +7112,8 @@ func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsCon
 // To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration
 // action. The bucket owner has this permission by default. The bucket owner
 // can grant this permission to others. For more information about permissions,
-// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
 //
 // Special Errors
 //
@@ -7242,7 +7247,7 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque
 //
 // For more information about CORS, go to Enabling Cross-Origin Resource Sharing
 // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon
-// Simple Storage Service User Guide.
+// S3 User Guide.
 //
 // Related Resources
 //
@@ -7337,9 +7342,9 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r
 // specify default encryption using SSE-KMS, you can also configure Amazon S3
 // Bucket Key. For information about default encryption, see Amazon S3 default
 // bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
-// in the Amazon Simple Storage Service User Guide. For more information about
-// S3 Bucket Keys, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide. For more information about S3 Bucket Keys, see
+// Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html)
+// in the Amazon S3 User Guide.
 //
 // This action requires AWS Signature Version 4. For more information, see Authenticating
 // Requests (AWS Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html).
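// A minimal sketch of enabling default SSE-KMS encryption with an S3 Bucket Key
// through PutBucketEncryption; the bucket name and KMS key ID are hypothetical
// placeholders.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String("example-bucket"), // placeholder
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm:   aws.String(s3.ServerSideEncryptionAwsKms),
					KMSMasterKeyID: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder
				},
				BucketKeyEnabled: aws.Bool(true), // reduces per-object KMS requests
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}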
@@ -7347,9 +7352,9 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r
 // To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration
 // action. The bucket owner has this permission by default. The bucket owner
 // can grant this permission to others. For more information about permissions,
-// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
-// in the Amazon Simple Storage Service User Guide.
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
+// in the Amazon S3 User Guide.
 //
 // Related Resources
 //
@@ -7562,7 +7567,7 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon
 // the inventory daily or weekly. You can also configure what object metadata
 // to include and whether to inventory all object versions or only current versions.
 // For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 //
 // You must create a bucket policy on the destination bucket to grant permissions
 // to Amazon S3 to write objects to the bucket in the defined location. For
@@ -7572,9 +7577,9 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon
 // To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration
 // action. The bucket owner has this permission by default and can grant this
 // permission to others. For more information about permissions, see Permissions
-// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
-// in the Amazon Simple Storage Service User Guide.
+// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
+// in the Amazon S3 User Guide.
 //
 // Special Errors
 //
@@ -7686,7 +7691,7 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req
 // Creates a new lifecycle configuration for the bucket or replaces an existing
 // lifecycle configuration. For information about lifecycle configuration, see
 // Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 //
 // By default, all Amazon S3 resources, including buckets, objects, and related
 // subresources (for example, lifecycle configuration and website configuration)
@@ -7707,8 +7712,8 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req
 //    * s3:PutLifecycleConfiguration
 //
 // For more information about permissions, see Managing Access Permissions to
-// your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
-// in the Amazon Simple Storage Service User Guide.
+// your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
+// in the Amazon S3 User Guide.
 //
 // For more examples of transitioning objects to storage classes such as STANDARD_IA
 // or ONEZONE_IA, see Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#lifecycle-configuration-examples).
@@ -7724,9 +7729,9 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req
 //    * By default, a resource owner—in this case, a bucket owner, which is
 //    the AWS account that created the bucket—can perform any of the operations.
 //    A resource owner can also grant others permission to perform the operation.
-//    For more information, see the following topics in the Amazon Simple Storage
-//    Service User Guide: Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
-//    Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
+//    For more information, see the following topics in the Amazon S3 User Guide:
+//    Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
+//    Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -7811,7 +7816,7 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon
 //
 // Creates a new lifecycle configuration for the bucket or replaces an existing
 // lifecycle configuration. For information about lifecycle configuration, see
-// Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
 //
 // Bucket lifecycle configuration now supports specifying a lifecycle rule using
 // an object key name prefix, one or more object tags, or a combination of both.
@@ -7863,7 +7868,7 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon
 //    * s3:PutLifecycleConfiguration
 //
 // For more information about permissions, see Managing Access Permissions to
-// Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
 //
 // The following are related to PutBucketLifecycleConfiguration:
 //
@@ -8080,8 +8085,8 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu
 // To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration
 // action. The bucket owner has this permission by default. The bucket owner
 // can grant this permission to others. For more information about permissions,
-// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
 //
 // For information about CloudWatch request metrics for Amazon S3, see Monitoring
 // Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html).
@@ -8615,7 +8620,7 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req
 // bucket, can perform this operation. The resource owner can also grant others
 // permissions to perform the operation. For more information about permissions,
 // see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
 //
 // Handling Replication of Encrypted Objects
 //
@@ -8818,8 +8823,8 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request
 // To use this operation, you must have permissions to perform the s3:PutBucketTagging
 // action. The bucket owner has this permission by default and can grant this
 // permission to others. For more information about permissions, see Permissions
-// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
 //
 // PutBucketTagging has the following special errors:
 //
@@ -9099,7 +9104,7 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request
 // Amazon S3 has a limitation of 50 routing rules per website configuration.
 // If you require more than 50 routing rules, you can use object redirect. For
 // more information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -9193,7 +9198,7 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp
 // The Content-MD5 header is required for any request to upload an object with
 // a retention period configured using Amazon S3 Object Lock. For more information
 // about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)
-// in the Amazon Simple Storage Service Developer Guide.
+// in the Amazon S3 User Guide.
 //
 // Server-side Encryption
 //
@@ -9206,7 +9211,7 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp
 // If you request server-side encryption using AWS Key Management Service (SSE-KMS),
 // you can enable an S3 Bucket Key at the object-level. For more information,
 // see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 //
 // Access Control List (ACL)-Specific Request Headers
 //
@@ -9325,7 +9330,7 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request
 // for a new or existing object in an S3 bucket. You must have WRITE_ACP permission
 // to set the ACL of an object. For more information, see What permissions can
 // I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 //
 // This action is not supported by Amazon S3 on Outposts.
 //
@@ -9489,14 +9494,11 @@ func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *req
 
 // PutObjectLegalHold API operation for Amazon Simple Storage Service.
 //
-// Applies a Legal Hold configuration to the specified object.
+// Applies a Legal Hold configuration to the specified object. For more information,
+// see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
 //
 // This action is not supported by Amazon S3 on Outposts.
 //
-// Related Resources
-//
-//    * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html)
-//
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
@@ -9575,14 +9577,16 @@ func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfiguration
 //
 // Places an Object Lock configuration on the specified bucket. The rule specified
 // in the Object Lock configuration will be applied by default to every new
-// object placed in the specified bucket.
+// object placed in the specified bucket. For more information, see Locking
+// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
 //
-// DefaultRetention requires either Days or Years. You can't specify both at
-// the same time.
+//    * The DefaultRetention settings require both a mode and a period.
 //
-// Related Resources
+//    * The DefaultRetention period can be either Days or Years but you must
+//    select one. You cannot specify Days and Years at the same time.
 //
-//    * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html)
+//    * You can only enable Object Lock for new buckets. If you want to turn
+//    on Object Lock for an existing bucket, contact AWS Support.
 //
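// A minimal sketch of PutObjectLockConfiguration setting a DefaultRetention with
// both a mode and a Days period; the bucket, which must have been created with
// Object Lock enabled, is a hypothetical placeholder.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("example-lock-bucket"), // placeholder
		ObjectLockConfiguration: &s3.ObjectLockConfiguration{
			ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
			Rule: &s3.ObjectLockRule{
				DefaultRetention: &s3.DefaultRetention{
					Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
					Days: aws.Int64(30), // Days or Years, never both
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}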
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -9660,14 +9664,11 @@ func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *req
 
 // PutObjectRetention API operation for Amazon Simple Storage Service.
 //
-// Places an Object Retention configuration on an object.
+// Places an Object Retention configuration on an object. For more information,
+// see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
 //
 // This action is not supported by Amazon S3 on Outposts.
 //
-// Related Resources
-//
-//    * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html)
-//
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
@@ -9972,9 +9973,9 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque
 // To use this operation, you must have permissions to perform the s3:RestoreObject
 // action. The bucket owner has this permission by default and can grant this
 // permission to others. For more information about permissions, see Permissions
-// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
-// in the Amazon Simple Storage Service User Guide.
+// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
+// in the Amazon S3 User Guide.
 //
 // Querying Archives with Select Requests
 //
@@ -9984,7 +9985,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque
 // queries and custom analytics on your archived data without having to restore
 // your data to a hotter Amazon S3 tier. For an overview about select requests,
 // see Querying Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 //
 // When making a select request, do the following:
 //
@@ -9995,13 +9996,12 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque
 //    the storage class and encryption for the output objects stored in the
 //    bucket. For more information about output, see Querying Archived Objects
 //    (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html)
-//    in the Amazon Simple Storage Service User Guide. For more information
-//    about the S3 structure in the request body, see the following: PutObject
-//    (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) Managing
-//    Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html)
-//    in the Amazon Simple Storage Service User Guide Protecting Data Using
-//    Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
-//    in the Amazon Simple Storage Service User Guide
+//    in the Amazon S3 User Guide. For more information about the S3 structure
+//    in the request body, see the following: PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//    Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html)
+//    in the Amazon S3 User Guide Protecting Data Using Server-Side Encryption
+//    (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
+//    in the Amazon S3 User Guide
 //
 //    * Define the SQL expression for the SELECT type of restoration for your
 //    query in the request body's SelectParameters structure. You can use expressions
@@ -10017,7 +10017,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque
 //
 // For more information about using SQL with S3 Glacier Select restore, see
 // SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 //
 // When making a select request, you can also do the following:
 //
@@ -10088,19 +10088,19 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque
 //
 // For more information about archive retrieval options and provisioned capacity
 // for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 //
 // You can use Amazon S3 restore speed upgrade to change the restore speed to
 // a faster speed while it is in progress. For more information, see Upgrading
 // the speed of an in-progress restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 //
 // To get the status of object restoration, you can send a HEAD request. Operations
 // return the x-amz-restore header, which provides information about the restoration
 // status, in the response. You can use Amazon S3 event notifications to notify
 // you when a restore is initiated or completed. For more information, see Configuring
 // Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 //
 // After restoring an archived object, you can update the restoration period
 // by reissuing the request with a new period. Amazon S3 updates the restoration
@@ -10115,7 +10115,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque
 // the object in 3 days. For more information about lifecycle configuration,
 // see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
 // and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
-// in Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 //
 // Responses
 //
@@ -10146,7 +10146,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque
 //    * GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html)
 //
 //    * SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html)
-//    in the Amazon Simple Storage Service User Guide
+//    in the Amazon S3 User Guide
 //
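// A minimal sketch of a basic RestoreObject call that restores an archived object
// for two days using the Standard retrieval tier; the bucket and key are
// hypothetical placeholders.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.RestoreObject(&s3.RestoreObjectInput{
		Bucket: aws.String("example-archive-bucket"), // placeholder
		Key:    aws.String("2020/backup.tar.gz"),     // placeholder
		RestoreRequest: &s3.RestoreRequest{
			Days: aws.Int64(2), // how long the temporary copy stays available
			GlacierJobParameters: &s3.GlacierJobParameters{
				Tier: aws.String(s3.TierStandard),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Check restoration progress later via HeadObject (x-amz-restore header).
}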
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -10246,18 +10246,18 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r
 //
 // For more information about Amazon S3 Select, see Selecting Content from Objects
 // (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 //
 // For more information about using SQL with Amazon S3 Select, see SQL Reference
 // for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 //
 // Permissions
 //
 // You must have s3:GetObject permission for this operation. Amazon S3 Select
 // does not support anonymous access. For more information about permissions,
 // see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 //
 // Object Data Formats
 //
@@ -10280,13 +10280,13 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r
 //    you must use the headers that are documented in the GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html).
 //    For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided
 //    Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
-//    in the Amazon Simple Storage Service User Guide. For objects that are
-//    encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer
-//    master keys (CMKs) stored in AWS Key Management Service (SSE-KMS), server-side
-//    encryption is handled transparently, so you don't need to specify anything.
-//    For more information about server-side encryption, including SSE-S3 and
-//    SSE-KMS, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
-//    in the Amazon Simple Storage Service User Guide.
+//    in the Amazon S3 User Guide. For objects that are encrypted with Amazon
+//    S3 managed encryption keys (SSE-S3) and customer master keys (CMKs) stored
+//    in AWS Key Management Service (SSE-KMS), server-side encryption is handled
+//    transparently, so you don't need to specify anything. For more information
+//    about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting
+//    Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
+//    in the Amazon S3 User Guide.
 //
 // Working with the Response Body
 //
@@ -10308,7 +10308,7 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r
 //    * GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot
 //    specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes.
 //    For more information about storage classes, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro)
-//    in the Amazon Simple Storage Service User Guide.
+//    in the Amazon S3 User Guide.
 //
 // Special Errors
 //
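// A minimal sketch of SelectObjectContent running a simple SQL expression against
// a CSV object and streaming the records back; the bucket, key, and query are
// hypothetical placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	resp, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
		Bucket:         aws.String("example-bucket"),                // placeholder
		Key:            aws.String("data.csv"),                      // placeholder
		Expression:     aws.String("SELECT s.name FROM S3Object s"), // placeholder query
		ExpressionType: aws.String(s3.ExpressionTypeSql),
		InputSerialization: &s3.InputSerialization{
			CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
		},
		OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer resp.EventStream.Close()
	// Records arrive as a stream of events; collect only the record payloads.
	for event := range resp.EventStream.Events() {
		if records, ok := event.(*s3.RecordsEvent); ok {
			fmt.Print(string(records.Payload))
		}
	}
	if err := resp.EventStream.Err(); err != nil {
		log.Fatal(err)
	}
}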
@@ -10601,11 +10601,11 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou
 //
 // For more information on multipart uploads, go to Multipart Upload Overview
 // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the
-// Amazon Simple Storage Service User Guide .
+// Amazon S3 User Guide.
 //
 // For information on the permissions required to use the multipart upload API,
 // go to Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 //
 // You can optionally request server-side encryption where Amazon S3 encrypts
 // your data as it writes it to disks in its data centers and decrypts it for
@@ -10615,7 +10615,7 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou
 // match the headers you used in the request to initiate the upload by using
 // CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html).
 // For more information, go to Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 //
 // Server-side encryption is supported by the S3 Multipart Upload actions. Unless
 // you are using a customer-provided encryption key, you don't need to specify
@@ -10731,7 +10731,7 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req
 //
 // The minimum allowable part size for a multipart upload is 5 MB. For more
 // information about multipart upload limits, go to Quick Facts (https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 //
 // Instead of using an existing object as part data, you might use the UploadPart
 // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) action
@@ -10745,15 +10745,15 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req
 //
 //    * For conceptual information about multipart uploads, see Uploading Objects
 //    Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html)
-//    in the Amazon Simple Storage Service User Guide.
+//    in the Amazon S3 User Guide.
 //
 //    * For information about permissions required to use the multipart upload
 //    API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
-//    in the Amazon Simple Storage Service User Guide.
+//    in the Amazon S3 User Guide.
 //
 //    * For information about copying objects using a single atomic action vs.
 //    the multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html)
-//    in the Amazon Simple Storage Service User Guide.
+//    in the Amazon S3 User Guide.
 //
 //    * For information about using server-side encryption with customer-provided
 //    encryption keys with the UploadPartCopy operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
@@ -10842,11 +10842,128 @@ func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInp
 	return out, req.Send()
 }
 
+const opWriteGetObjectResponse = "WriteGetObjectResponse"
+
+// WriteGetObjectResponseRequest generates a "aws/request.Request" representing the
+// client's request for the WriteGetObjectResponse operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See WriteGetObjectResponse for more information on using the WriteGetObjectResponse
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the WriteGetObjectResponseRequest method.
+//    req, resp := client.WriteGetObjectResponseRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WriteGetObjectResponse
+func (c *S3) WriteGetObjectResponseRequest(input *WriteGetObjectResponseInput) (req *request.Request, output *WriteGetObjectResponseOutput) {
+	op := &request.Operation{
+		Name:       opWriteGetObjectResponse,
+		HTTPMethod: "POST",
+		HTTPPath:   "/WriteGetObjectResponse",
+	}
+
+	if input == nil {
+		input = &WriteGetObjectResponseInput{}
+	}
+
+	output = &WriteGetObjectResponseOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Sign.Remove(v4.SignRequestHandler)
+	handler := v4.BuildNamedHandler("v4.CustomSignerHandler", v4.WithUnsignedPayload)
+	req.Handlers.Sign.PushFrontNamed(handler)
+	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{RequestRoute}.", input.hostLabels))
+	req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler)
+	return
+}
+
+// WriteGetObjectResponse API operation for Amazon Simple Storage Service.
+//
+// Passes transformed objects to a GetObject operation when using Object Lambda
+// Access Points. For information about Object Lambda Access Points, see Transforming
+// objects with Object Lambda Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html)
+// in the Amazon S3 User Guide.
+//
+// This operation supports metadata that can be returned by GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html),
+// in addition to RequestRoute, RequestToken, StatusCode, ErrorCode, and ErrorMessage.
+// The GetObject response metadata is supported so that the WriteGetObjectResponse
+// caller, typically an AWS Lambda function, can provide the same metadata when
+// it internally invokes GetObject. When WriteGetObjectResponse is called by
+// a customer-owned Lambda function, the metadata returned to the end user GetObject
+// call might differ from what Amazon S3 would normally return.
+//
+// AWS provides some prebuilt Lambda functions that you can use with S3 Object
+// Lambda to detect and redact personally identifiable information (PII) and
+// decompress S3 objects. These Lambda functions are available in the AWS Serverless
+// Application Repository, and can be selected through the AWS Management Console
+// when you create your Object Lambda Access Point.
+//
+// Example 1: PII Access Control - This Lambda function uses Amazon Comprehend,
+// a natural language processing (NLP) service using machine learning to find
+// insights and relationships in text. It automatically detects personally identifiable
+// information (PII) such as names, addresses, dates, credit card numbers, and
+// social security numbers from documents in your Amazon S3 bucket.
+//
+// Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a
+// natural language processing (NLP) service using machine learning to find
+// insights and relationships in text. It automatically redacts personally identifiable
+// information (PII) such as names, addresses, dates, credit card numbers, and
+// social security numbers from documents in your Amazon S3 bucket.
+//
+// Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression
+// is equipped to decompress objects stored in S3 in one of six compressed file
+// formats including bzip2, gzip, snappy, zlib, zstandard and ZIP.
+//
+// For information on how to view and use these functions, see Using AWS built
+// Lambda functions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html)
+// in the Amazon S3 User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation WriteGetObjectResponse for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WriteGetObjectResponse
+func (c *S3) WriteGetObjectResponse(input *WriteGetObjectResponseInput) (*WriteGetObjectResponseOutput, error) {
+	req, out := c.WriteGetObjectResponseRequest(input)
+	return out, req.Send()
+}
+
+// WriteGetObjectResponseWithContext is the same as WriteGetObjectResponse with the addition of
+// the ability to pass a context and additional request options.
+//
+// See WriteGetObjectResponse for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WriteGetObjectResponseWithContext(ctx aws.Context, input *WriteGetObjectResponseInput, opts ...request.Option) (*WriteGetObjectResponseOutput, error) {
+	req, out := c.WriteGetObjectResponseRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
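// A minimal sketch of how an Object Lambda handler might call WriteGetObjectResponse;
// the route and token would come from the S3 Object Lambda event, and the helper,
// placeholder values, and transformed body shown here are hypothetical.
package main

import (
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// respondTransformed writes a transformed object body back to the waiting
// GetObject caller. route and token are taken from the Object Lambda event.
func respondTransformed(svc *s3.S3, route, token, transformedBody string) error {
	_, err := svc.WriteGetObjectResponse(&s3.WriteGetObjectResponseInput{
		RequestRoute: aws.String(route),
		RequestToken: aws.String(token),
		StatusCode:   aws.Int64(200),
		Body:         strings.NewReader(transformedBody),
	})
	return err
}

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	// Placeholder values; in a real handler these come from the event payload.
	if err := respondTransformed(svc, "route-from-event", "token-from-event", "REDACTED"); err != nil {
		log.Fatal(err)
	}
}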
 // Specifies the days since the initiation of an incomplete multipart upload
 // that Amazon S3 will wait before permanently removing all parts of the upload.
 // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
 // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
-// in the Amazon Simple Storage Service Developer Guide.
+// in the Amazon S3 User Guide.
 type AbortIncompleteMultipartUpload struct {
 	_ struct{} `type:"structure"`
 
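// A minimal sketch of a lifecycle rule that aborts incomplete multipart uploads
// after seven days via PutBucketLifecycleConfiguration; the bucket name, rule ID,
// and prefix are hypothetical placeholders.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:     aws.String("abort-stale-mpu"),
				Status: aws.String(s3.ExpirationStatusEnabled),
				Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("")}, // whole bucket
				AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
					DaysAfterInitiation: aws.Int64(7),
				},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}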
@@ -10880,21 +10997,21 @@ type AbortMultipartUploadInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// When using this action with Amazon S3 on Outposts, you must direct requests
 	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
 	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
 	// using this action using S3 on Outposts through the AWS SDKs, you provide
 	// the Outposts bucket ARN in place of the bucket name. For more information
-	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
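// A minimal sketch of passing an access point ARN in place of a bucket name,
// here for AbortMultipartUpload; the ARN, region, key, and upload ID are
// hypothetical placeholders.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()), aws.NewConfig().WithRegion("us-west-2"))
	_, err := svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
		// Access point ARN used instead of the bucket name (placeholder values).
		Bucket:   aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"),
		Key:      aws.String("large-object.bin"),
		UploadId: aws.String("example-upload-id"),
	})
	if err != nil {
		log.Fatal(err)
	}
}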
@@ -11042,7 +11159,7 @@ func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipart
 
 // Configures the transfer acceleration state for an Amazon S3 bucket. For more
 // information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html)
-// in the Amazon Simple Storage Service Developer Guide.
+// in the Amazon S3 User Guide.
 type AccelerateConfiguration struct {
 	_ struct{} `type:"structure"`
 
@@ -11520,7 +11637,7 @@ func (s *Bucket) SetName(v string) *Bucket {
 
 // Specifies the lifecycle configuration for objects in an Amazon S3 bucket.
 // For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
-// in the Amazon Simple Storage Service Developer Guide.
+// in the Amazon S3 User Guide.
 type BucketLifecycleConfiguration struct {
 	_ struct{} `type:"structure"`
 
@@ -11614,7 +11731,7 @@ func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggin
 // Describes the cross-origin access configuration for objects in an Amazon
 // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing
 // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon
-// Simple Storage Service Developer Guide.
+// S3 User Guide.
 type CORSConfiguration struct {
 	_ struct{} `type:"structure"`
 
@@ -12034,7 +12151,7 @@ type CompleteMultipartUploadInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -12174,16 +12291,16 @@ type CompleteMultipartUploadOutput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// When using this action with Amazon S3 on Outposts, you must direct requests
 	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
 	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
 	// using this action using S3 on Outposts through the AWS SDKs, you provide
 	// the Outposts bucket ARN in place of the bucket name. For more information
-	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
 	Bucket *string `type:"string"`
 
 	// Indicates whether the multipart upload uses an S3 Bucket Key for server-side
@@ -12460,16 +12577,16 @@ type CopyObjectInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// When using this action with Amazon S3 on Outposts, you must direct requests
 	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
 	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
 	// using this action using S3 on Outposts through the AWS SDKs, you provide
 	// the Outposts bucket ARN in place of the bucket name. For more information
-	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -12502,7 +12619,7 @@ type CopyObjectInput struct {
 
 	// Specifies the source object for the copy operation. You specify the value
 	// in one of two formats, depending on whether you want to access the source
-	// object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-points.html):
+	// object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html):
 	//
 	//    * For objects not accessed through an access point, specify the name of
 	//    the source bucket and the key of the source object, separated by a slash
@@ -12560,12 +12677,12 @@ type CopyObjectInput struct {
 	// encryption key was transmitted without error.
 	CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
 
-	// The account id of the expected destination bucket owner. If the destination
+	// The account ID of the expected destination bucket owner. If the destination
 	// bucket is owned by a different account, the request will fail with an HTTP
 	// 403 (Access Denied) error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
 
-	// The account id of the expected source bucket owner. If the source bucket
+	// The account ID of the expected source bucket owner. If the source bucket
 	// is owned by a different account, the request will fail with an HTTP 403 (Access
 	// Denied) error.
 	ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"`
@@ -13377,16 +13494,16 @@ type CreateMultipartUploadInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// When using this action with Amazon S3 on Outposts, you must direct requests
 	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
 	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
 	// using this action using S3 on Outposts through the AWS SDKs, you provide
 	// the Outposts bucket ARN in place of the bucket name. For more information
-	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -13417,7 +13534,7 @@ type CreateMultipartUploadInput struct {
 	// A standard MIME type describing the format of the object data.
 	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -13791,16 +13908,16 @@ type CreateMultipartUploadOutput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// When using this action with Amazon S3 on Outposts, you must direct requests
 	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
 	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
 	// using this action using S3 on Outposts through the AWS SDKs, you provide
 	// the Outposts bucket ARN in place of the bucket name. For more information
-	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
 	Bucket *string `locationName:"Bucket" type:"string"`
 
 	// Indicates whether the multipart upload uses an S3 Bucket Key for server-side
@@ -13933,17 +14050,24 @@ func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUplo
 
 // The container element for specifying the default Object Lock retention settings
 // for new objects placed in the specified bucket.
+//
+//    * The DefaultRetention settings require both a mode and a period.
+//
+//    * The DefaultRetention period can be either Days or Years but you must
+//    select one. You cannot specify Days and Years at the same time.
 type DefaultRetention struct {
 	_ struct{} `type:"structure"`
 
 	// The number of days that you want to specify for the default retention period.
+	// Must be used with Mode.
 	Days *int64 `type:"integer"`
 
 	// The default Object Lock retention mode you want to apply to new objects placed
-	// in the specified bucket.
+	// in the specified bucket. Must be used with either Days or Years.
 	Mode *string `type:"string" enum:"ObjectLockRetentionMode"`
 
 	// The number of years that you want to specify for the default retention period.
+	// Must be used with Mode.
 	Years *int64 `type:"integer"`
 }
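
To make the DefaultRetention constraints added above concrete, here is a hedged construction sketch; the GOVERNANCE mode and 30-day period are assumptions, and it reuses the aws and s3 imports from the earlier HeadBucket sketch.

func exampleDefaultRetention() *s3.DefaultRetention {
	// Mode must be set together with exactly one period field; Days and Years
	// are mutually exclusive. The values below are illustrative assumptions.
	return &s3.DefaultRetention{
		Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
		Days: aws.Int64(30),
	}
}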
 
@@ -14042,7 +14166,7 @@ type DeleteBucketAnalyticsConfigurationInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -14156,7 +14280,7 @@ type DeleteBucketCorsInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -14257,7 +14381,7 @@ type DeleteBucketEncryptionInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -14357,7 +14481,7 @@ type DeleteBucketInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -14547,7 +14671,7 @@ type DeleteBucketInventoryConfigurationInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -14661,7 +14785,7 @@ type DeleteBucketLifecycleInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -14761,7 +14885,7 @@ type DeleteBucketMetricsConfigurationInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -14889,7 +15013,7 @@ type DeleteBucketOwnershipControlsInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -14989,7 +15113,7 @@ type DeleteBucketPolicyInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -15089,7 +15213,7 @@ type DeleteBucketReplicationInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -15189,7 +15313,7 @@ type DeleteBucketTaggingInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -15289,7 +15413,7 @@ type DeleteBucketWebsiteInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -15489,16 +15613,16 @@ type DeleteObjectInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// When using this action with Amazon S3 on Outposts, you must direct requests
 	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
 	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
 	// using this action using S3 on Outposts through the AWS SDKs, you provide
 	// the Outposts bucket ARN in place of the bucket name. For more information
-	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -15507,7 +15631,7 @@ type DeleteObjectInput struct {
 	// to process this operation.
 	BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -15695,21 +15819,21 @@ type DeleteObjectTaggingInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// When using this action with Amazon S3 on Outposts, you must direct requests
 	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
 	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
 	// using this action using S3 on Outposts through the AWS SDKs, you provide
 	// the Outposts bucket ARN in place of the bucket name. For more information
-	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -15846,16 +15970,16 @@ type DeleteObjectsInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// When using this action with Amazon S3 on Outposts, you must direct requests
 	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
 	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
 	// using this action using S3 on Outposts through the AWS SDKs, you provide
 	// the Outposts bucket ARN in place of the bucket name. For more information
-	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -15870,7 +15994,7 @@ type DeleteObjectsInput struct {
 	// Delete is a required field
 	Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -16045,7 +16169,7 @@ type DeletePublicAccessBlockInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -16957,7 +17081,7 @@ type FilterRule struct {
 	// the filtering rule applies. The maximum length is 1,024 characters. Overlapping
 	// prefixes and suffixes are not supported. For more information, see Configuring
 	// Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// in the Amazon S3 User Guide.
 	Name *string `type:"string" enum:"FilterRuleName"`
 
 	// The value that the filter searches for in object key names.
@@ -16994,7 +17118,7 @@ type GetBucketAccelerateConfigurationInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -17103,7 +17227,7 @@ type GetBucketAclInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -17221,7 +17345,7 @@ type GetBucketAnalyticsConfigurationInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -17344,7 +17468,7 @@ type GetBucketCorsInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -17455,7 +17579,7 @@ type GetBucketEncryptionInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -17677,7 +17801,7 @@ type GetBucketInventoryConfigurationInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -17800,7 +17924,7 @@ type GetBucketLifecycleConfigurationInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -17909,7 +18033,7 @@ type GetBucketLifecycleInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -18018,7 +18142,7 @@ type GetBucketLocationInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -18129,7 +18253,7 @@ type GetBucketLoggingInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -18241,7 +18365,7 @@ type GetBucketMetricsConfigurationInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -18364,7 +18488,7 @@ type GetBucketNotificationConfigurationRequest struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -18450,7 +18574,7 @@ type GetBucketOwnershipControlsInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -18560,7 +18684,7 @@ type GetBucketPolicyInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -18669,7 +18793,7 @@ type GetBucketPolicyStatusInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -18778,7 +18902,7 @@ type GetBucketReplicationInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -18888,7 +19012,7 @@ type GetBucketRequestPaymentInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -18997,7 +19121,7 @@ type GetBucketTaggingInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -19108,7 +19232,7 @@ type GetBucketVersioningInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -19228,7 +19352,7 @@ type GetBucketWebsiteInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -19366,13 +19490,13 @@ type GetObjectAclInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -19540,21 +19664,21 @@ type GetObjectInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// When using this action with Amazon S3 on Outposts, you must direct requests
 	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
 	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
 	// using this action using S3 on Outposts through the AWS SDKs, you provide
 	// the Outposts bucket ARN in place of the bucket name. For more information
-	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
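
The access point note repeated above ("you provide the access point ARN in place of the bucket name") can also be shown in code. The sketch below is an assumption-laden illustration: the ARN and key are made up, it expects an *s3.S3 client such as the one built in the earlier sketch, and it relies on an aws-sdk-go release recent enough to accept S3 access point ARNs where a bucket name is expected.

func getViaAccessPoint(svc *s3.S3) error {
	// Hedged sketch: the access point ARN below is a placeholder passed where
	// a bucket name would normally go; Key is an assumed object key.
	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("arn:aws:s3:us-west-2:111122223333:accesspoint/my-access-point"),
		Key:    aws.String("reports/example.txt"),
	})
	if err != nil {
		return err
	}
	defer out.Body.Close()
	// Callers would read out.Body here.
	return nil
}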
@@ -19840,13 +19964,13 @@ type GetObjectLegalHoldInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -19995,13 +20119,13 @@ type GetObjectLockConfigurationInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -20443,13 +20567,13 @@ type GetObjectRetentionInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -20598,21 +20722,21 @@ type GetObjectTaggingInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// When using this action with Amazon S3 on Outposts, you must direct requests
 	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
 	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
 	// using this action using S3 on Outposts through the AWS SDKs, you provide
 	// the Outposts bucket ARN in place of the bucket name. For more information
-	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -20772,7 +20896,7 @@ type GetObjectTorrentInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -20922,7 +21046,7 @@ type GetPublicAccessBlockInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -21218,21 +21342,21 @@ type HeadBucketInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// When using this action with Amazon S3 on Outposts, you must direct requests
 	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
 	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
 	// using this action using S3 on Outposts through the AWS SDKs, you provide
 	// the Outposts bucket ARN in place of the bucket name. For more information
-	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -21333,21 +21457,21 @@ type HeadObjectInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// When using this action with Amazon S3 on Outposts, you must direct requests
 	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
 	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
 	// using this action using S3 on Outposts through the AWS SDKs, you provide
 	// the Outposts bucket ARN in place of the bucket name. For more information
-	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -21701,7 +21825,7 @@ type HeadObjectOutput struct {
 	// If an archive copy is already restored, the header value indicates when Amazon
 	// S3 is scheduled to delete the object copy. For example:
 	//
-	// x-amz-restore: ongoing-request="false", expiry-date="Fri, 23 Dec 2012 00:00:00
+	// x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00
 	// GMT"
 	//
 	// If the object restoration is in progress, the header returns the value ongoing-request="true".
@@ -22778,14 +22902,14 @@ type LambdaFunctionConfiguration struct {
 
 	// The Amazon S3 bucket event for which to invoke the AWS Lambda function. For
 	// more information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// in the Amazon S3 User Guide.
 	//
 	// Events is a required field
 	Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
 
 	// Specifies object key name filtering rules. For information about key name
 	// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// in the Amazon S3 User Guide.
 	Filter *NotificationConfigurationFilter `type:"structure"`
 
 	// An optional unique identifier for configurations in a notification configuration.
@@ -22953,7 +23077,7 @@ type LifecycleRule struct {
 	// that Amazon S3 will wait before permanently removing all parts of the upload.
 	// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
 	// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// in the Amazon S3 User Guide.
 	AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
 
 	// Specifies the expiration for the lifecycle of the object in the form of date,
@@ -22961,7 +23085,8 @@ type LifecycleRule struct {
 	Expiration *LifecycleExpiration `type:"structure"`
 
 	// The Filter is used to identify objects that a Lifecycle Rule applies to.
-	// A Filter must have exactly one of Prefix, Tag, or And specified.
+	// A Filter must have exactly one of Prefix, Tag, or And specified. Filter is
+	// required if the LifecycleRule does not contain a Prefix element.
 	Filter *LifecycleRuleFilter `type:"structure"`
 
 	// Unique identifier for the rule. The value cannot be longer than 255 characters.
@@ -22982,7 +23107,7 @@ type LifecycleRule struct {
 	NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"`
 
 	// Prefix identifying one or more objects to which the rule applies. This is
-	// No longer used; use Filter instead.
+	// no longer used; use Filter instead.
 	//
 	// Replacement must be made for object keys containing special characters (such
 	// as carriage returns) when using XML requests. For more information, see XML
@@ -23220,7 +23345,7 @@ type ListBucketAnalyticsConfigurationsInput struct {
 	// should begin.
 	ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -23514,7 +23639,7 @@ type ListBucketInventoryConfigurationsInput struct {
 	// that Amazon S3 understands.
 	ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -23667,7 +23792,7 @@ type ListBucketMetricsConfigurationsInput struct {
 	// value that Amazon S3 understands.
 	ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -23862,16 +23987,16 @@ type ListMultipartUploadsInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// When using this action with Amazon S3 on Outposts, you must direct requests
 	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
 	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
 	// using this action using S3 on Outposts through the AWS SDKs, you provide
 	// the Outposts bucket ARN in place of the bucket name. For more information
-	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -23894,7 +24019,7 @@ type ListMultipartUploadsInput struct {
 	// keys in the response.
 	EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -24206,7 +24331,7 @@ type ListObjectVersionsInput struct {
 	// keys in the response.
 	EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -24502,16 +24627,16 @@ type ListObjectsInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// When using this action with Amazon S3 on Outposts, you must direct requests
 	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
 	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
 	// using this action using S3 on Outposts through the AWS SDKs, you provide
 	// the Outposts bucket ARN in place of the bucket name. For more information
-	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -24527,7 +24652,7 @@ type ListObjectsInput struct {
 	// keys in the response.
 	EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -24796,16 +24921,16 @@ type ListObjectsV2Input struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// When using this action with Amazon S3 on Outposts, you must direct requests
 	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
 	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
 	// using this action with S3 on Outposts through the AWS SDKs, you provide
 	// the Outposts bucket ARN in place of the bucket name. For more information
-	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -24821,7 +24946,7 @@ type ListObjectsV2Input struct {
 	// Encoding type used by Amazon S3 to encode object keys in the response.
 	EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -25032,16 +25157,16 @@ type ListObjectsV2Output struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// When using this action with Amazon S3 on Outposts, you must direct requests
 	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
 	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
 	// using this action with S3 on Outposts through the AWS SDKs, you provide
 	// the Outposts bucket ARN in place of the bucket name. For more information
-	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
 	Name *string `type:"string"`
 
 	// NextContinuationToken is sent when isTruncated is true, which means there
@@ -25148,21 +25273,21 @@ type ListPartsInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// When using this action with Amazon S3 on Outposts, you must direct requests
 	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
 	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
 	// using this action with S3 on Outposts through the AWS SDKs, you provide
 	// the Outposts bucket ARN in place of the bucket name. For more information
-	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -26064,7 +26189,7 @@ type NoncurrentVersionTransition struct {
 	// perform the associated action. For information about the noncurrent days
 	// calculations, see How Amazon S3 Calculates How Long an Object Has Been Noncurrent
 	// (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// in the Amazon S3 User Guide.
 	NoncurrentDays *int64 `type:"integer"`
 
 	// The class of storage used to store the object.
@@ -26226,7 +26351,7 @@ func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConf
 
 // Specifies object key name filtering rules. For information about key name
 // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
-// in the Amazon Simple Storage Service Developer Guide.
+// in the Amazon S3 User Guide.
 type NotificationConfigurationFilter struct {
 	_ struct{} `type:"structure"`
 
@@ -26394,10 +26519,14 @@ func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier {
 type ObjectLockConfiguration struct {
 	_ struct{} `type:"structure"`
 
-	// Indicates whether this bucket has an Object Lock configuration enabled.
+	// Indicates whether this bucket has an Object Lock configuration enabled. Enable
+	// ObjectLockEnabled when you apply ObjectLockConfiguration to a bucket.
 	ObjectLockEnabled *string `type:"string" enum:"ObjectLockEnabled"`
 
-	// The Object Lock rule in place for the specified object.
+	// Specifies the Object Lock rule for the specified object. Enable this rule
+	// when you apply ObjectLockConfiguration to a bucket. Bucket settings require
+	// both a mode and a period. The period can be either Days or Years, but you
+	// must select one; you cannot specify Days and Years at the same time.
 	Rule *ObjectLockRule `type:"structure"`
 }
 
@@ -26484,8 +26613,10 @@ func (s *ObjectLockRetention) SetRetainUntilDate(v time.Time) *ObjectLockRetenti
 type ObjectLockRule struct {
 	_ struct{} `type:"structure"`
 
-	// The default retention period that you want to apply to new objects placed
-	// in the specified bucket.
+	// The default Object Lock retention mode and period that you want to apply
+	// to new objects placed in the specified bucket. Bucket settings require both
+	// a mode and a period. The period can be either Days or Years but you must
+	// select one. You cannot specify Days and Years at the same time.
 	DefaultRetention *DefaultRetention `type:"structure"`
 }
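To make the mode-and-period constraint above concrete, here is a hedged sketch of applying a bucket-level default retention with PutObjectLockConfiguration. It assumes the same imports and client setup as the ListObjectsV2 sketch earlier; the bucket name is a placeholder, and exactly one of Days or Years may be set:

func setDefaultRetention(svc *s3.S3) error {
	// Default new objects in the bucket to GOVERNANCE mode for 30 days.
	_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("my-object-lock-bucket"),
		ObjectLockConfiguration: &s3.ObjectLockConfiguration{
			ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
			Rule: &s3.ObjectLockRule{
				DefaultRetention: &s3.DefaultRetention{
					Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
					Days: aws.Int64(30), // set Days or Years, never both
				},
			},
		},
	})
	return err
}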
 
@@ -26984,7 +27115,7 @@ func (s *ProgressEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstr
 // S3 bucket. You can enable the configuration options in any combination. For
 // more information about when Amazon S3 considers a bucket or object public,
 // see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 type PublicAccessBlockConfiguration struct {
 	_ struct{} `type:"structure"`
 
@@ -27075,7 +27206,7 @@ type PutBucketAccelerateConfigurationInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -27190,7 +27321,7 @@ type PutBucketAclInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -27358,7 +27489,7 @@ type PutBucketAnalyticsConfigurationInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -27489,12 +27620,12 @@ type PutBucketCorsInput struct {
 	// Describes the cross-origin access configuration for objects in an Amazon
 	// S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing
 	// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon
-	// Simple Storage Service User Guide.
+	// S3 User Guide.
 	//
 	// CORSConfiguration is a required field
 	CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -27607,12 +27738,12 @@ type PutBucketEncryptionInput struct {
 	// Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS
 	// (SSE-KMS). For information about the Amazon S3 default encryption feature,
 	// see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -27854,7 +27985,7 @@ type PutBucketInventoryConfigurationInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -27987,7 +28118,7 @@ type PutBucketLifecycleConfigurationInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -28099,7 +28230,7 @@ type PutBucketLifecycleInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -28218,7 +28349,7 @@ type PutBucketLoggingInput struct {
 	// BucketLoggingStatus is a required field
 	BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -28332,7 +28463,7 @@ type PutBucketMetricsConfigurationInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -28465,7 +28596,7 @@ type PutBucketNotificationConfigurationInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -28585,7 +28716,7 @@ type PutBucketNotificationInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -28699,7 +28830,7 @@ type PutBucketOwnershipControlsInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -28823,7 +28954,7 @@ type PutBucketPolicyInput struct {
 	// to change this bucket policy in the future.
 	ConfirmRemoveSelfBucketAccess *bool `location:"header" locationName:"x-amz-confirm-remove-self-bucket-access" type:"boolean"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -28943,7 +29074,7 @@ type PutBucketReplicationInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -29072,7 +29203,7 @@ type PutBucketRequestPaymentInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -29191,7 +29322,7 @@ type PutBucketTaggingInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -29310,7 +29441,7 @@ type PutBucketVersioningInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -29434,7 +29565,7 @@ type PutBucketWebsiteInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -29562,13 +29693,13 @@ type PutObjectAclInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -29603,16 +29734,16 @@ type PutObjectAclInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// When using this action with Amazon S3 on Outposts, you must direct requests
 	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
 	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
 	// using this action with S3 on Outposts through the AWS SDKs, you provide
 	// the Outposts bucket ARN in place of the bucket name. For more information
-	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Key is a required field
 	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
@@ -29813,16 +29944,16 @@ type PutObjectInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// When using this action with Amazon S3 on Outposts, you must direct requests
 	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
 	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
 	// using this action with S3 on Outposts through the AWS SDKs, you provide
 	// the Outposts bucket ARN in place of the bucket name. For more information
-	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -29871,7 +30002,7 @@ type PutObjectInput struct {
 	// see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17).
 	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -30270,13 +30401,13 @@ type PutObjectLegalHoldInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -30435,7 +30566,7 @@ type PutObjectLockConfigurationInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -30700,8 +30831,8 @@ type PutObjectRetentionInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -30709,7 +30840,7 @@ type PutObjectRetentionInput struct {
 	// Indicates whether this action should bypass Governance-mode restrictions.
 	BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -30876,21 +31007,21 @@ type PutObjectTaggingInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// When using this action with Amazon S3 on Outposts, you must direct requests
 	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
 	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
 	// using this action with S3 on Outposts through the AWS SDKs, you provide
 	// the Outposts bucket ARN in place of the bucket name. For more information
-	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -31058,7 +31189,7 @@ type PutPublicAccessBlockInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -31067,7 +31198,7 @@ type PutPublicAccessBlockInput struct {
 	// S3 bucket. You can enable the configuration options in any combination. For
 	// more information about when Amazon S3 considers a bucket or object public,
 	// see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// in the Amazon S3 User Guide.
 	//
 	// PublicAccessBlockConfiguration is a required field
 	PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `locationName:"PublicAccessBlockConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
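As a hedged sketch of the PublicAccessBlockConfiguration described above (same assumed setup as the earlier sketches; the bucket name is a placeholder), blocking all four categories of public access looks like this:

func blockAllPublicAccess(svc *s3.S3) error {
	_, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
		Bucket: aws.String("my-private-bucket"),
		PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
			BlockPublicAcls:       aws.Bool(true),
			BlockPublicPolicy:     aws.Bool(true),
			IgnorePublicAcls:      aws.Bool(true),
			RestrictPublicBuckets: aws.Bool(true),
		},
	})
	return err
}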
@@ -31180,7 +31311,7 @@ type QueueConfiguration struct {
 
 	// Specifies object key name filtering rules. For information about key name
 	// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// in the Amazon S3 User Guide.
 	Filter *NotificationConfigurationFilter `type:"structure"`
 
 	// An optional unique identifier for configurations in a notification configuration.
@@ -31256,7 +31387,7 @@ type QueueConfigurationDeprecated struct {
 	// Deprecated: Event has been deprecated
 	Event *string `deprecated:"true" type:"string" enum:"Event"`
 
-	// A collection of bucket events for which to send notifications
+	// A collection of bucket events for which to send notifications.
 	Events []*string `locationName:"Event" type:"list" flattened:"true"`
 
 	// An optional unique identifier for configurations in a notification configuration.
@@ -31534,7 +31665,7 @@ type ReplicationConfiguration struct {
 	// The Amazon Resource Name (ARN) of the AWS Identity and Access Management
 	// (IAM) role that Amazon S3 assumes when replicating objects. For more information,
 	// see How to Set Up Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// in the Amazon S3 User Guide.
 	//
 	// Role is a required field
 	Role *string `type:"string" required:"true"`
@@ -31649,7 +31780,7 @@ type ReplicationRule struct {
 	// with the highest priority. The higher the number, the higher the priority.
 	//
 	// For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// in the Amazon S3 User Guide.
 	Priority *int64 `type:"integer"`
 
 	// A container that describes additional filters for identifying the source
@@ -31775,7 +31906,7 @@ func (s *ReplicationRule) SetStatus(v string) *ReplicationRule {
 //    an And tag.
 //
 //    * If you specify a filter based on multiple tags, wrap the Tag elements
-//    in an And tag
+//    in an And tag.
 type ReplicationRuleAndOperator struct {
 	_ struct{} `type:"structure"`
 
@@ -32064,21 +32195,21 @@ type RestoreObjectInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// When using this action with Amazon S3 on Outposts, you must direct requests
 	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
 	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
 	// using this action with S3 on Outposts through the AWS SDKs, you provide
 	// the Outposts bucket ARN in place of the bucket name. For more information
-	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -32354,7 +32485,7 @@ func (s *RestoreRequest) SetType(v string) *RestoreRequest {
 // Specifies the redirect behavior and when a redirect is applied. For more
 // information about routing rules, see Configuring advanced conditional redirects
 // (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 type RoutingRule struct {
 	_ struct{} `type:"structure"`
 
@@ -32410,7 +32541,7 @@ func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule {
 // Specifies lifecycle rules for an Amazon S3 bucket. For more information,
 // see Put Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html)
 // in the Amazon Simple Storage Service API Reference. For examples, see Put
-// Bucket Lifecycle Configuration Examples (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html#API_PutBucketLifecycleConfiguration_Examples)
+// Bucket Lifecycle Configuration Examples (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html#API_PutBucketLifecycleConfiguration_Examples).
 type Rule struct {
 	_ struct{} `type:"structure"`
 
@@ -32418,7 +32549,7 @@ type Rule struct {
 	// that Amazon S3 will wait before permanently removing all parts of the upload.
 	// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
 	// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// in the Amazon S3 User Guide.
 	AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
 
 	// Specifies the expiration for the lifecycle of the object.
@@ -32462,7 +32593,7 @@ type Rule struct {
 	// Specifies when an object transitions to a specified storage class. For more
 	// information about Amazon S3 lifecycle configuration rules, see Transitioning
 	// Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html)
-	// in the Amazon Simple Storage Service User Guide.
+	// in the Amazon S3 User Guide.
 	Transition *Transition `type:"structure"`
 }
 
@@ -32821,7 +32952,7 @@ type SelectObjectContentInput struct {
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -33286,7 +33417,7 @@ type ServerSideEncryptionRule struct {
 	// S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled.
 	//
 	// For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html)
-	// in the Amazon Simple Storage Service User Guide.
+	// in the Amazon S3 User Guide.
 	BucketKeyEnabled *bool `type:"boolean"`
 }
 
@@ -33853,14 +33984,14 @@ type TopicConfiguration struct {
 
 	// The Amazon S3 bucket event about which to send notifications. For more information,
 	// see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
-	// in the Amazon Simple Storage Service User Guide.
+	// in the Amazon S3 User Guide.
 	//
 	// Events is a required field
 	Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
 
 	// Specifies object key name filtering rules. For information about key name
 	// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// in the Amazon S3 User Guide.
 	Filter *NotificationConfigurationFilter `type:"structure"`
 
 	// An optional unique identifier for configurations in a notification configuration.
@@ -33986,7 +34117,7 @@ func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDep
 // Specifies when an object transitions to a specified storage class. For more
 // information about Amazon S3 lifecycle configuration rules, see Transitioning
 // Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html)
-// in the Amazon Simple Storage Service User Guide.
+// in the Amazon S3 User Guide.
 type Transition struct {
 	_ struct{} `type:"structure"`
 
@@ -34039,23 +34170,23 @@ type UploadPartCopyInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// When using this action with Amazon S3 on Outposts, you must direct requests
 	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
 	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
 	// using this action with S3 on Outposts through the AWS SDKs, you provide
 	// the Outposts bucket ARN in place of the bucket name. For more information
-	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
 	// Specifies the source object for the copy operation. You specify the value
 	// in one of two formats, depending on whether you want to access the source
-	// object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-points.html):
+	// object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html):
 	//
 	//    * For objects not accessed through an access point, specify the name of
 	//    the source bucket and key of the source object, separated by a slash (/).
@@ -34119,12 +34250,12 @@ type UploadPartCopyInput struct {
 	// encryption key was transmitted without error.
 	CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
 
-	// The account id of the expected destination bucket owner. If the destination
+	// The account ID of the expected destination bucket owner. If the destination
 	// bucket is owned by a different account, the request will fail with an HTTP
 	// 403 (Access Denied) error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
 
-	// The account id of the expected source bucket owner. If the source bucket
+	// The account ID of the expected source bucket owner. If the source bucket
 	// is owned by a different account, the request will fail with an HTTP 403 (Access
 	// Denied) error.
 	ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"`
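The CopySource formats documented above apply to UploadPartCopy just as they do to CopyObject. A hedged sketch follows (same assumed setup; the bucket names, key, and upload ID are placeholders, with the upload ID coming from a prior CreateMultipartUpload call):

func copyPartFromBucket(svc *s3.S3, uploadID string) error {
	// For objects not accessed through an access point, CopySource is
	// "source-bucket/source-key"; for access points, use the ARN form
	// described in the comments above.
	_, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
		Bucket:     aws.String("destination-bucket"),
		Key:        aws.String("large-object"),
		UploadId:   aws.String(uploadID),
		PartNumber: aws.Int64(1),
		CopySource: aws.String("source-bucket/source-key"),
	})
	return err
}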
@@ -34481,16 +34612,16 @@ type UploadPartInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// When using this action with Amazon S3 on Outposts, you must direct requests
 	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
 	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
 	// using this action with S3 on Outposts through the AWS SDKs, you provide
 	// the Outposts bucket ARN in place of the bucket name. For more information
-	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -34504,7 +34635,7 @@ type UploadPartInput struct {
 	// if object lock parameters are specified.
 	ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -34911,6 +35042,452 @@ func (s *WebsiteConfiguration) SetRoutingRules(v []*RoutingRule) *WebsiteConfigu
 	return s
 }
 
+type WriteGetObjectResponseInput struct {
+	_ struct{} `locationName:"WriteGetObjectResponseRequest" type:"structure" payload:"Body"`
+
+	// Indicates that a range of bytes was specified.
+	AcceptRanges *string `location:"header" locationName:"x-amz-fwd-header-accept-ranges" type:"string"`
+
+	// The object data.
+	//
+	// To use a non-seekable io.Reader for this request, wrap the io.Reader with
+	// "aws.ReadSeekCloser". The SDK will not retry request errors for non-seekable
+	// readers. This will allow the SDK to send the reader's payload as chunked
+	// transfer encoding.
+	Body io.ReadSeeker `type:"blob"`
+
+	// Indicates whether the object stored in Amazon S3 uses an S3 bucket key for
+	// server-side encryption with AWS KMS (SSE-KMS).
+	BucketKeyEnabled *bool `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`
+
+	// Specifies caching behavior along the request/reply chain.
+	CacheControl *string `location:"header" locationName:"x-amz-fwd-header-Cache-Control" type:"string"`
+
+	// Specifies presentational information for the object.
+	ContentDisposition *string `location:"header" locationName:"x-amz-fwd-header-Content-Disposition" type:"string"`
+
+	// Specifies what content encodings have been applied to the object and thus
+	// what decoding mechanisms must be applied to obtain the media-type referenced
+	// by the Content-Type header field.
+	ContentEncoding *string `location:"header" locationName:"x-amz-fwd-header-Content-Encoding" type:"string"`
+
+	// The language the content is in.
+	ContentLanguage *string `location:"header" locationName:"x-amz-fwd-header-Content-Language" type:"string"`
+
+	// The size of the content body in bytes.
+	ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+	// The portion of the object returned in the response.
+	ContentRange *string `location:"header" locationName:"x-amz-fwd-header-Content-Range" type:"string"`
+
+	// A standard MIME type describing the format of the object data.
+	ContentType *string `location:"header" locationName:"x-amz-fwd-header-Content-Type" type:"string"`
+
+	// Specifies whether an object stored in Amazon S3 is (true) or is not (false)
+	// a delete marker.
+	DeleteMarker *bool `location:"header" locationName:"x-amz-fwd-header-x-amz-delete-marker" type:"boolean"`
+
+	// An opaque identifier assigned by a web server to a specific version of a
+	// resource found at a URL.
+	ETag *string `location:"header" locationName:"x-amz-fwd-header-ETag" type:"string"`
+
+	// A string that uniquely identifies an error condition. Returned in the <Code>
+	// tag of the error XML response for a corresponding GetObject call. Cannot
+	// be used with a successful StatusCode header or when the transformed object
+	// is provided in the body. All error codes from S3 are sentence-cased. Regex
+	// value is "^[A-Z][a-zA-Z]+$".
+	ErrorCode *string `location:"header" locationName:"x-amz-fwd-error-code" type:"string"`
+
+	// Contains a generic description of the error condition. Returned in the <Message>
+	// tag of the error XML response for a corresponding GetObject call. Cannot
+	// be used with a successful StatusCode header or when the transformed object
+	// is provided in the body.
+	ErrorMessage *string `location:"header" locationName:"x-amz-fwd-error-message" type:"string"`
+
+	// If expiration is configured for the object stored in Amazon S3 (see PUT
+	// Bucket lifecycle), this header includes expiry-date and rule-id key-value
+	// pairs that provide object expiration information. The value of the rule-id
+	// is URL encoded.
+	Expiration *string `location:"header" locationName:"x-amz-fwd-header-x-amz-expiration" type:"string"`
+
+	// The date and time at which the object is no longer cacheable.
+	Expires *time.Time `location:"header" locationName:"x-amz-fwd-header-Expires" type:"timestamp"`
+
+	// The date and time that the object was last modified.
+	LastModified *time.Time `location:"header" locationName:"x-amz-fwd-header-Last-Modified" type:"timestamp"`
+
+	// A map of metadata to store with the object in S3.
+	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+	// Set to the number of metadata entries not returned in x-amz-meta headers.
+	// This can happen if you create metadata using an API like SOAP that supports
+	// more flexible metadata than the REST API. For example, using SOAP, you can
+	// create metadata whose values are not legal HTTP headers.
+	MissingMeta *int64 `location:"header" locationName:"x-amz-fwd-header-x-amz-missing-meta" type:"integer"`
+
+	// Indicates whether an object stored in Amazon S3 has an active legal hold.
+	ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-fwd-header-x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+	// Indicates whether an object stored in Amazon S3 has Object Lock enabled.
+	// For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html).
+	ObjectLockMode *string `location:"header" locationName:"x-amz-fwd-header-x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+	// The date and time when Object Lock is configured to expire.
+	ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-fwd-header-x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+	// The count of parts this object has.
+	PartsCount *int64 `location:"header" locationName:"x-amz-fwd-header-x-amz-mp-parts-count" type:"integer"`
+
+	// Indicates whether the request involves a bucket that is either a source or
+	// a destination in a Replication rule. For more information about S3 Replication,
+	// see Replication
+	// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html).
+	ReplicationStatus *string `location:"header" locationName:"x-amz-fwd-header-x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.
+	RequestCharged *string `location:"header" locationName:"x-amz-fwd-header-x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+	// Route prefix to the HTTP URL generated.
+	//
+	// RequestRoute is a required field
+	RequestRoute *string `location:"header" locationName:"x-amz-request-route" type:"string" required:"true"`
+
+	// A single-use encrypted token that maps WriteGetObjectResponse to the end
+	// user GetObject request.
+	//
+	// RequestToken is a required field
+	RequestToken *string `location:"header" locationName:"x-amz-request-token" type:"string" required:"true"`
+
+	// Provides information about object restoration operation and expiration time
+	// of the restored object copy.
+	Restore *string `location:"header" locationName:"x-amz-fwd-header-x-amz-restore" type:"string"`
+
+	// Encryption algorithm used if server-side encryption with a customer-provided
+	// encryption key was specified for the object stored in Amazon S3.
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// 128-bit MD5 digest of customer-provided encryption key used in Amazon S3
+	// to encrypt data stored in S3. For more information, see Protecting data using
+	// server-side encryption with customer-provided encryption keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html).
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// If present, specifies the ID of the AWS Key Management Service (AWS KMS)
+	// symmetric customer managed customer master key (CMK) that was used for the
+	// object stored in Amazon S3.
+	SSEKMSKeyId *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+	// The server-side encryption algorithm used when storing the requested object
+	// in Amazon S3 (for example, AES256, aws:kms).
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+	// The integer status code for an HTTP response of a corresponding GetObject
+	// request.
+	//
+	// Status Codes
+	//
+	//    * 200 - OK
+	//
+	//    * 206 - Partial Content
+	//
+	//    * 304 - Not Modified
+	//
+	//    * 400 - Bad Request
+	//
+	//    * 401 - Unauthorized
+	//
+	//    * 403 - Forbidden
+	//
+	//    * 404 - Not Found
+	//
+	//    * 405 - Method Not Allowed
+	//
+	//    * 409 - Conflict
+	//
+	//    * 411 - Length Required
+	//
+	//    * 412 - Precondition Failed
+	//
+	//    * 416 - Range Not Satisfiable
+	//
+	//    * 500 - Internal Server Error
+	//
+	//    * 503 - Service Unavailable
+	StatusCode *int64 `location:"header" locationName:"x-amz-fwd-status" type:"integer"`
+
+	// The class of storage used to store the object in Amazon S3.
+	StorageClass *string `location:"header" locationName:"x-amz-fwd-header-x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+	// The number of tags, if any, on the object.
+	TagCount *int64 `location:"header" locationName:"x-amz-fwd-header-x-amz-tagging-count" type:"integer"`
+
+	// An ID used to reference a specific version of the object.
+	VersionId *string `location:"header" locationName:"x-amz-fwd-header-x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s WriteGetObjectResponseInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s WriteGetObjectResponseInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *WriteGetObjectResponseInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "WriteGetObjectResponseInput"}
+	if s.RequestRoute == nil {
+		invalidParams.Add(request.NewErrParamRequired("RequestRoute"))
+	}
+	if s.RequestRoute != nil && len(*s.RequestRoute) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("RequestRoute", 1))
+	}
+	if s.RequestToken == nil {
+		invalidParams.Add(request.NewErrParamRequired("RequestToken"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
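Putting the new input type together, here is a hedged sketch of how an S3 Object Lambda handler might answer an end user's GetObject request. It assumes the WriteGetObjectResponse operation that accompanies this input type elsewhere in the package, plus the standard library bytes package; the route, token, and payload would come from the invoking event:

func respondWithTransformedObject(svc *s3.S3, route, token string, payload []byte) error {
	// RequestRoute and RequestToken are required and tie this response back to
	// the original GetObject request. bytes.Reader is seekable; wrap a
	// non-seekable stream with aws.ReadSeekCloser, as the Body comment above notes.
	_, err := svc.WriteGetObjectResponse(&s3.WriteGetObjectResponseInput{
		RequestRoute: aws.String(route),
		RequestToken: aws.String(token),
		StatusCode:   aws.Int64(200),
		ContentType:  aws.String("application/octet-stream"),
		Body:         bytes.NewReader(payload),
	})
	return err
}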
+// SetAcceptRanges sets the AcceptRanges field's value.
+func (s *WriteGetObjectResponseInput) SetAcceptRanges(v string) *WriteGetObjectResponseInput {
+	s.AcceptRanges = &v
+	return s
+}
+
+// SetBody sets the Body field's value.
+func (s *WriteGetObjectResponseInput) SetBody(v io.ReadSeeker) *WriteGetObjectResponseInput {
+	s.Body = v
+	return s
+}
+
+// SetBucketKeyEnabled sets the BucketKeyEnabled field's value.
+func (s *WriteGetObjectResponseInput) SetBucketKeyEnabled(v bool) *WriteGetObjectResponseInput {
+	s.BucketKeyEnabled = &v
+	return s
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *WriteGetObjectResponseInput) SetCacheControl(v string) *WriteGetObjectResponseInput {
+	s.CacheControl = &v
+	return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *WriteGetObjectResponseInput) SetContentDisposition(v string) *WriteGetObjectResponseInput {
+	s.ContentDisposition = &v
+	return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *WriteGetObjectResponseInput) SetContentEncoding(v string) *WriteGetObjectResponseInput {
+	s.ContentEncoding = &v
+	return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *WriteGetObjectResponseInput) SetContentLanguage(v string) *WriteGetObjectResponseInput {
+	s.ContentLanguage = &v
+	return s
+}
+
+// SetContentLength sets the ContentLength field's value.
+func (s *WriteGetObjectResponseInput) SetContentLength(v int64) *WriteGetObjectResponseInput {
+	s.ContentLength = &v
+	return s
+}
+
+// SetContentRange sets the ContentRange field's value.
+func (s *WriteGetObjectResponseInput) SetContentRange(v string) *WriteGetObjectResponseInput {
+	s.ContentRange = &v
+	return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *WriteGetObjectResponseInput) SetContentType(v string) *WriteGetObjectResponseInput {
+	s.ContentType = &v
+	return s
+}
+
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *WriteGetObjectResponseInput) SetDeleteMarker(v bool) *WriteGetObjectResponseInput {
+	s.DeleteMarker = &v
+	return s
+}
+
+// SetETag sets the ETag field's value.
+func (s *WriteGetObjectResponseInput) SetETag(v string) *WriteGetObjectResponseInput {
+	s.ETag = &v
+	return s
+}
+
+// SetErrorCode sets the ErrorCode field's value.
+func (s *WriteGetObjectResponseInput) SetErrorCode(v string) *WriteGetObjectResponseInput {
+	s.ErrorCode = &v
+	return s
+}
+
+// SetErrorMessage sets the ErrorMessage field's value.
+func (s *WriteGetObjectResponseInput) SetErrorMessage(v string) *WriteGetObjectResponseInput {
+	s.ErrorMessage = &v
+	return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *WriteGetObjectResponseInput) SetExpiration(v string) *WriteGetObjectResponseInput {
+	s.Expiration = &v
+	return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *WriteGetObjectResponseInput) SetExpires(v time.Time) *WriteGetObjectResponseInput {
+	s.Expires = &v
+	return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *WriteGetObjectResponseInput) SetLastModified(v time.Time) *WriteGetObjectResponseInput {
+	s.LastModified = &v
+	return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *WriteGetObjectResponseInput) SetMetadata(v map[string]*string) *WriteGetObjectResponseInput {
+	s.Metadata = v
+	return s
+}
+
+// SetMissingMeta sets the MissingMeta field's value.
+func (s *WriteGetObjectResponseInput) SetMissingMeta(v int64) *WriteGetObjectResponseInput {
+	s.MissingMeta = &v
+	return s
+}
+
+// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value.
+func (s *WriteGetObjectResponseInput) SetObjectLockLegalHoldStatus(v string) *WriteGetObjectResponseInput {
+	s.ObjectLockLegalHoldStatus = &v
+	return s
+}
+
+// SetObjectLockMode sets the ObjectLockMode field's value.
+func (s *WriteGetObjectResponseInput) SetObjectLockMode(v string) *WriteGetObjectResponseInput {
+	s.ObjectLockMode = &v
+	return s
+}
+
+// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value.
+func (s *WriteGetObjectResponseInput) SetObjectLockRetainUntilDate(v time.Time) *WriteGetObjectResponseInput {
+	s.ObjectLockRetainUntilDate = &v
+	return s
+}
+
+// SetPartsCount sets the PartsCount field's value.
+func (s *WriteGetObjectResponseInput) SetPartsCount(v int64) *WriteGetObjectResponseInput {
+	s.PartsCount = &v
+	return s
+}
+
+// SetReplicationStatus sets the ReplicationStatus field's value.
+func (s *WriteGetObjectResponseInput) SetReplicationStatus(v string) *WriteGetObjectResponseInput {
+	s.ReplicationStatus = &v
+	return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *WriteGetObjectResponseInput) SetRequestCharged(v string) *WriteGetObjectResponseInput {
+	s.RequestCharged = &v
+	return s
+}
+
+// SetRequestRoute sets the RequestRoute field's value.
+func (s *WriteGetObjectResponseInput) SetRequestRoute(v string) *WriteGetObjectResponseInput {
+	s.RequestRoute = &v
+	return s
+}
+
+// SetRequestToken sets the RequestToken field's value.
+func (s *WriteGetObjectResponseInput) SetRequestToken(v string) *WriteGetObjectResponseInput {
+	s.RequestToken = &v
+	return s
+}
+
+// SetRestore sets the Restore field's value.
+func (s *WriteGetObjectResponseInput) SetRestore(v string) *WriteGetObjectResponseInput {
+	s.Restore = &v
+	return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *WriteGetObjectResponseInput) SetSSECustomerAlgorithm(v string) *WriteGetObjectResponseInput {
+	s.SSECustomerAlgorithm = &v
+	return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *WriteGetObjectResponseInput) SetSSECustomerKeyMD5(v string) *WriteGetObjectResponseInput {
+	s.SSECustomerKeyMD5 = &v
+	return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *WriteGetObjectResponseInput) SetSSEKMSKeyId(v string) *WriteGetObjectResponseInput {
+	s.SSEKMSKeyId = &v
+	return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *WriteGetObjectResponseInput) SetServerSideEncryption(v string) *WriteGetObjectResponseInput {
+	s.ServerSideEncryption = &v
+	return s
+}
+
+// SetStatusCode sets the StatusCode field's value.
+func (s *WriteGetObjectResponseInput) SetStatusCode(v int64) *WriteGetObjectResponseInput {
+	s.StatusCode = &v
+	return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *WriteGetObjectResponseInput) SetStorageClass(v string) *WriteGetObjectResponseInput {
+	s.StorageClass = &v
+	return s
+}
+
+// SetTagCount sets the TagCount field's value.
+func (s *WriteGetObjectResponseInput) SetTagCount(v int64) *WriteGetObjectResponseInput {
+	s.TagCount = &v
+	return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *WriteGetObjectResponseInput) SetVersionId(v string) *WriteGetObjectResponseInput {
+	s.VersionId = &v
+	return s
+}
+
+func (s *WriteGetObjectResponseInput) hostLabels() map[string]string {
+	return map[string]string{
+		"RequestRoute": aws.StringValue(s.RequestRoute),
+	}
+}
+
+type WriteGetObjectResponseOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s WriteGetObjectResponseOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s WriteGetObjectResponseOutput) GoString() string {
+	return s.String()
+}
+
 const (
 	// AnalyticsS3ExportFileFormatCsv is a AnalyticsS3ExportFileFormat enum value
 	AnalyticsS3ExportFileFormatCsv = "CSV"
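For context, a minimal sketch of how the new WriteGetObjectResponse input is typically filled in from an S3 Object Lambda handler. The route and token normally come from the Lambda event's getObjectContext, so outputRoute/outputToken below are placeholders, not values defined in this diff.

    import (
        "strings"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    // respond streams a transformed object back to the requester via the new operation.
    func respond(svc *s3.S3, outputRoute, outputToken string) error {
        _, err := svc.WriteGetObjectResponse(&s3.WriteGetObjectResponseInput{
            RequestRoute: aws.String(outputRoute),
            RequestToken: aws.String(outputToken),
            StatusCode:   aws.Int64(200),
            Body:         strings.NewReader("transformed object contents"), // any io.ReadSeeker
        })
        return err
    }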
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
index f1959b03a..ce87ab320 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
@@ -48,6 +48,8 @@ func defaultInitRequestFn(r *request.Request) {
 		//	case opGetObject:
 		//		r.Handlers.Build.PushBack(askForTxEncodingAppendMD5)
 		//		r.Handlers.Unmarshal.PushBack(useMD5ValidationReader)
+	case opWriteGetObjectResponse:
+		r.Handlers.Build.PushFront(buildWriteGetObjectResponseEndpoint)
 	}
 }
 
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go
index 6346b9279..9fc2105fd 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go
@@ -1,6 +1,8 @@
 package s3
 
 import (
+	"fmt"
+	"github.com/aws/aws-sdk-go/aws/awserr"
 	"net/url"
 	"strings"
 
@@ -11,6 +13,13 @@ import (
 	"github.com/aws/aws-sdk-go/internal/s3shared/arn"
 )
 
+const (
+	s3Namespace              = "s3"
+	s3AccessPointNamespace   = "s3-accesspoint"
+	s3ObjectsLambdaNamespace = "s3-object-lambda"
+	s3OutpostsNamespace      = "s3-outposts"
+)
+
 // Used by shapes with members decorated as endpoint ARN.
 func parseEndpointARN(v string) (arn.Resource, error) {
 	return arn.ParseResource(v, accessPointResourceParser)
@@ -20,10 +29,14 @@ func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) {
 	resParts := arn.SplitResource(a.Resource)
 	switch resParts[0] {
 	case "accesspoint":
-		if a.Service != "s3" {
-			return arn.AccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3"}
+		switch a.Service {
+		case s3Namespace:
+			return arn.ParseAccessPointResource(a, resParts[1:])
+		case s3ObjectsLambdaNamespace:
+			return parseS3ObjectLambdaAccessPointResource(a, resParts)
+		default:
+			return arn.AccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s or %s", s3Namespace, s3ObjectsLambdaNamespace)}
 		}
-		return arn.ParseAccessPointResource(a, resParts[1:])
 	case "outpost":
 		if a.Service != "s3-outposts" {
 			return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"}
@@ -80,6 +93,25 @@ func parseOutpostAccessPointResource(a awsarn.ARN, resParts []string) (arn.Outpo
 	return outpostAccessPointARN, nil
 }
 
+func parseS3ObjectLambdaAccessPointResource(a awsarn.ARN, resParts []string) (arn.S3ObjectLambdaAccessPointARN, error) {
+	if a.Service != s3ObjectsLambdaNamespace {
+		return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s", s3ObjectsLambdaNamespace)}
+	}
+
+	accessPointARN, err := arn.ParseAccessPointResource(a, resParts[1:])
+	if err != nil {
+		return arn.S3ObjectLambdaAccessPointARN{}, err
+	}
+
+	if len(accessPointARN.Region) == 0 {
+		return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("%s region not set", s3ObjectsLambdaNamespace)}
+	}
+
+	return arn.S3ObjectLambdaAccessPointARN{
+		AccessPointARN: accessPointARN,
+	}, nil
+}
+
 func endpointHandler(req *request.Request) {
 	endpoint, ok := req.Params.(endpointARNGetter)
 	if !ok || !endpoint.hasEndpointARN() {
@@ -116,6 +148,11 @@ func endpointHandler(req *request.Request) {
 		if err != nil {
 			req.Error = err
 		}
+	case arn.S3ObjectLambdaAccessPointARN:
+		err = updateRequestS3ObjectLambdaAccessPointEndpoint(req, tv)
+		if err != nil {
+			req.Error = err
+		}
 	case arn.OutpostAccessPointARN:
 		// outposts does not support FIPS regions
 		if resReq.ResourceConfiguredForFIPS() {
@@ -162,6 +199,31 @@ func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.Acce
 	return nil
 }
 
+func updateRequestS3ObjectLambdaAccessPointEndpoint(req *request.Request, accessPoint arn.S3ObjectLambdaAccessPointARN) error {
+	// DualStack not supported
+	if aws.BoolValue(req.Config.UseDualStack) {
+		return s3shared.NewClientConfiguredForDualStackError(accessPoint,
+			req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
+	}
+
+	// Accelerate not supported
+	if aws.BoolValue(req.Config.S3UseAccelerate) {
+		return s3shared.NewClientConfiguredForAccelerateError(accessPoint,
+			req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
+	}
+
+	// Ignore the disable host prefix for access points
+	req.Config.DisableEndpointHostPrefix = aws.Bool(false)
+
+	if err := s3ObjectLambdaAccessPointEndpointBuilder(accessPoint).build(req); err != nil {
+		return err
+	}
+
+	removeBucketFromPath(req.HTTPRequest.URL)
+
+	return nil
+}
+
 func updateRequestOutpostAccessPointEndpoint(req *request.Request, accessPoint arn.OutpostAccessPointARN) error {
 	// Accelerate not supported
 	if aws.BoolValue(req.Config.S3UseAccelerate) {
@@ -192,3 +254,37 @@ func removeBucketFromPath(u *url.URL) {
 		u.Path = "/"
 	}
 }
+
+func buildWriteGetObjectResponseEndpoint(req *request.Request) {
+	// DualStack not supported
+	if aws.BoolValue(req.Config.UseDualStack) {
+		req.Error = awserr.New("ConfigurationError", "client configured for dualstack but not supported for operation", nil)
+		return
+	}
+
+	// Accelerate not supported
+	if aws.BoolValue(req.Config.S3UseAccelerate) {
+		req.Error = awserr.New("ConfigurationError", "client configured for accelerate but not supported for operation", nil)
+		return
+	}
+
+	signingName := s3ObjectsLambdaNamespace
+	signingRegion := req.ClientInfo.SigningRegion
+
+	if !hasCustomEndpoint(req) {
+		endpoint, err := resolveRegionalEndpoint(req, aws.StringValue(req.Config.Region), EndpointsID)
+		if err != nil {
+			req.Error = awserr.New(request.ErrCodeSerialization, "failed to resolve endpoint", err)
+			return
+		}
+		signingRegion = endpoint.SigningRegion
+
+		if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
+			req.Error = err
+			return
+		}
+		updateS3HostPrefixForS3ObjectLambda(req)
+	}
+
+	redirectSigner(req, signingName, signingRegion)
+}
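Together with the new parser, this lets an s3-object-lambda access point ARN stand in for a bucket name on regular calls. A hedged fragment (the ARN is a made-up placeholder and sess is an existing *session.Session):

    svc := s3.New(sess)
    obj, err := svc.GetObject(&s3.GetObjectInput{
        Bucket: aws.String("arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint/my-olap"),
        Key:    aws.String("example.txt"),
    })
    if err != nil {
        return err
    }
    defer obj.Body.Close()
    // The request is signed with service name "s3-object-lambda" and sent to
    // my-olap-123456789012.s3-object-lambda.us-west-2.amazonaws.com per the
    // endpoint format documented in endpoint_builder.go below.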
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go
index eb77d981e..71e9c9eff 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go
@@ -66,13 +66,9 @@ func (a accessPointEndpointBuilder) build(req *request.Request) error {
 		if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
 			return err
 		}
-		const serviceEndpointLabel = "s3-accesspoint"
 
 		// dual stack provided by endpoint resolver
-		cfgHost := req.HTTPRequest.URL.Host
-		if strings.HasPrefix(cfgHost, "s3") {
-			req.HTTPRequest.URL.Host = serviceEndpointLabel + cfgHost[2:]
-		}
+		updateS3HostForS3AccessPoint(req)
 	}
 
 	protocol.HostPrefixBuilder{
@@ -98,6 +94,73 @@ func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string {
 	}
 }
 
+// s3ObjectLambdaAccessPointEndpointBuilder represents the endpoint builder for an s3 object lambda access point arn
+type s3ObjectLambdaAccessPointEndpointBuilder arn.S3ObjectLambdaAccessPointARN
+
+// build builds the endpoint for the corresponding access point ARN.
+//
+// When building an endpoint from an access point ARN, the format used is:
+// - Access point endpoint format : {accesspointName}-{accountId}.s3-object-lambda.{region}.{dnsSuffix}
+// - example : myaccesspoint-012345678901.s3-object-lambda.us-west-2.amazonaws.com
+//
+// Access Point Endpoint requests are signed using "s3-object-lambda" as the signing name.
+//
+func (a s3ObjectLambdaAccessPointEndpointBuilder) build(req *request.Request) error {
+	resolveRegion := arn.S3ObjectLambdaAccessPointARN(a).Region
+	cfgRegion := aws.StringValue(req.Config.Region)
+
+	if s3shared.IsFIPS(cfgRegion) {
+		if aws.BoolValue(req.Config.S3UseARNRegion) && s3shared.IsCrossRegion(req, resolveRegion) {
+			// FIPS with cross region is not supported, the SDK must fail
+			// because there is no well defined method for SDK to construct a
+			// correct FIPS endpoint.
+			return s3shared.NewClientConfiguredForCrossRegionFIPSError(arn.S3ObjectLambdaAccessPointARN(a),
+				req.ClientInfo.PartitionID, cfgRegion, nil)
+		}
+		resolveRegion = cfgRegion
+	}
+
+	endpoint, err := resolveRegionalEndpoint(req, resolveRegion, EndpointsID)
+	if err != nil {
+		return s3shared.NewFailedToResolveEndpointError(arn.S3ObjectLambdaAccessPointARN(a),
+			req.ClientInfo.PartitionID, cfgRegion, err)
+	}
+
+	endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL))
+
+	endpoint.SigningName = s3ObjectsLambdaNamespace
+
+	if !hasCustomEndpoint(req) {
+		if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
+			return err
+		}
+
+		updateS3HostPrefixForS3ObjectLambda(req)
+	}
+
+	protocol.HostPrefixBuilder{
+		Prefix:   accessPointPrefixTemplate,
+		LabelsFn: a.hostPrefixLabelValues,
+	}.Build(req)
+
+	// signer redirection
+	redirectSigner(req, endpoint.SigningName, endpoint.SigningRegion)
+
+	err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host)
+	if err != nil {
+		return s3shared.NewInvalidARNError(arn.S3ObjectLambdaAccessPointARN(a), err)
+	}
+
+	return nil
+}
+
+func (a s3ObjectLambdaAccessPointEndpointBuilder) hostPrefixLabelValues() map[string]string {
+	return map[string]string{
+		accessPointPrefixLabel: arn.S3ObjectLambdaAccessPointARN(a).AccessPointName,
+		accountIDPrefixLabel:   arn.S3ObjectLambdaAccessPointARN(a).AccountID,
+	}
+}
+
 // outpostAccessPointEndpointBuilder represents the Endpoint builder for outpost access point arn.
 type outpostAccessPointEndpointBuilder arn.OutpostAccessPointARN
 
@@ -114,7 +177,7 @@ func (o outpostAccessPointEndpointBuilder) build(req *request.Request) error {
 	resolveService := o.Service
 
 	endpointsID := resolveService
-	if resolveService == "s3-outposts" {
+	if resolveService == s3OutpostsNamespace {
 		endpointsID = "s3"
 	}
 
@@ -130,11 +193,7 @@ func (o outpostAccessPointEndpointBuilder) build(req *request.Request) error {
 		if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
 			return err
 		}
-		// add url host as s3-outposts
-		cfgHost := req.HTTPRequest.URL.Host
-		if strings.HasPrefix(cfgHost, endpointsID) {
-			req.HTTPRequest.URL.Host = resolveService + cfgHost[len(endpointsID):]
-		}
+		updateHostPrefix(req, endpointsID, resolveService)
 	}
 
 	protocol.HostPrefixBuilder{
@@ -170,7 +229,6 @@ func resolveRegionalEndpoint(r *request.Request, region string, endpointsID stri
 }
 
 func updateRequestEndpoint(r *request.Request, endpoint string) (err error) {
-
 	r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath)
 	if err != nil {
 		return awserr.New(request.ErrCodeSerialization,
@@ -185,3 +243,19 @@ func redirectSigner(req *request.Request, signingName string, signingRegion stri
 	req.ClientInfo.SigningName = signingName
 	req.ClientInfo.SigningRegion = signingRegion
 }
+
+func updateS3HostForS3AccessPoint(req *request.Request) {
+	updateHostPrefix(req, "s3", s3AccessPointNamespace)
+}
+
+func updateS3HostPrefixForS3ObjectLambda(req *request.Request) {
+	updateHostPrefix(req, "s3", s3ObjectsLambdaNamespace)
+}
+
+func updateHostPrefix(req *request.Request, oldEndpointPrefix, newEndpointPrefix string) {
+	host := req.HTTPRequest.URL.Host
+	if strings.HasPrefix(host, oldEndpointPrefix) {
+		// replace the service host label oldEndpointPrefix with newEndpointPrefix
+		req.HTTPRequest.URL.Host = newEndpointPrefix + host[len(oldEndpointPrefix):]
+	}
+}
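The new updateHostPrefix helper is a plain prefix swap on the resolved regional host. A standalone sketch of the same transformation (standard library only):

    // rewriteHost mirrors updateHostPrefix: swap the leading service label, keep the rest.
    func rewriteHost(host, oldPrefix, newPrefix string) string {
        if strings.HasPrefix(host, oldPrefix) {
            return newPrefix + host[len(oldPrefix):]
        }
        return host
    }

    // rewriteHost("s3.us-west-2.amazonaws.com", "s3", "s3-object-lambda")
    //   => "s3-object-lambda.us-west-2.amazonaws.com"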
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go
index 7c6221878..1e32fb94d 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go
@@ -455,6 +455,10 @@ type S3API interface {
 	UploadPartCopyWithContext(aws.Context, *s3.UploadPartCopyInput, ...request.Option) (*s3.UploadPartCopyOutput, error)
 	UploadPartCopyRequest(*s3.UploadPartCopyInput) (*request.Request, *s3.UploadPartCopyOutput)
 
+	WriteGetObjectResponse(*s3.WriteGetObjectResponseInput) (*s3.WriteGetObjectResponseOutput, error)
+	WriteGetObjectResponseWithContext(aws.Context, *s3.WriteGetObjectResponseInput, ...request.Option) (*s3.WriteGetObjectResponseOutput, error)
+	WriteGetObjectResponseRequest(*s3.WriteGetObjectResponseInput) (*request.Request, *s3.WriteGetObjectResponseOutput)
+
 	WaitUntilBucketExists(*s3.HeadBucketInput) error
 	WaitUntilBucketExistsWithContext(aws.Context, *s3.HeadBucketInput, ...request.WaiterOption) error
 
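Code that mocks S3API by embedding the interface keeps compiling after the new methods are added; only tests that exercise the new operation need an override. Sketch:

    type mockS3 struct {
        s3iface.S3API // embedding satisfies every other method
    }

    func (m *mockS3) WriteGetObjectResponse(in *s3.WriteGetObjectResponseInput) (*s3.WriteGetObjectResponseOutput, error) {
        return &s3.WriteGetObjectResponseOutput{}, nil // canned response for tests
    }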
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/arn.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/arn.go
new file mode 100644
index 000000000..f0a7f9bfc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/arn.go
@@ -0,0 +1,23 @@
+package s3manager
+
+import (
+	"fmt"
+	"github.com/aws/aws-sdk-go/aws/arn"
+)
+
+func validateSupportedARNType(bucket string) error {
+	if !arn.IsARN(bucket) {
+		return nil
+	}
+
+	parsedARN, err := arn.Parse(bucket)
+	if err != nil {
+		return err
+	}
+
+	if parsedARN.Service == "s3-object-lambda" {
+		return fmt.Errorf("manager does not support s3-object-lambda service ARNs")
+	}
+
+	return nil
+}
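The same guard, seen from calling code via the public aws/arn package (the ARN is a placeholder):

    bucket := "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint/my-olap"
    if arn.IsARN(bucket) {
        if p, err := arn.Parse(bucket); err == nil && p.Service == "s3-object-lambda" {
            // s3manager's Uploader/Downloader reject this bucket value; use the plain
            // s3 client (GetObject / WriteGetObjectResponse) for object-lambda ARNs.
        }
    }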
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go
index 4b54b7c03..bbf3595a4 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go
@@ -192,6 +192,10 @@ func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ..
 // to perform a single GetObjectInput request for that object's range. This will
 // cause the part size and concurrency configurations to be ignored.
 func (d Downloader) DownloadWithContext(ctx aws.Context, w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
+	if err := validateSupportedARNType(aws.StringValue(input.Bucket)); err != nil {
+		return 0, err
+	}
+
 	impl := downloader{w: w, in: input, cfg: d, ctx: ctx}
 
 	for _, option := range options {
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
index 7dba8347b..9fa98fa2f 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
@@ -391,6 +391,10 @@ func (u *uploader) upload() (*UploadOutput, error) {
 
 // init will initialize all default options.
 func (u *uploader) init() error {
+	if err := validateSupportedARNType(aws.StringValue(u.in.Bucket)); err != nil {
+		return err
+	}
+
 	if u.cfg.Concurrency == 0 {
 		u.cfg.Concurrency = DefaultUploadConcurrency
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go
index ba319d9a6..fb8853710 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go
@@ -29,16 +29,16 @@ type UploadInput struct {
 	// the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
 	// When using this action with an access point through the AWS SDKs, you provide
 	// the access point ARN in place of the bucket name. For more information about
-	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
 	//
 	// When using this action with Amazon S3 on Outposts, you must direct requests
 	// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
 	// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
 	// using this action using S3 on Outposts through the AWS SDKs, you provide
 	// the Outposts bucket ARN in place of the bucket name. For more information
-	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
-	// in the Amazon Simple Storage Service Developer Guide.
+	// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
 	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -82,7 +82,7 @@ type UploadInput struct {
 	// see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17).
 	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
 
-	// The account id of the expected bucket owner. If the bucket is owned by a
+	// The account ID of the expected bucket owner. If the bucket is owned by a
 	// different account, the request will fail with an HTTP 403 (Access Denied)
 	// error.
 	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
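A short usage fragment matching the updated Bucket documentation: the access point ARN goes in place of the bucket name (ARN, key and body are placeholders; sess is an existing *session.Session):

    uploader := s3manager.NewUploader(sess)
    _, err := uploader.Upload(&s3manager.UploadInput{
        Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-ap"),
        Key:    aws.String("reports/2021-03.csv"),
        Body:   strings.NewReader("col1,col2\n1,2\n"),
    })
    if err != nil {
        return err
    }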
diff --git a/vendor/github.com/cheggaaa/pb/v3/element.go b/vendor/github.com/cheggaaa/pb/v3/element.go
index efd636b23..8f6b4bf11 100644
--- a/vendor/github.com/cheggaaa/pb/v3/element.go
+++ b/vendor/github.com/cheggaaa/pb/v3/element.go
@@ -231,6 +231,10 @@ var ElementBar ElementFunc = func(state *State, args ...string) string {
 	return p.buf.String()
 }
 
+func elapsedTime(state *State) time.Duration {
+	return state.Time().Sub(state.StartTime()).Truncate(time.Millisecond)
+}
+
 // ElementRemainingTime calculates remaining time based on speed (EWMA)
 // Optionally can take one or two string arguments.
 // First string will be used as value for format time duration string, default is "%s".
@@ -238,29 +242,23 @@ var ElementBar ElementFunc = func(state *State, args ...string) string {
 // Third string will be used when value not available, default is "?"
 // In template use as follows: {{rtime .}} or {{rtime . "%s remain"}} or {{rtime . "%s remain" "%s total" "???"}}
 var ElementRemainingTime ElementFunc = func(state *State, args ...string) string {
-	var rts string
-	sp := getSpeedObj(state).value(state)
-	if !state.IsFinished() {
-		if sp > 0 {
-			remain := float64(state.Total() - state.Value())
-			remainDur := time.Duration(remain/sp) * time.Second
-			rts = remainDur.String()
-		} else {
-			return argsHelper(args).getOr(2, "?")
-		}
-	} else {
-		rts = state.Time().Truncate(time.Second).Sub(state.StartTime().Truncate(time.Second)).String()
-		return fmt.Sprintf(argsHelper(args).getOr(1, "%s"), rts)
+	if state.IsFinished() {
+		return fmt.Sprintf(argsHelper(args).getOr(1, "%s"), elapsedTime(state))
 	}
-	return fmt.Sprintf(argsHelper(args).getOr(0, "%s"), rts)
+	sp := getSpeedObj(state).value(state)
+	if sp > 0 {
+		remain := float64(state.Total() - state.Value())
+		remainDur := time.Duration(remain/sp) * time.Second
+		return fmt.Sprintf(argsHelper(args).getOr(0, "%s"), remainDur)
+	}
+	return argsHelper(args).getOr(2, "?")
 }
 
 // ElementElapsedTime shows elapsed time
 // Optionally can take one argument - the format for the time string.
 // In template use as follows: {{etime .}} or {{etime . "%s elapsed"}}
 var ElementElapsedTime ElementFunc = func(state *State, args ...string) string {
-	etm := state.Time().Truncate(time.Second).Sub(state.StartTime().Truncate(time.Second))
-	return fmt.Sprintf(argsHelper(args).getOr(0, "%s"), etm.String())
+	return fmt.Sprintf(argsHelper(args).getOr(0, "%s"), elapsedTime(state))
 }
 
 // ElementString get value from bar by given key and print them
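A usage sketch of the template elements touched here, assuming the usual pb/v3 import; the template string is illustrative, not part of the change:

    tmpl := `{{counters . }} {{bar . }} {{etime . "%s elapsed"}} {{rtime . "%s remain" "%s total" "???"}}`
    bar := pb.ProgressBarTemplate(tmpl).Start64(int64(len(items))) // items: placeholder work list
    for range items {
        // ... process one item ...
        bar.Increment()
    }
    bar.Finish() // finished bars now report elapsed time truncated to milliseconds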
diff --git a/vendor/github.com/cheggaaa/pb/v3/pb.go b/vendor/github.com/cheggaaa/pb/v3/pb.go
index 10e0b4a27..fb546e8a7 100644
--- a/vendor/github.com/cheggaaa/pb/v3/pb.go
+++ b/vendor/github.com/cheggaaa/pb/v3/pb.go
@@ -21,7 +21,7 @@ import (
 )
 
 // Version of ProgressBar library
-const Version = "3.0.6"
+const Version = "3.0.7"
 
 type key int
 
@@ -238,6 +238,12 @@ func (pb *ProgressBar) SetTotal(value int64) *ProgressBar {
 	return pb
 }
 
+// AddTotal adds to the total bar value
+func (pb *ProgressBar) AddTotal(value int64) *ProgressBar {
+	atomic.AddInt64(&pb.total, value)
+	return pb
+}
+
 // SetCurrent sets the current bar value
 func (pb *ProgressBar) SetCurrent(value int64) *ProgressBar {
 	atomic.StoreInt64(&pb.current, value)
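A sketch of the new AddTotal helper, which lets the total grow while the bar is running instead of recomputing it for SetTotal:

    bar := pb.New64(0).Start()
    for _, batch := range batches { // batches: placeholder [][]item
        bar.AddTotal(int64(len(batch)))
        for range batch {
            bar.Increment()
        }
    }
    bar.Finish()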
diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go
index 1e7ff6420..066b4323b 100644
--- a/vendor/github.com/golang/protobuf/proto/registry.go
+++ b/vendor/github.com/golang/protobuf/proto/registry.go
@@ -13,6 +13,7 @@ import (
 	"strings"
 	"sync"
 
+	"google.golang.org/protobuf/reflect/protodesc"
 	"google.golang.org/protobuf/reflect/protoreflect"
 	"google.golang.org/protobuf/reflect/protoregistry"
 	"google.golang.org/protobuf/runtime/protoimpl"
@@ -62,14 +63,7 @@ func FileDescriptor(s filePath) fileDescGZIP {
 	// Find the descriptor in the v2 registry.
 	var b []byte
 	if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
-		if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok {
-			b = fd.ProtoLegacyRawDesc()
-		} else {
-			// TODO: Use protodesc.ToFileDescriptorProto to construct
-			// a descriptorpb.FileDescriptorProto and marshal it.
-			// However, doing so causes the proto package to have a dependency
-			// on descriptorpb, leading to cyclic dependency issues.
-		}
+		b, _ = Marshal(protodesc.ToFileDescriptorProto(fd))
 	}
 
 	// Locally cache the raw descriptor form for the file.
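Roughly the conversion the new code path relies on, as a hedged fragment (assumes the file's generated package, e.g. durationpb, is linked into the binary so its descriptor is registered; proto here is google.golang.org/protobuf/proto):

    fd, err := protoregistry.GlobalFiles.FindFileByPath("google/protobuf/duration.proto")
    if err == nil {
        fdp := protodesc.ToFileDescriptorProto(fd) // *descriptorpb.FileDescriptorProto
        raw, _ := proto.Marshal(fdp)               // the bytes the legacy accessor now reconstructs
        _ = raw
    }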
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
index e729dcff1..85f9f5736 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any.go
+++ b/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -19,6 +19,8 @@ const urlPrefix = "type.googleapis.com/"
 
 // AnyMessageName returns the message name contained in an anypb.Any message.
 // Most type assertions should use the Is function instead.
+//
+// Deprecated: Call the any.MessageName method instead.
 func AnyMessageName(any *anypb.Any) (string, error) {
 	name, err := anyMessageName(any)
 	return string(name), err
@@ -38,6 +40,8 @@ func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
 }
 
 // MarshalAny marshals the given message m into an anypb.Any message.
+//
+// Deprecated: Call the anypb.New function instead.
 func MarshalAny(m proto.Message) (*anypb.Any, error) {
 	switch dm := m.(type) {
 	case DynamicAny:
@@ -58,6 +62,9 @@ func MarshalAny(m proto.Message) (*anypb.Any, error) {
 // Empty returns a new message of the type specified in an anypb.Any message.
 // It returns protoregistry.NotFound if the corresponding message type could not
 // be resolved in the global registry.
+//
+// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead
+// to resolve the message name and create a new instance of it.
 func Empty(any *anypb.Any) (proto.Message, error) {
 	name, err := anyMessageName(any)
 	if err != nil {
@@ -76,6 +83,8 @@ func Empty(any *anypb.Any) (proto.Message, error) {
 //
 // The target message m may be a *DynamicAny message. If the underlying message
 // type could not be resolved, then this returns protoregistry.NotFound.
+//
+// Deprecated: Call the any.UnmarshalTo method instead.
 func UnmarshalAny(any *anypb.Any, m proto.Message) error {
 	if dm, ok := m.(*DynamicAny); ok {
 		if dm.Message == nil {
@@ -100,6 +109,8 @@ func UnmarshalAny(any *anypb.Any, m proto.Message) error {
 }
 
 // Is reports whether the Any message contains a message of the specified type.
+//
+// Deprecated: Call the any.MessageIs method instead.
 func Is(any *anypb.Any, m proto.Message) bool {
 	if any == nil || m == nil {
 		return false
@@ -119,6 +130,9 @@ func Is(any *anypb.Any, m proto.Message) bool {
 //   var x ptypes.DynamicAny
 //   if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
 //   fmt.Printf("unmarshaled message: %v", x.Message)
+//
+// Deprecated: Use the any.UnmarshalNew method instead to unmarshal
+// the any message contents into a new instance of the underlying message.
 type DynamicAny struct{ proto.Message }
 
 func (m DynamicAny) String() string {
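A migration sketch mapping the deprecated helpers to the anypb replacements named in the notices above:

    func anyRoundTrip() error {
        src := durationpb.New(90 * time.Second)
        a, err := anypb.New(src) // was: ptypes.MarshalAny(src)
        if err != nil {
            return err
        }
        if !a.MessageIs(&durationpb.Duration{}) { // was: ptypes.Is(a, ...)
            return fmt.Errorf("unexpected type %s", a.GetTypeUrl())
        }
        var d durationpb.Duration
        if err := a.UnmarshalTo(&d); err != nil { // was: ptypes.UnmarshalAny(a, &d)
            return err
        }
        _, err = a.UnmarshalNew() // was: DynamicAny + ptypes.UnmarshalAny
        return err
    }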
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
index fb9edd5c6..d3c33259d 100644
--- a/vendor/github.com/golang/protobuf/ptypes/doc.go
+++ b/vendor/github.com/golang/protobuf/ptypes/doc.go
@@ -3,4 +3,8 @@
 // license that can be found in the LICENSE file.
 
 // Package ptypes provides functionality for interacting with well-known types.
+//
+// Deprecated: Well-known types have specialized functionality directly
+// injected into the generated packages for each message type.
+// See the deprecation notice for each function for the suggested alternative.
 package ptypes
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
index 6110ae8a4..b2b55dd85 100644
--- a/vendor/github.com/golang/protobuf/ptypes/duration.go
+++ b/vendor/github.com/golang/protobuf/ptypes/duration.go
@@ -21,6 +21,8 @@ const (
 
 // Duration converts a durationpb.Duration to a time.Duration.
 // Duration returns an error if dur is invalid or overflows a time.Duration.
+//
+// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead.
 func Duration(dur *durationpb.Duration) (time.Duration, error) {
 	if err := validateDuration(dur); err != nil {
 		return 0, err
@@ -39,6 +41,8 @@ func Duration(dur *durationpb.Duration) (time.Duration, error) {
 }
 
 // DurationProto converts a time.Duration to a durationpb.Duration.
+//
+// Deprecated: Call the durationpb.New function instead.
 func DurationProto(d time.Duration) *durationpb.Duration {
 	nanos := d.Nanoseconds()
 	secs := nanos / 1e9
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
index 026d0d491..8368a3f70 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -33,6 +33,8 @@ const (
 //
 // A nil Timestamp returns an error. The first return value in that case is
 // undefined.
+//
+// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
 func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
 	// Don't return the zero value on error, because it corresponds to a valid
 	// timestamp. Instead return whatever time.Unix gives us.
@@ -46,6 +48,8 @@ func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
 }
 
 // TimestampNow returns a google.protobuf.Timestamp for the current time.
+//
+// Deprecated: Call the timestamppb.Now function instead.
 func TimestampNow() *timestamppb.Timestamp {
 	ts, err := TimestampProto(time.Now())
 	if err != nil {
@@ -56,6 +60,8 @@ func TimestampNow() *timestamppb.Timestamp {
 
 // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
 // It returns an error if the resulting Timestamp is invalid.
+//
+// Deprecated: Call the timestamppb.New function instead.
 func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
 	ts := &timestamppb.Timestamp{
 		Seconds: t.Unix(),
@@ -69,6 +75,9 @@ func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
 
 // TimestampString returns the RFC 3339 string for valid Timestamps.
 // For invalid Timestamps, it returns an error message in parentheses.
+//
+// Deprecated: Call the ts.AsTime method instead,
+// followed by a call to the Format method on the time.Time value.
 func TimestampString(ts *timestamppb.Timestamp) string {
 	t, err := Timestamp(ts)
 	if err != nil {
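The duration and timestamp helpers deprecated here map onto durationpb/timestamppb as follows (fragment):

    d := durationpb.New(90 * time.Second)    // was: ptypes.DurationProto(90 * time.Second)
    if err := d.CheckValid(); err == nil {
        _ = d.AsDuration()                   // was: ptypes.Duration(d)
    }

    ts := timestamppb.Now()                  // was: ptypes.TimestampNow()
    if err := ts.CheckValid(); err == nil {
        _ = ts.AsTime().Format(time.RFC3339) // was: ptypes.TimestampString(ts)
    }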
diff --git a/vendor/github.com/lithammer/go-jump-consistent-hash/.travis.yml b/vendor/github.com/lithammer/go-jump-consistent-hash/.travis.yml
deleted file mode 100644
index f9971eeb1..000000000
--- a/vendor/github.com/lithammer/go-jump-consistent-hash/.travis.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-language: go
-
-go:
-  - '1.6'
-  - '1.8'
-  - '1.10'
-  - '1.12'
-
-sudo: false
diff --git a/vendor/github.com/lithammer/go-jump-consistent-hash/README.md b/vendor/github.com/lithammer/go-jump-consistent-hash/README.md
index 3865b47fe..38c171d9a 100644
--- a/vendor/github.com/lithammer/go-jump-consistent-hash/README.md
+++ b/vendor/github.com/lithammer/go-jump-consistent-hash/README.md
@@ -1,6 +1,6 @@
 # Jump Consistent Hash
 
-[![Build Status](https://travis-ci.org/lithammer/go-jump-consistent-hash.svg?branch=master)](https://travis-ci.org/lithammer/go-jump-consistent-hash)
+[![Build Status](https://github.com/lithammer/go-jump-consistent-hash/workflows/Go/badge.svg)](https://github.com/lithammer/go-jump-consistent-hash/actions)
 [![Godoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/lithammer/go-jump-consistent-hash)
 
 Go implementation of the jump consistent hash algorithm[1] by John Lamping and Eric Veach.
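For orientation, the library's core call (signature as documented upstream; the key is arbitrary):

    import jump "github.com/lithammer/go-jump-consistent-hash"

    // bucketFor assigns a 64-bit key to one of n buckets; changing n remaps only
    // about 1/n of the keys, which is the point of consistent hashing.
    func bucketFor(key uint64, n int32) int32 {
        return jump.Hash(key, n)
    }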
diff --git a/vendor/github.com/lithammer/go-jump-consistent-hash/go.mod b/vendor/github.com/lithammer/go-jump-consistent-hash/go.mod
new file mode 100644
index 000000000..b42bd5632
--- /dev/null
+++ b/vendor/github.com/lithammer/go-jump-consistent-hash/go.mod
@@ -0,0 +1,3 @@
+module github.com/lithammer/go-jump-consistent-hash
+
+go 1.16
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info.go
deleted file mode 100644
index 288f0e854..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/build_info.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build go1.12
-
-package prometheus
-
-import "runtime/debug"
-
-// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+.
-func readBuildInfo() (path, version, sum string) {
-	path, version, sum = "unknown", "unknown", "unknown"
-	if bi, ok := debug.ReadBuildInfo(); ok {
-		path = bi.Main.Path
-		version = bi.Main.Version
-		sum = bi.Main.Sum
-	}
-	return
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go
deleted file mode 100644
index 6609e2877..000000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !go1.12
-
-package prometheus
-
-// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before
-// 1.12. Remove this whole file once the minimum supported Go version is 1.12.
-func readBuildInfo() (path, version, sum string) {
-	return "unknown", "unknown", "unknown"
-}
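The two deleted readBuildInfo shims are superseded by an inline debug.ReadBuildInfo call in the go_collector.go hunk further down; from a caller's perspective nothing changes, and registration still looks like this sketch (go_build_info only carries real path/version/checksum labels when the binary was built in module mode):

    reg := prometheus.NewRegistry()
    reg.MustRegister(prometheus.NewBuildInfoCollector())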
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
index 957d93a2d..4bb816ab7 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -20,7 +20,7 @@ import (
 	"strings"
 
 	"github.com/cespare/xxhash/v2"
-	//lint:ignore SA1019 Need to keep deprecated package for compatibility.
+	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
 	"github.com/golang/protobuf/proto"
 	"github.com/prometheus/common/model"
 
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
index 6f67d1046..db43ca5ba 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -384,7 +384,12 @@ type memStatsMetrics []struct {
 // https://github.com/povilasv/prommod for an example of a collector for the
 // module dependencies.
 func NewBuildInfoCollector() Collector {
-	path, version, sum := readBuildInfo()
+	path, version, sum := "unknown", "unknown", "unknown"
+	if bi, ok := debug.ReadBuildInfo(); ok {
+		path = bi.Main.Path
+		version = bi.Main.Version
+		sum = bi.Main.Sum
+	}
 	c := &selfCollector{MustNewConstMetric(
 		NewDesc(
 			"go_build_info",
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index f71e286be..3346fa1c5 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -22,7 +22,7 @@ import (
 	"sync/atomic"
 	"time"
 
-	//lint:ignore SA1019 Need to keep deprecated package for compatibility.
+	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
 	"github.com/golang/protobuf/proto"
 
 	dto "github.com/prometheus/client_model/go"
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
index a2b80b1c1..dc121910a 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -17,7 +17,7 @@ import (
 	"strings"
 	"time"
 
-	//lint:ignore SA1019 Need to keep deprecated package for compatibility.
+	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
 	"github.com/golang/protobuf/proto"
 	"github.com/prometheus/common/model"
 
@@ -58,7 +58,7 @@ type Metric interface {
 }
 
 // Opts bundles the options for creating most Metric types. Each metric
-// implementation XXX has its own XXXOpts type, but in most cases, it is just be
+// implementation XXX has its own XXXOpts type, but in most cases, it is just
 // an alias of this type (which might change when the requirement arises.)
 //
 // It is mandatory to set Name to a non-empty string. All other fields are
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index 48f5ef9d7..383a7f594 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -26,7 +26,7 @@ import (
 	"unicode/utf8"
 
 	"github.com/cespare/xxhash/v2"
-	//lint:ignore SA1019 Need to keep deprecated package for compatibility.
+	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
 	"github.com/golang/protobuf/proto"
 	"github.com/prometheus/common/expfmt"
 
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
index cf7007149..fb5ce22bf 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -23,7 +23,7 @@ import (
 	"time"
 
 	"github.com/beorn7/perks/quantile"
-	//lint:ignore SA1019 Need to keep deprecated package for compatibility.
+	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
 	"github.com/golang/protobuf/proto"
 
 	dto "github.com/prometheus/client_model/go"
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
index 8304de477..c778711b8 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/value.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -19,7 +19,7 @@ import (
 	"time"
 	"unicode/utf8"
 
-	//lint:ignore SA1019 Need to keep deprecated package for compatibility.
+	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
 	"github.com/golang/protobuf/proto"
 	"github.com/golang/protobuf/ptypes"
 
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
index 6ba49d85b..4ababe6c9 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -167,8 +167,8 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
 // calling the newMetric function provided during construction of the
 // MetricVec).
 //
-// It is possible to call this method without using the returned Metry to only
-// create the new Metric but leave it in its intitial state.
+// It is possible to call this method without using the returned Metric to only
+// create the new Metric but leave it in its initial state.
 //
 // Keeping the Metric for later use is possible (and should be considered if
 // performance is critical), but keep in mind that Reset, DeleteLabelValues and
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
index c1b12f084..74ee93280 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
@@ -17,7 +17,7 @@ import (
 	"fmt"
 	"sort"
 
-	//lint:ignore SA1019 Need to keep deprecated package for compatibility.
+	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
 	"github.com/golang/protobuf/proto"
 
 	dto "github.com/prometheus/client_model/go"
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
index e684ebd11..7f67b16e4 100644
--- a/vendor/github.com/prometheus/common/model/time.go
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -15,6 +15,7 @@ package model
 
 import (
 	"encoding/json"
+	"errors"
 	"fmt"
 	"math"
 	"regexp"
@@ -202,13 +203,23 @@ func ParseDuration(durationStr string) (Duration, error) {
 
 	// Parse the match at pos `pos` in the regex and use `mult` to turn that
 	// into ms, then add that value to the total parsed duration.
+	var overflowErr error
 	m := func(pos int, mult time.Duration) {
 		if matches[pos] == "" {
 			return
 		}
 		n, _ := strconv.Atoi(matches[pos])
+
+		// Check if the provided duration overflows time.Duration (> ~292 years).
+		if n > int((1<<63-1)/mult/time.Millisecond) {
+			overflowErr = errors.New("duration out of range")
+		}
 		d := time.Duration(n) * time.Millisecond
 		dur += d * mult
+
+		if dur < 0 {
+			overflowErr = errors.New("duration out of range")
+		}
 	}
 
 	m(2, 1000*60*60*24*365) // y
@@ -219,7 +230,7 @@ func ParseDuration(durationStr string) (Duration, error) {
 	m(12, 1000)             // s
 	m(14, 1)                // ms
 
-	return Duration(dur), nil
+	return Duration(dur), overflowErr
 }
 
 func (d Duration) String() string {
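Effect of the overflow guard, sketched with github.com/prometheus/common/model: durations that no longer fit in time.Duration (about 292 years) now return an error instead of silently wrapping.

    if _, err := model.ParseDuration("1h30m"); err != nil {
        // not reached: in-range durations parse exactly as before
    }
    if _, err := model.ParseDuration("300y"); err != nil {
        fmt.Println(err) // "duration out of range"
    }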
diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh
index d257fac50..d727cad19 100644
--- a/vendor/golang.org/x/sys/unix/mkall.sh
+++ b/vendor/golang.org/x/sys/unix/mkall.sh
@@ -199,7 +199,7 @@ illumos_amd64)
         mksyscall="go run mksyscall_solaris.go"
 	mkerrors=
 	mksysnum=
-	mktypes=
+	mktypes="GOARCH=$GOARCH go tool cgo -godefs"
 	;;
 *)
 	echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index 87f9d1e35..086d69411 100644
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -56,6 +56,7 @@ includes_Darwin='
 #define _DARWIN_C_SOURCE
 #define KERNEL
 #define _DARWIN_USE_64_BIT_INODE
+#define __APPLE_USE_RFC_3542
 #include <stdint.h>
 #include <sys/attr.h>
 #include <sys/clonefile.h>
@@ -216,6 +217,7 @@ struct ltchars {
 #include <linux/genetlink.h>
 #include <linux/hdreg.h>
 #include <linux/hidraw.h>
+#include <linux/icmp.h>
 #include <linux/icmpv6.h>
 #include <linux/if.h>
 #include <linux/if_addr.h>
@@ -500,7 +502,7 @@ ccflags="$@"
 		$2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL)_/ ||
 		$2 ~ /^TP_STATUS_/ ||
 		$2 ~ /^FALLOC_/ ||
-		$2 == "ICMPV6_FILTER" ||
+		$2 ~ /^ICMP(V6)?_FILTER$/ ||
 		$2 == "SOMAXCONN" ||
 		$2 == "NAME_MAX" ||
 		$2 == "IFNAMSIZ" ||
diff --git a/vendor/golang.org/x/sys/unix/syscall_illumos.go b/vendor/golang.org/x/sys/unix/syscall_illumos.go
index bc442e3ba..c5c58806c 100644
--- a/vendor/golang.org/x/sys/unix/syscall_illumos.go
+++ b/vendor/golang.org/x/sys/unix/syscall_illumos.go
@@ -9,7 +9,9 @@
 
 package unix
 
-import "unsafe"
+import (
+	"unsafe"
+)
 
 func bytes2iovec(bs [][]byte) []Iovec {
 	iovecs := make([]Iovec, len(bs))
@@ -76,3 +78,52 @@ func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) {
 	}
 	return
 }
+
+//sys	putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error)
+
+func Putmsg(fd int, cl []byte, data []byte, flags int) (err error) {
+	var clp, datap *strbuf
+	if len(cl) > 0 {
+		clp = &strbuf{
+			Len: int32(len(cl)),
+			Buf: (*int8)(unsafe.Pointer(&cl[0])),
+		}
+	}
+	if len(data) > 0 {
+		datap = &strbuf{
+			Len: int32(len(data)),
+			Buf: (*int8)(unsafe.Pointer(&data[0])),
+		}
+	}
+	return putmsg(fd, clp, datap, flags)
+}
+
+//sys	getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error)
+
+func Getmsg(fd int, cl []byte, data []byte) (retCl []byte, retData []byte, flags int, err error) {
+	var clp, datap *strbuf
+	if len(cl) > 0 {
+		clp = &strbuf{
+			Maxlen: int32(len(cl)),
+			Buf:    (*int8)(unsafe.Pointer(&cl[0])),
+		}
+	}
+	if len(data) > 0 {
+		datap = &strbuf{
+			Maxlen: int32(len(data)),
+			Buf:    (*int8)(unsafe.Pointer(&data[0])),
+		}
+	}
+
+	if err = getmsg(fd, clp, datap, &flags); err != nil {
+		return nil, nil, 0, err
+	}
+
+	if len(cl) > 0 {
+		retCl = cl[:clp.Len]
+	}
+	if len(data) > 0 {
+		retData = data[:datap.Len]
+	}
+	return retCl, retData, flags, nil
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
index a941d8881..a1e45694b 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
@@ -250,7 +250,7 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen
 }
 
 func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) error {
-	args := [4]uintptr{uintptr(s), uintptr(level), uintptr(name), uintptr(val)}
+	args := [5]uintptr{uintptr(s), uintptr(level), uintptr(name), uintptr(val), vallen}
 	_, _, err := Syscall(SYS_SOCKETCALL, netSetSockOpt, uintptr(unsafe.Pointer(&args)), 0)
 	if err != 0 {
 		return err
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go
index 169497f06..77fcde7c1 100644
--- a/vendor/golang.org/x/sys/unix/syscall_solaris.go
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go
@@ -565,7 +565,12 @@ func Minor(dev uint64) uint32 {
  * Expose the ioctl function
  */
 
-//sys	ioctl(fd int, req uint, arg uintptr) (err error)
+//sys	ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) = libc.ioctl
+
+func ioctl(fd int, req uint, arg uintptr) (err error) {
+	_, err = ioctlRet(fd, req, arg)
+	return err
+}
 
 func IoctlSetTermio(fd int, req uint, value *Termio) error {
 	err := ioctl(fd, req, uintptr(unsafe.Pointer(value)))
diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
index 0100cb12f..991996b60 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
@@ -776,15 +776,24 @@ const (
 	IPV6_2292PKTINFO                  = 0x13
 	IPV6_2292PKTOPTIONS               = 0x19
 	IPV6_2292RTHDR                    = 0x18
+	IPV6_3542DSTOPTS                  = 0x32
+	IPV6_3542HOPLIMIT                 = 0x2f
+	IPV6_3542HOPOPTS                  = 0x31
+	IPV6_3542NEXTHOP                  = 0x30
+	IPV6_3542PKTINFO                  = 0x2e
+	IPV6_3542RTHDR                    = 0x33
 	IPV6_ADDR_MC_FLAGS_PREFIX         = 0x20
 	IPV6_ADDR_MC_FLAGS_TRANSIENT      = 0x10
 	IPV6_ADDR_MC_FLAGS_UNICAST_BASED  = 0x30
+	IPV6_AUTOFLOWLABEL                = 0x3b
 	IPV6_BINDV6ONLY                   = 0x1b
 	IPV6_BOUND_IF                     = 0x7d
 	IPV6_CHECKSUM                     = 0x1a
 	IPV6_DEFAULT_MULTICAST_HOPS       = 0x1
 	IPV6_DEFAULT_MULTICAST_LOOP       = 0x1
 	IPV6_DEFHLIM                      = 0x40
+	IPV6_DONTFRAG                     = 0x3e
+	IPV6_DSTOPTS                      = 0x32
 	IPV6_FAITH                        = 0x1d
 	IPV6_FLOWINFO_MASK                = 0xffffff0f
 	IPV6_FLOWLABEL_MASK               = 0xffff0f00
@@ -796,6 +805,8 @@ const (
 	IPV6_FW_GET                       = 0x22
 	IPV6_FW_ZERO                      = 0x21
 	IPV6_HLIMDEC                      = 0x1
+	IPV6_HOPLIMIT                     = 0x2f
+	IPV6_HOPOPTS                      = 0x31
 	IPV6_IPSEC_POLICY                 = 0x1c
 	IPV6_JOIN_GROUP                   = 0xc
 	IPV6_LEAVE_GROUP                  = 0xd
@@ -807,20 +818,34 @@ const (
 	IPV6_MAX_SOCK_SRC_FILTER          = 0x80
 	IPV6_MIN_MEMBERSHIPS              = 0x1f
 	IPV6_MMTU                         = 0x500
+	IPV6_MSFILTER                     = 0x4a
 	IPV6_MULTICAST_HOPS               = 0xa
 	IPV6_MULTICAST_IF                 = 0x9
 	IPV6_MULTICAST_LOOP               = 0xb
+	IPV6_NEXTHOP                      = 0x30
+	IPV6_PATHMTU                      = 0x2c
+	IPV6_PKTINFO                      = 0x2e
 	IPV6_PORTRANGE                    = 0xe
 	IPV6_PORTRANGE_DEFAULT            = 0x0
 	IPV6_PORTRANGE_HIGH               = 0x1
 	IPV6_PORTRANGE_LOW                = 0x2
+	IPV6_PREFER_TEMPADDR              = 0x3f
+	IPV6_RECVDSTOPTS                  = 0x28
+	IPV6_RECVHOPLIMIT                 = 0x25
+	IPV6_RECVHOPOPTS                  = 0x27
+	IPV6_RECVPATHMTU                  = 0x2b
+	IPV6_RECVPKTINFO                  = 0x3d
+	IPV6_RECVRTHDR                    = 0x26
 	IPV6_RECVTCLASS                   = 0x23
+	IPV6_RTHDR                        = 0x33
+	IPV6_RTHDRDSTOPTS                 = 0x39
 	IPV6_RTHDR_LOOSE                  = 0x0
 	IPV6_RTHDR_STRICT                 = 0x1
 	IPV6_RTHDR_TYPE_0                 = 0x0
 	IPV6_SOCKOPT_RESERVED1            = 0x3
 	IPV6_TCLASS                       = 0x24
 	IPV6_UNICAST_HOPS                 = 0x4
+	IPV6_USE_MIN_MTU                  = 0x2a
 	IPV6_V6ONLY                       = 0x1b
 	IPV6_VERSION                      = 0x60
 	IPV6_VERSION_MASK                 = 0xf0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
index df26a1968..e644eaf5e 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
@@ -776,15 +776,24 @@ const (
 	IPV6_2292PKTINFO                  = 0x13
 	IPV6_2292PKTOPTIONS               = 0x19
 	IPV6_2292RTHDR                    = 0x18
+	IPV6_3542DSTOPTS                  = 0x32
+	IPV6_3542HOPLIMIT                 = 0x2f
+	IPV6_3542HOPOPTS                  = 0x31
+	IPV6_3542NEXTHOP                  = 0x30
+	IPV6_3542PKTINFO                  = 0x2e
+	IPV6_3542RTHDR                    = 0x33
 	IPV6_ADDR_MC_FLAGS_PREFIX         = 0x20
 	IPV6_ADDR_MC_FLAGS_TRANSIENT      = 0x10
 	IPV6_ADDR_MC_FLAGS_UNICAST_BASED  = 0x30
+	IPV6_AUTOFLOWLABEL                = 0x3b
 	IPV6_BINDV6ONLY                   = 0x1b
 	IPV6_BOUND_IF                     = 0x7d
 	IPV6_CHECKSUM                     = 0x1a
 	IPV6_DEFAULT_MULTICAST_HOPS       = 0x1
 	IPV6_DEFAULT_MULTICAST_LOOP       = 0x1
 	IPV6_DEFHLIM                      = 0x40
+	IPV6_DONTFRAG                     = 0x3e
+	IPV6_DSTOPTS                      = 0x32
 	IPV6_FAITH                        = 0x1d
 	IPV6_FLOWINFO_MASK                = 0xffffff0f
 	IPV6_FLOWLABEL_MASK               = 0xffff0f00
@@ -796,6 +805,8 @@ const (
 	IPV6_FW_GET                       = 0x22
 	IPV6_FW_ZERO                      = 0x21
 	IPV6_HLIMDEC                      = 0x1
+	IPV6_HOPLIMIT                     = 0x2f
+	IPV6_HOPOPTS                      = 0x31
 	IPV6_IPSEC_POLICY                 = 0x1c
 	IPV6_JOIN_GROUP                   = 0xc
 	IPV6_LEAVE_GROUP                  = 0xd
@@ -807,20 +818,34 @@ const (
 	IPV6_MAX_SOCK_SRC_FILTER          = 0x80
 	IPV6_MIN_MEMBERSHIPS              = 0x1f
 	IPV6_MMTU                         = 0x500
+	IPV6_MSFILTER                     = 0x4a
 	IPV6_MULTICAST_HOPS               = 0xa
 	IPV6_MULTICAST_IF                 = 0x9
 	IPV6_MULTICAST_LOOP               = 0xb
+	IPV6_NEXTHOP                      = 0x30
+	IPV6_PATHMTU                      = 0x2c
+	IPV6_PKTINFO                      = 0x2e
 	IPV6_PORTRANGE                    = 0xe
 	IPV6_PORTRANGE_DEFAULT            = 0x0
 	IPV6_PORTRANGE_HIGH               = 0x1
 	IPV6_PORTRANGE_LOW                = 0x2
+	IPV6_PREFER_TEMPADDR              = 0x3f
+	IPV6_RECVDSTOPTS                  = 0x28
+	IPV6_RECVHOPLIMIT                 = 0x25
+	IPV6_RECVHOPOPTS                  = 0x27
+	IPV6_RECVPATHMTU                  = 0x2b
+	IPV6_RECVPKTINFO                  = 0x3d
+	IPV6_RECVRTHDR                    = 0x26
 	IPV6_RECVTCLASS                   = 0x23
+	IPV6_RTHDR                        = 0x33
+	IPV6_RTHDRDSTOPTS                 = 0x39
 	IPV6_RTHDR_LOOSE                  = 0x0
 	IPV6_RTHDR_STRICT                 = 0x1
 	IPV6_RTHDR_TYPE_0                 = 0x0
 	IPV6_SOCKOPT_RESERVED1            = 0x3
 	IPV6_TCLASS                       = 0x24
 	IPV6_UNICAST_HOPS                 = 0x4
+	IPV6_USE_MIN_MTU                  = 0x2a
 	IPV6_V6ONLY                       = 0x1b
 	IPV6_VERSION                      = 0x60
 	IPV6_VERSION_MASK                 = 0xf0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index 504dd6cd2..3b1b9287b 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -974,6 +974,7 @@ const (
 	HUGETLBFS_MAGIC                             = 0x958458f6
 	IBSHIFT                                     = 0x10
 	ICMPV6_FILTER                               = 0x1
+	ICMP_FILTER                                 = 0x1
 	ICRNL                                       = 0x100
 	IFA_F_DADFAILED                             = 0x8
 	IFA_F_DEPRECATED                            = 0x20
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go
index b57c7050d..af5cb064e 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go
@@ -15,19 +15,25 @@ import (
 //go:cgo_import_dynamic libc_writev writev "libc.so"
 //go:cgo_import_dynamic libc_pwritev pwritev "libc.so"
 //go:cgo_import_dynamic libc_accept4 accept4 "libsocket.so"
+//go:cgo_import_dynamic libc_putmsg putmsg "libc.so"
+//go:cgo_import_dynamic libc_getmsg getmsg "libc.so"
 
 //go:linkname procreadv libc_readv
 //go:linkname procpreadv libc_preadv
 //go:linkname procwritev libc_writev
 //go:linkname procpwritev libc_pwritev
 //go:linkname procaccept4 libc_accept4
+//go:linkname procputmsg libc_putmsg
+//go:linkname procgetmsg libc_getmsg
 
 var (
 	procreadv,
 	procpreadv,
 	procwritev,
 	procpwritev,
-	procaccept4 syscallFunc
+	procaccept4,
+	procputmsg,
+	procgetmsg syscallFunc
 )
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
@@ -100,3 +106,23 @@ func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int,
 	}
 	return
 }
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) {
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procputmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error) {
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(unsafe.Pointer(flags)), 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
index 7099f555a..4e18d5c99 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
@@ -619,8 +619,9 @@ func __minor(version int, dev uint64) (val uint) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func ioctl(fd int, req uint, arg uintptr) (err error) {
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0)
+func ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) {
+	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0)
+	ret = int(r0)
 	if e1 != 0 {
 		err = e1
 	}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go
new file mode 100644
index 000000000..1137a5a1f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go
@@ -0,0 +1,40 @@
+// cgo -godefs types_illumos.go | go run mkpost.go
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
+//go:build amd64 && illumos
+// +build amd64,illumos
+
+package unix
+
+const (
+	TUNNEWPPA = 0x540001
+	TUNSETPPA = 0x540002
+
+	I_STR     = 0x5308
+	I_POP     = 0x5303
+	I_PUSH    = 0x5302
+	I_PLINK   = 0x5316
+	I_PUNLINK = 0x5317
+
+	IF_UNITSEL = -0x7ffb8cca
+)
+
+type strbuf struct {
+	Maxlen int32
+	Len    int32
+	Buf    *int8
+}
+
+type strioctl struct {
+	Cmd    int32
+	Timout int32
+	Len    int32
+	Dp     *int8
+}
+
+type lifreq struct {
+	Name   [32]int8
+	Lifru1 [4]byte
+	Type   uint32
+	Lifru  [336]byte
+}
diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go
index 6f3f7e6dd..50799301f 100644
--- a/vendor/google.golang.org/api/storage/v1/storage-gen.go
+++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go
@@ -2449,7 +2449,7 @@ func (c *BucketAccessControlsDeleteCall) Header() http.Header {
 
 func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -2597,7 +2597,7 @@ func (c *BucketAccessControlsGetCall) Header() http.Header {
 
 func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -2764,7 +2764,7 @@ func (c *BucketAccessControlsInsertCall) Header() http.Header {
 
 func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -2937,7 +2937,7 @@ func (c *BucketAccessControlsListCall) Header() http.Header {
 
 func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -3098,7 +3098,7 @@ func (c *BucketAccessControlsPatchCall) Header() http.Header {
 
 func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -3272,7 +3272,7 @@ func (c *BucketAccessControlsUpdateCall) Header() http.Header {
 
 func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -3458,7 +3458,7 @@ func (c *BucketsDeleteCall) Header() http.Header {
 
 func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -3637,7 +3637,7 @@ func (c *BucketsGetCall) Header() http.Header {
 
 func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -3843,7 +3843,7 @@ func (c *BucketsGetIamPolicyCall) Header() http.Header {
 
 func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -4060,7 +4060,7 @@ func (c *BucketsInsertCall) Header() http.Header {
 
 func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -4317,7 +4317,7 @@ func (c *BucketsListCall) Header() http.Header {
 
 func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -4527,7 +4527,7 @@ func (c *BucketsLockRetentionPolicyCall) Header() http.Header {
 
 func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -4762,7 +4762,7 @@ func (c *BucketsPatchCall) Header() http.Header {
 
 func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -4991,7 +4991,7 @@ func (c *BucketsSetIamPolicyCall) Header() http.Header {
 
 func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -5166,7 +5166,7 @@ func (c *BucketsTestIamPermissionsCall) Header() http.Header {
 
 func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -5406,7 +5406,7 @@ func (c *BucketsUpdateCall) Header() http.Header {
 
 func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -5618,7 +5618,7 @@ func (c *ChannelsStopCall) Header() http.Header {
 
 func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -5735,7 +5735,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header {
 
 func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -5883,7 +5883,7 @@ func (c *DefaultObjectAccessControlsGetCall) Header() http.Header {
 
 func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -6051,7 +6051,7 @@ func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header {
 
 func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -6241,7 +6241,7 @@ func (c *DefaultObjectAccessControlsListCall) Header() http.Header {
 
 func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -6414,7 +6414,7 @@ func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header {
 
 func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -6588,7 +6588,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header {
 
 func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -6760,7 +6760,7 @@ func (c *NotificationsDeleteCall) Header() http.Header {
 
 func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -6908,7 +6908,7 @@ func (c *NotificationsGetCall) Header() http.Header {
 
 func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -7078,7 +7078,7 @@ func (c *NotificationsInsertCall) Header() http.Header {
 
 func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -7253,7 +7253,7 @@ func (c *NotificationsListCall) Header() http.Header {
 
 func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -7426,7 +7426,7 @@ func (c *ObjectAccessControlsDeleteCall) Header() http.Header {
 
 func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -7598,7 +7598,7 @@ func (c *ObjectAccessControlsGetCall) Header() http.Header {
 
 func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -7789,7 +7789,7 @@ func (c *ObjectAccessControlsInsertCall) Header() http.Header {
 
 func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -7986,7 +7986,7 @@ func (c *ObjectAccessControlsListCall) Header() http.Header {
 
 func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -8171,7 +8171,7 @@ func (c *ObjectAccessControlsPatchCall) Header() http.Header {
 
 func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -8369,7 +8369,7 @@ func (c *ObjectAccessControlsUpdateCall) Header() http.Header {
 
 func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -8606,7 +8606,7 @@ func (c *ObjectsComposeCall) Header() http.Header {
 
 func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -8949,7 +8949,7 @@ func (c *ObjectsCopyCall) Header() http.Header {
 
 func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -9277,7 +9277,7 @@ func (c *ObjectsDeleteCall) Header() http.Header {
 
 func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -9510,7 +9510,7 @@ func (c *ObjectsGetCall) Header() http.Header {
 
 func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -9760,7 +9760,7 @@ func (c *ObjectsGetIamPolicyCall) Header() http.Header {
 
 func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -10077,7 +10077,7 @@ func (c *ObjectsInsertCall) Header() http.Header {
 
 func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -10450,7 +10450,7 @@ func (c *ObjectsListCall) Header() http.Header {
 
 func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -10767,7 +10767,7 @@ func (c *ObjectsPatchCall) Header() http.Header {
 
 func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -11159,7 +11159,7 @@ func (c *ObjectsRewriteCall) Header() http.Header {
 
 func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -11462,7 +11462,7 @@ func (c *ObjectsSetIamPolicyCall) Header() http.Header {
 
 func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -11662,7 +11662,7 @@ func (c *ObjectsTestIamPermissionsCall) Header() http.Header {
 
 func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -11923,7 +11923,7 @@ func (c *ObjectsUpdateCall) Header() http.Header {
 
 func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -12241,7 +12241,7 @@ func (c *ObjectsWatchAllCall) Header() http.Header {
 
 func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -12457,7 +12457,7 @@ func (c *ProjectsHmacKeysCreateCall) Header() http.Header {
 
 func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -12607,7 +12607,7 @@ func (c *ProjectsHmacKeysDeleteCall) Header() http.Header {
 
 func (c *ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -12742,7 +12742,7 @@ func (c *ProjectsHmacKeysGetCall) Header() http.Header {
 
 func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -12942,7 +12942,7 @@ func (c *ProjectsHmacKeysListCall) Header() http.Header {
 
 func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -13137,7 +13137,7 @@ func (c *ProjectsHmacKeysUpdateCall) Header() http.Header {
 
 func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -13314,7 +13314,7 @@ func (c *ProjectsServiceAccountGetCall) Header() http.Header {
 
 func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210314")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210322")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
diff --git a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go
index 8d283a3c1..cfe334201 100644
--- a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2021 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@
 
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.25.0
+// 	protoc-gen-go v1.25.0-devel
 // 	protoc        v3.13.0
 // source: google/type/expr.proto
 
diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go
index b2e3930fb..6338c44dc 100644
--- a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go
+++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go
@@ -17,7 +17,6 @@ import (
 	"unicode/utf8"
 
 	"google.golang.org/protobuf/compiler/protogen"
-	"google.golang.org/protobuf/internal/encoding/messageset"
 	"google.golang.org/protobuf/internal/encoding/tag"
 	"google.golang.org/protobuf/internal/genid"
 	"google.golang.org/protobuf/internal/version"
@@ -58,7 +57,6 @@ var (
 	protojsonPackage     goImportPath = protogen.GoImportPath("google.golang.org/protobuf/encoding/protojson")
 	protoreflectPackage  goImportPath = protogen.GoImportPath("google.golang.org/protobuf/reflect/protoreflect")
 	protoregistryPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/reflect/protoregistry")
-	protoV1Package       goImportPath = protogen.GoImportPath("github.com/golang/protobuf/proto")
 )
 
 type goImportPath interface {
@@ -89,12 +87,6 @@ func GenerateFile(gen *protogen.Plugin, file *protogen.File) *protogen.Generated
 		g.P("_ = ", protoimplPackage.Ident("EnforceVersion"), "(", protoimplPackage.Ident("MaxVersion"), " - ", protoimpl.GenVersion, ")")
 		g.P(")")
 		g.P()
-
-		// TODO: Remove this after some soak-in period after the v2 release.
-		g.P("// This is a compile-time assertion that a sufficiently up-to-date version")
-		g.P("// of the legacy proto package is being used.")
-		g.P("const _ = ", protoV1Package.Ident("ProtoPackageIsVersion4"))
-		g.P()
 	}
 
 	for i, imps := 0, f.Desc.Imports(); i < imps.Len(); i++ {
@@ -116,17 +108,14 @@ func GenerateFile(gen *protogen.Plugin, file *protogen.File) *protogen.Generated
 // genStandaloneComments prints all leading comments for a FileDescriptorProto
 // location identified by the field number n.
 func genStandaloneComments(g *protogen.GeneratedFile, f *fileInfo, n int32) {
-	for _, loc := range f.Proto.GetSourceCodeInfo().GetLocation() {
-		if len(loc.Path) == 1 && loc.Path[0] == n {
-			for _, s := range loc.GetLeadingDetachedComments() {
-				g.P(protogen.Comments(s))
-				g.P()
-			}
-			if s := loc.GetLeadingComments(); s != "" {
-				g.P(protogen.Comments(s))
-				g.P()
-			}
-		}
+	loc := f.Desc.SourceLocations().ByPath(protoreflect.SourcePath{n})
+	for _, s := range loc.LeadingDetachedComments {
+		g.P(protogen.Comments(s))
+		g.P()
+	}
+	if s := loc.LeadingComments; s != "" {
+		g.P(protogen.Comments(s))
+		g.P()
 	}
 }
 
@@ -735,12 +724,6 @@ func genExtensions(g *protogen.GeneratedFile, f *fileInfo) {
 
 	g.P("var ", extensionTypesVarName(f), " = []", protoimplPackage.Ident("ExtensionInfo"), "{")
 	for _, x := range f.allExtensions {
-		// For MessageSet extensions, the name used is the parent message.
-		name := x.Desc.FullName()
-		if messageset.IsMessageSetExtension(x.Desc) {
-			name = name.Parent()
-		}
-
 		g.P("{")
 		g.P("ExtendedType: (*", x.Extendee.GoIdent, ")(nil),")
 		goType, pointer := fieldGoType(g, f, x.Extension)
@@ -749,7 +732,7 @@ func genExtensions(g *protogen.GeneratedFile, f *fileInfo) {
 		}
 		g.P("ExtensionType: (", goType, ")(nil),")
 		g.P("Field: ", x.Desc.Number(), ",")
-		g.P("Name: ", strconv.Quote(string(name)), ",")
+		g.P("Name: ", strconv.Quote(string(x.Desc.FullName())), ",")
 		g.P("Tag: ", strconv.Quote(fieldProtobufTagValue(x.Extension)), ",")
 		g.P("Filename: ", strconv.Quote(f.Desc.Path()), ",")
 		g.P("},")
diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go
index 9a1b7bdfa..dbaa529ca 100644
--- a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go
+++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go
@@ -971,9 +971,12 @@ func genMessageKnownFunctions(g *protogen.GeneratedFile, f *fileInfo, m *message
 		g.P()
 		g.P("			// Identify the next message to search within.")
 		g.P("			md = fd.Message() // may be nil")
-		g.P("			if fd.IsMap() {")
-		g.P("				md = fd.MapValue().Message() // may be nil")
+		g.P()
+		g.P("			// Repeated fields are only allowed at the last postion.")
+		g.P("			if fd.IsList() || fd.IsMap() {")
+		g.P("				md = nil")
 		g.P("			}")
+		g.P()
 		g.P("			return true")
 		g.P("		}) {")
 		g.P("			return i")
diff --git a/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go b/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go
index 3892d0583..17cbe1a39 100644
--- a/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go
+++ b/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go
@@ -13,7 +13,6 @@ package protogen
 import (
 	"bufio"
 	"bytes"
-	"encoding/binary"
 	"fmt"
 	"go/ast"
 	"go/parser"
@@ -21,7 +20,6 @@ import (
 	"go/token"
 	"go/types"
 	"io/ioutil"
-	"log"
 	"os"
 	"path"
 	"path/filepath"
@@ -165,8 +163,6 @@ func (opts Options) New(req *pluginpb.CodeGeneratorRequest) (*Plugin, error) {
 
 	packageNames := make(map[string]GoPackageName) // filename -> package name
 	importPaths := make(map[string]GoImportPath)   // filename -> import path
-	mfiles := make(map[string]bool)                // filename set
-	var packageImportPath GoImportPath
 	for _, param := range strings.Split(req.GetParameter(), ",") {
 		var value string
 		if i := strings.Index(param, "="); i >= 0 {
@@ -176,8 +172,6 @@ func (opts Options) New(req *pluginpb.CodeGeneratorRequest) (*Plugin, error) {
 		switch param {
 		case "":
 			// Ignore.
-		case "import_path":
-			packageImportPath = GoImportPath(value)
 		case "module":
 			gen.module = value
 		case "paths":
@@ -199,16 +193,13 @@ func (opts Options) New(req *pluginpb.CodeGeneratorRequest) (*Plugin, error) {
 			}
 		default:
 			if param[0] == 'M' {
-				if i := strings.Index(value, ";"); i >= 0 {
-					pkgName := GoPackageName(value[i+1:])
-					if otherName, ok := packageNames[param[1:]]; ok && pkgName != otherName {
-						return nil, fmt.Errorf("inconsistent package names for %q: %q != %q", value[:i], pkgName, otherName)
-					}
+				impPath, pkgName := splitImportPathAndPackageName(value)
+				if pkgName != "" {
 					packageNames[param[1:]] = pkgName
-					value = value[:i]
 				}
-				importPaths[param[1:]] = GoImportPath(value)
-				mfiles[param[1:]] = true
+				if impPath != "" {
+					importPaths[param[1:]] = impPath
+				}
 				continue
 			}
 			if opts.ParamFunc != nil {
@@ -218,17 +209,11 @@ func (opts Options) New(req *pluginpb.CodeGeneratorRequest) (*Plugin, error) {
 			}
 		}
 	}
-	if gen.module != "" {
-		// When the module= option is provided, we strip the module name
-		// prefix from generated files. This only makes sense if generated
-		// filenames are based on the import path, so default to paths=import
-		// and complain if source_relative was selected manually.
-		switch gen.pathType {
-		case pathTypeLegacy:
-			gen.pathType = pathTypeImport
-		case pathTypeSourceRelative:
-			return nil, fmt.Errorf("cannot use module= with paths=source_relative")
-		}
+	// When the module= option is provided, we strip the module name
+	// prefix from generated files. This only makes sense if generated
+	// filenames are based on the import path.
+	if gen.module != "" && gen.pathType == pathTypeSourceRelative {
+		return nil, fmt.Errorf("cannot use module= with paths=source_relative")
 	}
 
 	// Figure out the import path and package name for each file.
@@ -243,119 +228,52 @@ func (opts Options) New(req *pluginpb.CodeGeneratorRequest) (*Plugin, error) {
 	//
 	//     option go_package = "google.golang.org/protobuf/types/known/anypb";
 	//
-	// Build systems which want to exert full control over import paths may
-	// specify M<filename>=<import_path> flags.
-	//
-	// Other approaches are not recommend.
-	generatedFileNames := make(map[string]bool)
-	for _, name := range gen.Request.FileToGenerate {
-		generatedFileNames[name] = true
-	}
-	// We need to determine the import paths before the package names,
-	// because the Go package name for a file is sometimes derived from
-	// different file in the same package.
-	packageNameForImportPath := make(map[GoImportPath]GoPackageName)
+	// Alternatively, build systems which want to exert full control over
+	// import paths may specify M<filename>=<import_path> flags.
 	for _, fdesc := range gen.Request.ProtoFile {
+		// The "M" command-line flags take precedence over
+		// the "go_package" option in the .proto source file.
 		filename := fdesc.GetName()
-		packageName, importPath := goPackageOption(fdesc)
-		switch {
-		case importPaths[filename] != "":
-			// Command line: Mfoo.proto=quux/bar
-			//
-			// Explicit mapping of source file to import path.
-		case generatedFileNames[filename] && packageImportPath != "":
-			// Command line: import_path=quux/bar
-			//
-			// The import_path flag sets the import path for every file that
-			// we generate code for.
-			importPaths[filename] = packageImportPath
-		case importPath != "":
-			// Source file: option go_package = "quux/bar";
-			//
-			// The go_package option sets the import path. Most users should use this.
-			importPaths[filename] = importPath
-		default:
-			// Source filename.
-			//
-			// Last resort when nothing else is available.
-			importPaths[filename] = GoImportPath(path.Dir(filename))
+		impPath, pkgName := splitImportPathAndPackageName(fdesc.GetOptions().GetGoPackage())
+		if importPaths[filename] == "" && impPath != "" {
+			importPaths[filename] = impPath
 		}
-		if packageName != "" {
-			packageNameForImportPath[importPaths[filename]] = packageName
-		}
-	}
-	for _, fdesc := range gen.Request.ProtoFile {
-		filename := fdesc.GetName()
-		packageName, importPath := goPackageOption(fdesc)
-		defaultPackageName := packageNameForImportPath[importPaths[filename]]
-		switch {
-		case packageNames[filename] != "":
-			// A package name specified by the "M" command-line argument.
-		case packageName != "":
-			// TODO: For the "M" command-line argument, this means that the
-			// package name can be derived from the go_package option.
-			// Go package information should either consistently come from the
-			// command-line or the .proto source file, but not both.
-			// See how to make this consistent.
-
-			// Source file: option go_package = "quux/bar";
-			packageNames[filename] = packageName
-		case defaultPackageName != "":
-			// A go_package option in another file in the same package.
-			//
-			// This is a poor choice in general, since every source file should
-			// contain a go_package option. Supported mainly for historical
-			// compatibility.
-			packageNames[filename] = defaultPackageName
-		case generatedFileNames[filename] && packageImportPath != "":
-			// Command line: import_path=quux/bar
-			packageNames[filename] = cleanPackageName(path.Base(string(packageImportPath)))
-		case fdesc.GetPackage() != "":
-			// Source file: package quux.bar;
-			packageNames[filename] = cleanPackageName(fdesc.GetPackage())
-		default:
-			// Source filename.
-			packageNames[filename] = cleanPackageName(baseName(filename))
-		}
-
-		goPkgOpt := string(importPaths[filename])
-		if path.Base(string(goPkgOpt)) != string(packageNames[filename]) {
-			goPkgOpt += ";" + string(packageNames[filename])
+		if packageNames[filename] == "" && pkgName != "" {
+			packageNames[filename] = pkgName
 		}
 		switch {
-		case packageImportPath != "":
-			// Command line: import_path=quux/bar
-			warn("Deprecated use of the 'import_path' command-line argument. In %q, please specify:\n"+
-				"\toption go_package = %q;\n"+
-				"A future release of protoc-gen-go will no longer support the 'import_path' argument.\n"+
-				"See "+goPackageDocURL+" for more information.\n"+
-				"\n", fdesc.GetName(), goPkgOpt)
-		case mfiles[filename]:
-			// Command line: M=foo.proto=quux/bar
-		case packageName != "" && importPath == "":
-			// Source file: option go_package = "quux";
-			warn("Deprecated use of 'go_package' option without a full import path in %q, please specify:\n"+
-				"\toption go_package = %q;\n"+
-				"A future release of protoc-gen-go will require the import path be specified.\n"+
-				"See "+goPackageDocURL+" for more information.\n"+
-				"\n", fdesc.GetName(), goPkgOpt)
-		case packageName == "" && importPath == "":
-			// No Go package information provided.
-			dotIdx := strings.Index(goPkgOpt, ".")   // heuristic for top-level domain
-			slashIdx := strings.Index(goPkgOpt, "/") // heuristic for multi-segment path
-			if isFull := 0 <= dotIdx && dotIdx <= slashIdx; isFull {
-				warn("Missing 'go_package' option in %q, please specify:\n"+
-					"\toption go_package = %q;\n"+
-					"A future release of protoc-gen-go will require this be specified.\n"+
-					"See "+goPackageDocURL+" for more information.\n"+
-					"\n", fdesc.GetName(), goPkgOpt)
-			} else {
-				warn("Missing 'go_package' option in %q,\n"+
-					"please specify it with the full Go package path as\n"+
-					"a future release of protoc-gen-go will require this be specified.\n"+
-					"See "+goPackageDocURL+" for more information.\n"+
-					"\n", fdesc.GetName())
+		case importPaths[filename] == "":
+			// The import path must be specified one way or another.
+			return nil, fmt.Errorf(
+				"unable to determine Go import path for %q\n\n"+
+					"Please specify either:\n"+
+					"\t• a \"go_package\" option in the .proto source file, or\n"+
+					"\t• a \"M\" argument on the command line.\n\n"+
+					"See %v for more information.\n",
+				fdesc.GetName(), goPackageDocURL)
+		case !strings.Contains(string(importPaths[filename]), "/"):
+			// Check that import paths contain at least one slash to avoid a
+			// common mistake where import path is confused with package name.
+			return nil, fmt.Errorf(
+				"invalid Go import path %q for %q\n\n"+
+					"The import path must contain at least one forward slash ('/') character.\n\n"+
+					"See %v for more information.\n",
+				string(importPaths[filename]), fdesc.GetName(), goPackageDocURL)
+		case packageNames[filename] == "":
+			// If the package name is not explicitly specified,
+			// then derive a reasonable package name from the import path.
+			//
+			// NOTE: The package name is derived first from the import path in
+			// the "go_package" option (if present) before trying the "M" flag.
+			// The inverted order for this is because the primary use of the "M"
+			// flag is by build systems that have full control over the
+	// import paths of all packages, where it is generally expected that
+			// the Go package name still be identical for the Go toolchain and
+			// for custom build systems like Bazel.
+			if impPath == "" {
+				impPath = importPaths[filename]
 			}
+			packageNames[filename] = cleanPackageName(path.Base(string(impPath)))
 		}
 	}
 
@@ -482,7 +400,7 @@ type File struct {
 	// of "dir/foo". Appending ".pb.go" produces an output file of "dir/foo.pb.go".
 	GeneratedFilenamePrefix string
 
-	comments map[pathKey]CommentSet
+	location Location
 }
 
 func newFile(gen *Plugin, p *descriptorpb.FileDescriptorProto, packageName GoPackageName, importPath GoImportPath) (*File, error) {
@@ -498,7 +416,7 @@ func newFile(gen *Plugin, p *descriptorpb.FileDescriptorProto, packageName GoPac
 		Proto:         p,
 		GoPackageName: packageName,
 		GoImportPath:  importPath,
-		comments:      make(map[pathKey]CommentSet),
+		location:      Location{SourceFile: desc.Path()},
 	}
 
 	// Determine the prefix for generated Go files.
@@ -507,12 +425,6 @@ func newFile(gen *Plugin, p *descriptorpb.FileDescriptorProto, packageName GoPac
 		prefix = prefix[:len(prefix)-len(ext)]
 	}
 	switch gen.pathType {
-	case pathTypeLegacy:
-		// The default is to derive the output filename from the Go import path
-		// if the file contains a go_package option,or from the input filename instead.
-		if _, importPath := goPackageOption(p); importPath != "" {
-			prefix = path.Join(string(importPath), path.Base(prefix))
-		}
 	case pathTypeImport:
 		// If paths=import, the output filename is derived from the Go import path.
 		prefix = path.Join(string(f.GoImportPath), path.Base(prefix))
@@ -526,19 +438,6 @@ func newFile(gen *Plugin, p *descriptorpb.FileDescriptorProto, packageName GoPac
 	}
 	f.GeneratedFilenamePrefix = prefix
 
-	for _, loc := range p.GetSourceCodeInfo().GetLocation() {
-		// Descriptors declarations are guaranteed to have unique comment sets.
-		// Other locations may not be unique, but we don't use them.
-		var leadingDetached []Comments
-		for _, s := range loc.GetLeadingDetachedComments() {
-			leadingDetached = append(leadingDetached, Comments(s))
-		}
-		f.comments[newPathKey(loc.Path)] = CommentSet{
-			LeadingDetached: leadingDetached,
-			Leading:         Comments(loc.GetLeadingComments()),
-			Trailing:        Comments(loc.GetTrailingComments()),
-		}
-	}
 	for i, eds := 0, desc.Enums(); i < eds.Len(); i++ {
 		f.Enums = append(f.Enums, newEnum(gen, f, nil, eds.Get(i)))
 	}
@@ -571,43 +470,13 @@ func newFile(gen *Plugin, p *descriptorpb.FileDescriptorProto, packageName GoPac
 	return f, nil
 }
 
-func (f *File) location(idxPath ...int32) Location {
-	return Location{
-		SourceFile: f.Desc.Path(),
-		Path:       idxPath,
+// splitImportPathAndPackageName splits off the optional Go package name
+// from the Go import path when separated by a ';' delimiter.
+func splitImportPathAndPackageName(s string) (GoImportPath, GoPackageName) {
+	if i := strings.Index(s, ";"); i >= 0 {
+		return GoImportPath(s[:i]), GoPackageName(s[i+1:])
 	}
-}
-
-// goPackageOption interprets a file's go_package option.
-// If there is no go_package, it returns ("", "").
-// If there's a simple name, it returns (pkg, "").
-// If the option implies an import path, it returns (pkg, impPath).
-func goPackageOption(d *descriptorpb.FileDescriptorProto) (pkg GoPackageName, impPath GoImportPath) {
-	opt := d.GetOptions().GetGoPackage()
-	if opt == "" {
-		return "", ""
-	}
-	rawPkg, impPath := goPackageOptionRaw(opt)
-	pkg = cleanPackageName(rawPkg)
-	if string(pkg) != rawPkg && impPath != "" {
-		warn("Malformed 'go_package' option in %q, please specify:\n"+
-			"\toption go_package = %q;\n"+
-			"A future release of protoc-gen-go will reject this.\n"+
-			"See "+goPackageDocURL+" for more information.\n"+
-			"\n", d.GetName(), string(impPath)+";"+string(pkg))
-	}
-	return pkg, impPath
-}
-func goPackageOptionRaw(opt string) (rawPkg string, impPath GoImportPath) {
-	// A semicolon-delimited suffix delimits the import path and package name.
-	if i := strings.Index(opt, ";"); i >= 0 {
-		return opt[i+1:], GoImportPath(opt[:i])
-	}
-	// The presence of a slash implies there's an import path.
-	if i := strings.LastIndex(opt, "/"); i >= 0 {
-		return opt[i+1:], GoImportPath(opt)
-	}
-	return opt, ""
+	return GoImportPath(s), ""
 }
 
 // An Enum describes an enum.
@@ -625,15 +494,15 @@ type Enum struct {
 func newEnum(gen *Plugin, f *File, parent *Message, desc protoreflect.EnumDescriptor) *Enum {
 	var loc Location
 	if parent != nil {
-		loc = parent.Location.appendPath(int32(genid.DescriptorProto_EnumType_field_number), int32(desc.Index()))
+		loc = parent.Location.appendPath(genid.DescriptorProto_EnumType_field_number, desc.Index())
 	} else {
-		loc = f.location(int32(genid.FileDescriptorProto_EnumType_field_number), int32(desc.Index()))
+		loc = f.location.appendPath(genid.FileDescriptorProto_EnumType_field_number, desc.Index())
 	}
 	enum := &Enum{
 		Desc:     desc,
 		GoIdent:  newGoIdent(f, desc),
 		Location: loc,
-		Comments: f.comments[newPathKey(loc.Path)],
+		Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)),
 	}
 	gen.enumsByName[desc.FullName()] = enum
 	for i, vds := 0, enum.Desc.Values(); i < vds.Len(); i++ {
@@ -664,13 +533,13 @@ func newEnumValue(gen *Plugin, f *File, message *Message, enum *Enum, desc proto
 		parentIdent = message.GoIdent
 	}
 	name := parentIdent.GoName + "_" + string(desc.Name())
-	loc := enum.Location.appendPath(int32(genid.EnumDescriptorProto_Value_field_number), int32(desc.Index()))
+	loc := enum.Location.appendPath(genid.EnumDescriptorProto_Value_field_number, desc.Index())
 	return &EnumValue{
 		Desc:     desc,
 		GoIdent:  f.GoImportPath.Ident(name),
 		Parent:   enum,
 		Location: loc,
-		Comments: f.comments[newPathKey(loc.Path)],
+		Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)),
 	}
 }
 
@@ -694,15 +563,15 @@ type Message struct {
 func newMessage(gen *Plugin, f *File, parent *Message, desc protoreflect.MessageDescriptor) *Message {
 	var loc Location
 	if parent != nil {
-		loc = parent.Location.appendPath(int32(genid.DescriptorProto_NestedType_field_number), int32(desc.Index()))
+		loc = parent.Location.appendPath(genid.DescriptorProto_NestedType_field_number, desc.Index())
 	} else {
-		loc = f.location(int32(genid.FileDescriptorProto_MessageType_field_number), int32(desc.Index()))
+		loc = f.location.appendPath(genid.FileDescriptorProto_MessageType_field_number, desc.Index())
 	}
 	message := &Message{
 		Desc:     desc,
 		GoIdent:  newGoIdent(f, desc),
 		Location: loc,
-		Comments: f.comments[newPathKey(loc.Path)],
+		Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)),
 	}
 	gen.messagesByName[desc.FullName()] = message
 	for i, eds := 0, desc.Enums(); i < eds.Len(); i++ {
@@ -852,11 +721,11 @@ func newField(gen *Plugin, f *File, message *Message, desc protoreflect.FieldDes
 	var loc Location
 	switch {
 	case desc.IsExtension() && message == nil:
-		loc = f.location(int32(genid.FileDescriptorProto_Extension_field_number), int32(desc.Index()))
+		loc = f.location.appendPath(genid.FileDescriptorProto_Extension_field_number, desc.Index())
 	case desc.IsExtension() && message != nil:
-		loc = message.Location.appendPath(int32(genid.DescriptorProto_Extension_field_number), int32(desc.Index()))
+		loc = message.Location.appendPath(genid.DescriptorProto_Extension_field_number, desc.Index())
 	default:
-		loc = message.Location.appendPath(int32(genid.DescriptorProto_Field_field_number), int32(desc.Index()))
+		loc = message.Location.appendPath(genid.DescriptorProto_Field_field_number, desc.Index())
 	}
 	camelCased := strs.GoCamelCase(string(desc.Name()))
 	var parentPrefix string
@@ -872,7 +741,7 @@ func newField(gen *Plugin, f *File, message *Message, desc protoreflect.FieldDes
 		},
 		Parent:   message,
 		Location: loc,
-		Comments: f.comments[newPathKey(loc.Path)],
+		Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)),
 	}
 	return field
 }
@@ -927,7 +796,7 @@ type Oneof struct {
 }
 
 func newOneof(gen *Plugin, f *File, message *Message, desc protoreflect.OneofDescriptor) *Oneof {
-	loc := message.Location.appendPath(int32(genid.DescriptorProto_OneofDecl_field_number), int32(desc.Index()))
+	loc := message.Location.appendPath(genid.DescriptorProto_OneofDecl_field_number, desc.Index())
 	camelCased := strs.GoCamelCase(string(desc.Name()))
 	parentPrefix := message.GoIdent.GoName + "_"
 	return &Oneof{
@@ -939,7 +808,7 @@ func newOneof(gen *Plugin, f *File, message *Message, desc protoreflect.OneofDes
 			GoName:       parentPrefix + camelCased,
 		},
 		Location: loc,
-		Comments: f.comments[newPathKey(loc.Path)],
+		Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)),
 	}
 }
 
@@ -959,12 +828,12 @@ type Service struct {
 }
 
 func newService(gen *Plugin, f *File, desc protoreflect.ServiceDescriptor) *Service {
-	loc := f.location(int32(genid.FileDescriptorProto_Service_field_number), int32(desc.Index()))
+	loc := f.location.appendPath(genid.FileDescriptorProto_Service_field_number, desc.Index())
 	service := &Service{
 		Desc:     desc,
 		GoName:   strs.GoCamelCase(string(desc.Name())),
 		Location: loc,
-		Comments: f.comments[newPathKey(loc.Path)],
+		Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)),
 	}
 	for i, mds := 0, desc.Methods(); i < mds.Len(); i++ {
 		service.Methods = append(service.Methods, newMethod(gen, f, service, mds.Get(i)))
@@ -988,13 +857,13 @@ type Method struct {
 }
 
 func newMethod(gen *Plugin, f *File, service *Service, desc protoreflect.MethodDescriptor) *Method {
-	loc := service.Location.appendPath(int32(genid.ServiceDescriptorProto_Method_field_number), int32(desc.Index()))
+	loc := service.Location.appendPath(genid.ServiceDescriptorProto_Method_field_number, desc.Index())
 	method := &Method{
 		Desc:     desc,
 		GoName:   strs.GoCamelCase(string(desc.Name())),
 		Parent:   service,
 		Location: loc,
-		Comments: f.comments[newPathKey(loc.Path)],
+		Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)),
 	}
 	return method
 }
@@ -1081,7 +950,7 @@ func (g *GeneratedFile) QualifiedGoIdent(ident GoIdent) string {
 	if packageName, ok := g.packageNames[ident.GoImportPath]; ok {
 		return string(packageName) + "." + ident.GoName
 	}
-	packageName := cleanPackageName(baseName(string(ident.GoImportPath)))
+	packageName := cleanPackageName(path.Base(string(ident.GoImportPath)))
 	for i, orig := 1, packageName; g.usedPackageNames[packageName]; i++ {
 		packageName = orig + GoPackageName(strconv.Itoa(i))
 	}
@@ -1328,24 +1197,10 @@ func cleanPackageName(name string) GoPackageName {
 	return GoPackageName(strs.GoSanitized(name))
 }
 
-// baseName returns the last path element of the name, with the last dotted suffix removed.
-func baseName(name string) string {
-	// First, find the last element
-	if i := strings.LastIndex(name, "/"); i >= 0 {
-		name = name[i+1:]
-	}
-	// Now drop the suffix
-	if i := strings.LastIndex(name, "."); i >= 0 {
-		name = name[:i]
-	}
-	return name
-}
-
 type pathType int
 
 const (
-	pathTypeLegacy pathType = iota
-	pathTypeImport
+	pathTypeImport pathType = iota
 	pathTypeSourceRelative
 )
 
@@ -1359,28 +1214,10 @@ type Location struct {
 }
 
 // appendPath add elements to a Location's path, returning a new Location.
-func (loc Location) appendPath(a ...int32) Location {
-	var n protoreflect.SourcePath
-	n = append(n, loc.Path...)
-	n = append(n, a...)
-	return Location{
-		SourceFile: loc.SourceFile,
-		Path:       n,
-	}
-}
-
-// A pathKey is a representation of a location path suitable for use as a map key.
-type pathKey struct {
-	s string
-}
-
-// newPathKey converts a location path to a pathKey.
-func newPathKey(idxPath []int32) pathKey {
-	buf := make([]byte, 4*len(idxPath))
-	for i, x := range idxPath {
-		binary.LittleEndian.PutUint32(buf[i*4:], uint32(x))
-	}
-	return pathKey{string(buf)}
+func (loc Location) appendPath(num protoreflect.FieldNumber, idx int) Location {
+	loc.Path = append(protoreflect.SourcePath(nil), loc.Path...) // make copy
+	loc.Path = append(loc.Path, int32(num), int32(idx))
+	return loc
 }
 
 // CommentSet is a set of leading and trailing comments associated
@@ -1391,6 +1228,18 @@ type CommentSet struct {
 	Trailing        Comments
 }
 
+func makeCommentSet(loc protoreflect.SourceLocation) CommentSet {
+	var leadingDetached []Comments
+	for _, s := range loc.LeadingDetachedComments {
+		leadingDetached = append(leadingDetached, Comments(s))
+	}
+	return CommentSet{
+		LeadingDetached: leadingDetached,
+		Leading:         Comments(loc.LeadingComments),
+		Trailing:        Comments(loc.TrailingComments),
+	}
+}
+
 // Comments is a comments string as provided by protoc.
 type Comments string
 
@@ -1409,11 +1258,3 @@ func (c Comments) String() string {
 	}
 	return string(b)
 }
-
-var warnings = true
-
-func warn(format string, a ...interface{}) {
-	if warnings {
-		log.Printf("WARNING: "+format, a...)
-	}
-}
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
index cab95a427..8fb1d9e08 100644
--- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
@@ -6,7 +6,6 @@ package prototext
 
 import (
 	"fmt"
-	"strings"
 	"unicode/utf8"
 
 	"google.golang.org/protobuf/internal/encoding/messageset"
@@ -23,6 +22,7 @@ import (
 )
 
 // Unmarshal reads the given []byte into the given proto.Message.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
 func Unmarshal(b []byte, m proto.Message) error {
 	return UnmarshalOptions{}.Unmarshal(b, m)
 }
@@ -51,8 +51,9 @@ type UnmarshalOptions struct {
 	}
 }
 
-// Unmarshal reads the given []byte and populates the given proto.Message using options in
-// UnmarshalOptions object.
+// Unmarshal reads the given []byte and populates the given proto.Message
+// using options in the UnmarshalOptions object.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
 func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error {
 	return o.unmarshal(b, m)
 }
@@ -158,21 +159,11 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error {
 		switch tok.NameKind() {
 		case text.IdentName:
 			name = pref.Name(tok.IdentName())
-			fd = fieldDescs.ByName(name)
-			if fd == nil {
-				// The proto name of a group field is in all lowercase,
-				// while the textproto field name is the group message name.
-				gd := fieldDescs.ByName(pref.Name(strings.ToLower(string(name))))
-				if gd != nil && gd.Kind() == pref.GroupKind && gd.Message().Name() == name {
-					fd = gd
-				}
-			} else if fd.Kind() == pref.GroupKind && fd.Message().Name() != name {
-				fd = nil // reset since field name is actually the message name
-			}
+			fd = fieldDescs.ByTextName(string(name))
 
 		case text.TypeName:
 			// Handle extensions only. This code path is not for Any.
-			xt, xtErr = d.findExtension(pref.FullName(tok.TypeName()))
+			xt, xtErr = d.opts.Resolver.FindExtensionByName(pref.FullName(tok.TypeName()))
 
 		case text.FieldNumber:
 			isFieldNumberName = true
@@ -269,15 +260,6 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error {
 	return nil
 }
 
-// findExtension returns protoreflect.ExtensionType from the Resolver if found.
-func (d decoder) findExtension(xtName pref.FullName) (pref.ExtensionType, error) {
-	xt, err := d.opts.Resolver.FindExtensionByName(xtName)
-	if err == nil {
-		return xt, nil
-	}
-	return messageset.FindMessageSetExtension(d.opts.Resolver, xtName)
-}
-
 // unmarshalSingular unmarshals a non-repeated field value specified by the
 // given FieldDescriptor.
 func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) error {
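
With ByTextName, the decoder resolves ordinary fields and group fields through a single lookup (a group's text name is its message name), and extension type names go straight to the configured Resolver. A short sketch of driving the public entry points; examplepb.Msg is a hypothetical generated message type:

	import (
		"google.golang.org/protobuf/encoding/prototext"
		"google.golang.org/protobuf/reflect/protoregistry"

		examplepb "example.com/internal/gen/examplepb" // hypothetical generated package
	)

	func parseText(b []byte) (*examplepb.Msg, error) {
		m := &examplepb.Msg{} // must be mutable (non-nil pointer), per the doc comment above
		opts := prototext.UnmarshalOptions{Resolver: protoregistry.GlobalTypes}
		if err := opts.Unmarshal(b, m); err != nil {
			return nil, err
		}
		return m, nil
	}
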
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
index 0877d71c5..8d5304dc5 100644
--- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
@@ -6,7 +6,6 @@ package prototext
 
 import (
 	"fmt"
-	"sort"
 	"strconv"
 	"unicode/utf8"
 
@@ -16,10 +15,11 @@ import (
 	"google.golang.org/protobuf/internal/errors"
 	"google.golang.org/protobuf/internal/flags"
 	"google.golang.org/protobuf/internal/genid"
-	"google.golang.org/protobuf/internal/mapsort"
+	"google.golang.org/protobuf/internal/order"
 	"google.golang.org/protobuf/internal/pragma"
 	"google.golang.org/protobuf/internal/strs"
 	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
 	pref "google.golang.org/protobuf/reflect/protoreflect"
 	"google.golang.org/protobuf/reflect/protoregistry"
 )
@@ -169,35 +169,15 @@ func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error {
 		// If unable to expand, continue on to marshal Any as a regular message.
 	}
 
-	// Marshal known fields.
-	fieldDescs := messageDesc.Fields()
-	size := fieldDescs.Len()
-	for i := 0; i < size; {
-		fd := fieldDescs.Get(i)
-		if od := fd.ContainingOneof(); od != nil {
-			fd = m.WhichOneof(od)
-			i += od.Fields().Len()
-		} else {
-			i++
+	// Marshal fields.
+	var err error
+	order.RangeFields(m, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+		if err = e.marshalField(fd.TextName(), v, fd); err != nil {
+			return false
 		}
-
-		if fd == nil || !m.Has(fd) {
-			continue
-		}
-
-		name := fd.Name()
-		// Use type name for group field name.
-		if fd.Kind() == pref.GroupKind {
-			name = fd.Message().Name()
-		}
-		val := m.Get(fd)
-		if err := e.marshalField(string(name), val, fd); err != nil {
-			return err
-		}
-	}
-
-	// Marshal extensions.
-	if err := e.marshalExtensions(m); err != nil {
+		return true
+	})
+	if err != nil {
 		return err
 	}
 
@@ -290,7 +270,7 @@ func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescripto
 // marshalMap marshals the given protoreflect.Map as multiple name-value fields.
 func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) error {
 	var err error
-	mapsort.Range(mmap, fd.MapKey().Kind(), func(key pref.MapKey, val pref.Value) bool {
+	order.RangeEntries(mmap, order.GenericKeyOrder, func(key pref.MapKey, val pref.Value) bool {
 		e.WriteName(name)
 		e.StartMessage()
 		defer e.EndMessage()
@@ -311,48 +291,6 @@ func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor)
 	return err
 }
 
-// marshalExtensions marshals extension fields.
-func (e encoder) marshalExtensions(m pref.Message) error {
-	type entry struct {
-		key   string
-		value pref.Value
-		desc  pref.FieldDescriptor
-	}
-
-	// Get a sorted list based on field key first.
-	var entries []entry
-	m.Range(func(fd pref.FieldDescriptor, v pref.Value) bool {
-		if !fd.IsExtension() {
-			return true
-		}
-		// For MessageSet extensions, the name used is the parent message.
-		name := fd.FullName()
-		if messageset.IsMessageSetExtension(fd) {
-			name = name.Parent()
-		}
-		entries = append(entries, entry{
-			key:   string(name),
-			value: v,
-			desc:  fd,
-		})
-		return true
-	})
-	// Sort extensions lexicographically.
-	sort.Slice(entries, func(i, j int) bool {
-		return entries[i].key < entries[j].key
-	})
-
-	// Write out sorted list.
-	for _, entry := range entries {
-		// Extension field name is the proto field name enclosed in [].
-		name := "[" + entry.key + "]"
-		if err := e.marshalField(name, entry.value, entry.desc); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
 // marshalUnknown parses the given []byte and marshals fields out.
 // This function assumes proper encoding in the given []byte.
 func (e encoder) marshalUnknown(b []byte) {
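
Field and extension output is now produced by a single ordered walk (index-then-name via the internal order package) instead of separate known-field and extension passes. That internal package is not importable, but the same kind of walk is available through protoreflect; a sketch that prints every populated field by its text name (Range itself visits fields in an unspecified order):

	import (
		"fmt"

		"google.golang.org/protobuf/proto"
		"google.golang.org/protobuf/reflect/protoreflect"
	)

	// dumpFields prints each populated field of m, including extensions,
	// using the new TextName accessor (extensions come out as "[full.name]").
	func dumpFields(m proto.Message) {
		m.ProtoReflect().Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
			fmt.Printf("%s: %v\n", fd.TextName(), v.Interface())
			return true
		})
	}
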
diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
index e7af0fe0d..360c63329 100644
--- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
+++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
@@ -42,6 +42,8 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string {
 			name = "FileImports"
 		case pref.Descriptor:
 			name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s"
+		default:
+			name = reflect.ValueOf(vs).Elem().Type().Name()
 		}
 		start, end = name+"{", "}"
 	}
diff --git a/vendor/google.golang.org/protobuf/internal/detrand/rand.go b/vendor/google.golang.org/protobuf/internal/detrand/rand.go
index a904dd1f9..49c8676d4 100644
--- a/vendor/google.golang.org/protobuf/internal/detrand/rand.go
+++ b/vendor/google.golang.org/protobuf/internal/detrand/rand.go
@@ -26,6 +26,14 @@ func Bool() bool {
 	return randSeed%2 == 1
 }
 
+// Intn returns a deterministically random integer between 0 and n-1, inclusive.
+func Intn(n int) int {
+	if n <= 0 {
+		panic("must be positive")
+	}
+	return int(randSeed % uint64(n))
+}
+
 // randSeed is a best-effort at an approximate hash of the Go binary.
 var randSeed = binaryHash()
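
detrand gains Intn, which maps the per-binary seed onto [0, n) with a simple modulo, so the "random" choice is stable within one binary but varies across builds. A self-contained sketch of the same reduction, with an explicit seed standing in for the binary hash:

	// intn reduces a fixed seed to the range [0, n), panicking on non-positive n,
	// mirroring the modulo reduction used by detrand.Intn above.
	func intn(seed uint64, n int) int {
		if n <= 0 {
			panic("must be positive")
		}
		return int(seed % uint64(n))
	}
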
 
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go
index b1eeea507..c1866f3c1 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go
@@ -11,10 +11,9 @@ import (
 	"google.golang.org/protobuf/encoding/protowire"
 	"google.golang.org/protobuf/internal/errors"
 	pref "google.golang.org/protobuf/reflect/protoreflect"
-	preg "google.golang.org/protobuf/reflect/protoregistry"
 )
 
-// The MessageSet wire format is equivalent to a message defiend as follows,
+// The MessageSet wire format is equivalent to a message defined as follows,
 // where each Item defines an extension field with a field number of 'type_id'
 // and content of 'message'. MessageSet extensions must be non-repeated message
 // fields.
@@ -48,33 +47,17 @@ func IsMessageSet(md pref.MessageDescriptor) bool {
 	return ok && xmd.IsMessageSet()
 }
 
-// IsMessageSetExtension reports this field extends a MessageSet.
+// IsMessageSetExtension reports whether this field properly extends a MessageSet.
 func IsMessageSetExtension(fd pref.FieldDescriptor) bool {
-	if fd.Name() != ExtensionName {
+	switch {
+	case fd.Name() != ExtensionName:
+		return false
+	case !IsMessageSet(fd.ContainingMessage()):
+		return false
+	case fd.FullName().Parent() != fd.Message().FullName():
 		return false
 	}
-	if fd.FullName().Parent() != fd.Message().FullName() {
-		return false
-	}
-	return IsMessageSet(fd.ContainingMessage())
-}
-
-// FindMessageSetExtension locates a MessageSet extension field by name.
-// In text and JSON formats, the extension name used is the message itself.
-// The extension field name is derived by appending ExtensionName.
-func FindMessageSetExtension(r preg.ExtensionTypeResolver, s pref.FullName) (pref.ExtensionType, error) {
-	name := s.Append(ExtensionName)
-	xt, err := r.FindExtensionByName(name)
-	if err != nil {
-		if err == preg.NotFound {
-			return nil, err
-		}
-		return nil, errors.Wrap(err, "%q", name)
-	}
-	if !IsMessageSetExtension(xt.TypeDescriptor()) {
-		return nil, preg.NotFound
-	}
-	return xt, nil
+	return true
 }
 
 // SizeField returns the size of a MessageSet item field containing an extension
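
IsMessageSetExtension now also requires that the containing message really is a MessageSet, and the text/JSON-specific FindMessageSetExtension helper is dropped in favor of plain resolver lookups. Roughly the same checks, expressed over public API only as an illustrative approximation (the internal version additionally consults the ProtoLegacy build flag):

	import (
		"google.golang.org/protobuf/reflect/protoreflect"
		"google.golang.org/protobuf/types/descriptorpb"
	)

	// looksLikeMessageSetExtension is not the internal helper: the field must
	// carry the conventional extension name, extend a message declared with
	// message_set_wire_format, and be declared inside the message type it
	// points at.
	func looksLikeMessageSetExtension(fd protoreflect.FieldDescriptor) bool {
		opts, _ := fd.ContainingMessage().Options().(*descriptorpb.MessageOptions)
		switch {
		case fd.Name() != "message_set_extension":
			return false
		case opts == nil || !opts.GetMessageSetWireFormat():
			return false
		case fd.Message() == nil || fd.FullName().Parent() != fd.Message().FullName():
			return false
		}
		return true
	}
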
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
index 16c02d7b6..38f1931c6 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
@@ -104,7 +104,7 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p
 		case strings.HasPrefix(s, "json="):
 			jsonName := s[len("json="):]
 			if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) {
-				f.L1.JSONName.Init(jsonName)
+				f.L1.StringName.InitJSON(jsonName)
 			}
 		case s == "packed":
 			f.L1.HasPacked = true
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go
index c4ba1c598..aa66bdd06 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go
@@ -32,7 +32,6 @@ type Encoder struct {
 	encoderState
 
 	indent      string
-	newline     string // set to "\n" if len(indent) > 0
 	delims      [2]byte
 	outputASCII bool
 }
@@ -61,7 +60,6 @@ func NewEncoder(indent string, delims [2]byte, outputASCII bool) (*Encoder, erro
 			return nil, errors.New("indent may only be composed of space and tab characters")
 		}
 		e.indent = indent
-		e.newline = "\n"
 	}
 	switch delims {
 	case [2]byte{0, 0}:
@@ -126,7 +124,7 @@ func appendString(out []byte, in string, outputASCII bool) []byte {
 			// are used to represent both the proto string and bytes type.
 			r = rune(in[0])
 			fallthrough
-		case r < ' ' || r == '"' || r == '\\':
+		case r < ' ' || r == '"' || r == '\\' || r == 0x7f:
 			out = append(out, '\\')
 			switch r {
 			case '"', '\\':
@@ -143,7 +141,7 @@ func appendString(out []byte, in string, outputASCII bool) []byte {
 				out = strconv.AppendUint(out, uint64(r), 16)
 			}
 			in = in[n:]
-		case outputASCII && r >= utf8.RuneSelf:
+		case r >= utf8.RuneSelf && (outputASCII || r <= 0x009f):
 			out = append(out, '\\')
 			if r <= math.MaxUint16 {
 				out = append(out, 'u')
@@ -168,7 +166,7 @@ func appendString(out []byte, in string, outputASCII bool) []byte {
 // escaping. If no characters need escaping, this returns the input length.
 func indexNeedEscapeInString(s string) int {
 	for i := 0; i < len(s); i++ {
-		if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= utf8.RuneSelf {
+		if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= 0x7f {
 			return i
 		}
 	}
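
String escaping is tightened: DEL (0x7f) is always escaped, and C1 control runes (U+0080..U+009F) are escaped even when ASCII-only output was not requested, so the fast-path scan now has to stop at any byte >= 0x7f. The per-byte predicate after the change, as a small standalone helper:

	// needsEscape reports whether byte c forces the text encoder off the
	// copy-through fast path: control characters, quotes, backslash, DEL,
	// or the start of any non-ASCII rune (which may need \u escaping).
	func needsEscape(c byte) bool {
		return c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= 0x7f
	}
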
diff --git a/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go b/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go
deleted file mode 100644
index 517c4e2a0..000000000
--- a/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package fieldsort defines an ordering of fields.
-//
-// The ordering defined by this package matches the historic behavior of the proto
-// package, placing extensions first and oneofs last.
-//
-// There is no guarantee about stability of the wire encoding, and users should not
-// depend on the order defined in this package as it is subject to change without
-// notice.
-package fieldsort
-
-import (
-	"google.golang.org/protobuf/reflect/protoreflect"
-)
-
-// Less returns true if field a comes before field j in ordered wire marshal output.
-func Less(a, b protoreflect.FieldDescriptor) bool {
-	ea := a.IsExtension()
-	eb := b.IsExtension()
-	oa := a.ContainingOneof()
-	ob := b.ContainingOneof()
-	switch {
-	case ea != eb:
-		return ea
-	case oa != nil && ob != nil:
-		if oa == ob {
-			return a.Number() < b.Number()
-		}
-		return oa.Index() < ob.Index()
-	case oa != nil && !oa.IsSynthetic():
-		return false
-	case ob != nil && !ob.IsSynthetic():
-		return true
-	default:
-		return a.Number() < b.Number()
-	}
-}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go
index d02d770c9..b293b6947 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/build.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/build.go
@@ -3,6 +3,9 @@
 // license that can be found in the LICENSE file.
 
 // Package filedesc provides functionality for constructing descriptors.
+//
+// The types in this package implement interfaces in the protoreflect package
+// related to protobuf descriptors.
 package filedesc
 
 import (
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index 9385126fb..98ab142ae 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -13,6 +13,7 @@ import (
 	"google.golang.org/protobuf/internal/descfmt"
 	"google.golang.org/protobuf/internal/descopts"
 	"google.golang.org/protobuf/internal/encoding/defval"
+	"google.golang.org/protobuf/internal/encoding/messageset"
 	"google.golang.org/protobuf/internal/genid"
 	"google.golang.org/protobuf/internal/pragma"
 	"google.golang.org/protobuf/internal/strs"
@@ -99,15 +100,6 @@ func (fd *File) lazyInitOnce() {
 	fd.mu.Unlock()
 }
 
-// ProtoLegacyRawDesc is a pseudo-internal API for allowing the v1 code
-// to be able to retrieve the raw descriptor.
-//
-// WARNING: This method is exempt from the compatibility promise and may be
-// removed in the future without warning.
-func (fd *File) ProtoLegacyRawDesc() []byte {
-	return fd.builder.RawDescriptor
-}
-
 // GoPackagePath is a pseudo-internal API for determining the Go package path
 // that this file descriptor is declared in.
 //
@@ -207,7 +199,7 @@ type (
 		Number           pref.FieldNumber
 		Cardinality      pref.Cardinality // must be consistent with Message.RequiredNumbers
 		Kind             pref.Kind
-		JSONName         jsonName
+		StringName       stringName
 		IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
 		IsWeak           bool // promoted from google.protobuf.FieldOptions
 		HasPacked        bool // promoted from google.protobuf.FieldOptions
@@ -277,8 +269,9 @@ func (fd *Field) Options() pref.ProtoMessage {
 func (fd *Field) Number() pref.FieldNumber      { return fd.L1.Number }
 func (fd *Field) Cardinality() pref.Cardinality { return fd.L1.Cardinality }
 func (fd *Field) Kind() pref.Kind               { return fd.L1.Kind }
-func (fd *Field) HasJSONName() bool             { return fd.L1.JSONName.has }
-func (fd *Field) JSONName() string              { return fd.L1.JSONName.get(fd) }
+func (fd *Field) HasJSONName() bool             { return fd.L1.StringName.hasJSON }
+func (fd *Field) JSONName() string              { return fd.L1.StringName.getJSON(fd) }
+func (fd *Field) TextName() string              { return fd.L1.StringName.getText(fd) }
 func (fd *Field) HasPresence() bool {
 	return fd.L1.Cardinality != pref.Repeated && (fd.L0.ParentFile.L1.Syntax == pref.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil)
 }
@@ -373,7 +366,7 @@ type (
 	}
 	ExtensionL2 struct {
 		Options          func() pref.ProtoMessage
-		JSONName         jsonName
+		StringName       stringName
 		IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
 		IsPacked         bool // promoted from google.protobuf.FieldOptions
 		Default          defaultValue
@@ -391,8 +384,9 @@ func (xd *Extension) Options() pref.ProtoMessage {
 func (xd *Extension) Number() pref.FieldNumber      { return xd.L1.Number }
 func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality }
 func (xd *Extension) Kind() pref.Kind               { return xd.L1.Kind }
-func (xd *Extension) HasJSONName() bool             { return xd.lazyInit().JSONName.has }
-func (xd *Extension) JSONName() string              { return xd.lazyInit().JSONName.get(xd) }
+func (xd *Extension) HasJSONName() bool             { return xd.lazyInit().StringName.hasJSON }
+func (xd *Extension) JSONName() string              { return xd.lazyInit().StringName.getJSON(xd) }
+func (xd *Extension) TextName() string              { return xd.lazyInit().StringName.getText(xd) }
 func (xd *Extension) HasPresence() bool             { return xd.L1.Cardinality != pref.Repeated }
 func (xd *Extension) HasOptionalKeyword() bool {
 	return (xd.L0.ParentFile.L1.Syntax == pref.Proto2 && xd.L1.Cardinality == pref.Optional) || xd.lazyInit().IsProto3Optional
@@ -506,27 +500,50 @@ func (d *Base) Syntax() pref.Syntax                 { return d.L0.ParentFile.Syn
 func (d *Base) IsPlaceholder() bool                 { return false }
 func (d *Base) ProtoInternal(pragma.DoNotImplement) {}
 
-type jsonName struct {
-	has  bool
-	once sync.Once
-	name string
+type stringName struct {
+	hasJSON  bool
+	once     sync.Once
+	nameJSON string
+	nameText string
 }
 
-// Init initializes the name. It is exported for use by other internal packages.
-func (js *jsonName) Init(s string) {
-	js.has = true
-	js.name = s
+// InitJSON initializes the name. It is exported for use by other internal packages.
+func (s *stringName) InitJSON(name string) {
+	s.hasJSON = true
+	s.nameJSON = name
 }
 
-func (js *jsonName) get(fd pref.FieldDescriptor) string {
-	if !js.has {
-		js.once.Do(func() {
-			js.name = strs.JSONCamelCase(string(fd.Name()))
-		})
-	}
-	return js.name
+func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName {
+	s.once.Do(func() {
+		if fd.IsExtension() {
+			// For extensions, JSON and text are formatted the same way.
+			var name string
+			if messageset.IsMessageSetExtension(fd) {
+				name = string("[" + fd.FullName().Parent() + "]")
+			} else {
+				name = string("[" + fd.FullName() + "]")
+			}
+			s.nameJSON = name
+			s.nameText = name
+		} else {
+			// Format the JSON name.
+			if !s.hasJSON {
+				s.nameJSON = strs.JSONCamelCase(string(fd.Name()))
+			}
+
+			// Format the text name.
+			s.nameText = string(fd.Name())
+			if fd.Kind() == pref.GroupKind {
+				s.nameText = string(fd.Message().Name())
+			}
+		}
+	})
+	return s
 }
 
+func (s *stringName) getJSON(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameJSON }
+func (s *stringName) getText(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameText }
+
 func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue {
 	dv := defaultValue{has: v.IsValid(), val: v, enum: ev}
 	if b, ok := v.Interface().([]byte); ok {
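
The jsonName cache becomes stringName, which lazily derives both the JSON name and the new text name: regular fields use the proto name (or the group's message name), while extensions use their bracketed full name, with MessageSet extensions falling back to the parent message. The text-name derivation, sketched over public descriptor accessors (the MessageSet special case is noted but omitted):

	import "google.golang.org/protobuf/reflect/protoreflect"

	// textNameOf approximates FieldDescriptor.TextName for illustration.
	func textNameOf(fd protoreflect.FieldDescriptor) string {
		switch {
		case fd.IsExtension():
			// MessageSet extensions would instead use fd.FullName().Parent().
			return "[" + string(fd.FullName()) + "]"
		case fd.Kind() == protoreflect.GroupKind:
			return string(fd.Message().Name())
		default:
			return string(fd.Name())
		}
	}
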
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
index e672233e7..198451e3e 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
@@ -451,7 +451,7 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des
 			case genid.FieldDescriptorProto_Name_field_number:
 				fd.L0.FullName = appendFullName(sb, pd.FullName(), v)
 			case genid.FieldDescriptorProto_JsonName_field_number:
-				fd.L1.JSONName.Init(sb.MakeString(v))
+				fd.L1.StringName.InitJSON(sb.MakeString(v))
 			case genid.FieldDescriptorProto_DefaultValue_field_number:
 				fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages
 			case genid.FieldDescriptorProto_TypeName_field_number:
@@ -551,7 +551,7 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
 			b = b[m:]
 			switch num {
 			case genid.FieldDescriptorProto_JsonName_field_number:
-				xd.L2.JSONName.Init(sb.MakeString(v))
+				xd.L2.StringName.InitJSON(sb.MakeString(v))
 			case genid.FieldDescriptorProto_DefaultValue_field_number:
 				xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions
 			case genid.FieldDescriptorProto_TypeName_field_number:
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go
index c876cd34d..aa294fff9 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go
@@ -6,9 +6,12 @@ package filedesc
 
 import (
 	"fmt"
+	"math"
 	"sort"
 	"sync"
 
+	"google.golang.org/protobuf/internal/genid"
+
 	"google.golang.org/protobuf/encoding/protowire"
 	"google.golang.org/protobuf/internal/descfmt"
 	"google.golang.org/protobuf/internal/errors"
@@ -245,6 +248,7 @@ type OneofFields struct {
 	once   sync.Once
 	byName map[pref.Name]pref.FieldDescriptor        // protected by once
 	byJSON map[string]pref.FieldDescriptor           // protected by once
+	byText map[string]pref.FieldDescriptor           // protected by once
 	byNum  map[pref.FieldNumber]pref.FieldDescriptor // protected by once
 }
 
@@ -252,6 +256,7 @@ func (p *OneofFields) Len() int                                         { return
 func (p *OneofFields) Get(i int) pref.FieldDescriptor                   { return p.List[i] }
 func (p *OneofFields) ByName(s pref.Name) pref.FieldDescriptor          { return p.lazyInit().byName[s] }
 func (p *OneofFields) ByJSONName(s string) pref.FieldDescriptor         { return p.lazyInit().byJSON[s] }
+func (p *OneofFields) ByTextName(s string) pref.FieldDescriptor         { return p.lazyInit().byText[s] }
 func (p *OneofFields) ByNumber(n pref.FieldNumber) pref.FieldDescriptor { return p.lazyInit().byNum[n] }
 func (p *OneofFields) Format(s fmt.State, r rune)                       { descfmt.FormatList(s, r, p) }
 func (p *OneofFields) ProtoInternal(pragma.DoNotImplement)              {}
@@ -261,11 +266,13 @@ func (p *OneofFields) lazyInit() *OneofFields {
 		if len(p.List) > 0 {
 			p.byName = make(map[pref.Name]pref.FieldDescriptor, len(p.List))
 			p.byJSON = make(map[string]pref.FieldDescriptor, len(p.List))
+			p.byText = make(map[string]pref.FieldDescriptor, len(p.List))
 			p.byNum = make(map[pref.FieldNumber]pref.FieldDescriptor, len(p.List))
 			for _, f := range p.List {
 				// Field names and numbers are guaranteed to be unique.
 				p.byName[f.Name()] = f
 				p.byJSON[f.JSONName()] = f
+				p.byText[f.TextName()] = f
 				p.byNum[f.Number()] = f
 			}
 		}
@@ -274,9 +281,170 @@ func (p *OneofFields) lazyInit() *OneofFields {
 }
 
 type SourceLocations struct {
+	// List is a list of SourceLocations.
+	// The SourceLocation.Next field does not need to be populated
+	// as it will be lazily populated upon first need.
 	List []pref.SourceLocation
+
+	// File is the parent file descriptor that these locations are relative to.
+	// If non-nil, ByDescriptor verifies that the provided descriptor
+	// is a child of this file descriptor.
+	File pref.FileDescriptor
+
+	once   sync.Once
+	byPath map[pathKey]int
 }
 
-func (p *SourceLocations) Len() int                            { return len(p.List) }
-func (p *SourceLocations) Get(i int) pref.SourceLocation       { return p.List[i] }
+func (p *SourceLocations) Len() int                      { return len(p.List) }
+func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.lazyInit().List[i] }
+func (p *SourceLocations) byKey(k pathKey) pref.SourceLocation {
+	if i, ok := p.lazyInit().byPath[k]; ok {
+		return p.List[i]
+	}
+	return pref.SourceLocation{}
+}
+func (p *SourceLocations) ByPath(path pref.SourcePath) pref.SourceLocation {
+	return p.byKey(newPathKey(path))
+}
+func (p *SourceLocations) ByDescriptor(desc pref.Descriptor) pref.SourceLocation {
+	if p.File != nil && desc != nil && p.File != desc.ParentFile() {
+		return pref.SourceLocation{} // mismatching parent files
+	}
+	var pathArr [16]int32
+	path := pathArr[:0]
+	for {
+		switch desc.(type) {
+		case pref.FileDescriptor:
+			// Reverse the path since it was constructed in reverse.
+			for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 {
+				path[i], path[j] = path[j], path[i]
+			}
+			return p.byKey(newPathKey(path))
+		case pref.MessageDescriptor:
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			switch desc.(type) {
+			case pref.FileDescriptor:
+				path = append(path, int32(genid.FileDescriptorProto_MessageType_field_number))
+			case pref.MessageDescriptor:
+				path = append(path, int32(genid.DescriptorProto_NestedType_field_number))
+			default:
+				return pref.SourceLocation{}
+			}
+		case pref.FieldDescriptor:
+			isExtension := desc.(pref.FieldDescriptor).IsExtension()
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			if isExtension {
+				switch desc.(type) {
+				case pref.FileDescriptor:
+					path = append(path, int32(genid.FileDescriptorProto_Extension_field_number))
+				case pref.MessageDescriptor:
+					path = append(path, int32(genid.DescriptorProto_Extension_field_number))
+				default:
+					return pref.SourceLocation{}
+				}
+			} else {
+				switch desc.(type) {
+				case pref.MessageDescriptor:
+					path = append(path, int32(genid.DescriptorProto_Field_field_number))
+				default:
+					return pref.SourceLocation{}
+				}
+			}
+		case pref.OneofDescriptor:
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			switch desc.(type) {
+			case pref.MessageDescriptor:
+				path = append(path, int32(genid.DescriptorProto_OneofDecl_field_number))
+			default:
+				return pref.SourceLocation{}
+			}
+		case pref.EnumDescriptor:
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			switch desc.(type) {
+			case pref.FileDescriptor:
+				path = append(path, int32(genid.FileDescriptorProto_EnumType_field_number))
+			case pref.MessageDescriptor:
+				path = append(path, int32(genid.DescriptorProto_EnumType_field_number))
+			default:
+				return pref.SourceLocation{}
+			}
+		case pref.EnumValueDescriptor:
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			switch desc.(type) {
+			case pref.EnumDescriptor:
+				path = append(path, int32(genid.EnumDescriptorProto_Value_field_number))
+			default:
+				return pref.SourceLocation{}
+			}
+		case pref.ServiceDescriptor:
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			switch desc.(type) {
+			case pref.FileDescriptor:
+				path = append(path, int32(genid.FileDescriptorProto_Service_field_number))
+			default:
+				return pref.SourceLocation{}
+			}
+		case pref.MethodDescriptor:
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			switch desc.(type) {
+			case pref.ServiceDescriptor:
+				path = append(path, int32(genid.ServiceDescriptorProto_Method_field_number))
+			default:
+				return pref.SourceLocation{}
+			}
+		default:
+			return pref.SourceLocation{}
+		}
+	}
+}
+func (p *SourceLocations) lazyInit() *SourceLocations {
+	p.once.Do(func() {
+		if len(p.List) > 0 {
+			// Collect all the indexes for a given path.
+			pathIdxs := make(map[pathKey][]int, len(p.List))
+			for i, l := range p.List {
+				k := newPathKey(l.Path)
+				pathIdxs[k] = append(pathIdxs[k], i)
+			}
+
+			// Update the next index for all locations.
+			p.byPath = make(map[pathKey]int, len(p.List))
+			for k, idxs := range pathIdxs {
+				for i := 0; i < len(idxs)-1; i++ {
+					p.List[idxs[i]].Next = idxs[i+1]
+				}
+				p.List[idxs[len(idxs)-1]].Next = 0
+				p.byPath[k] = idxs[0] // record the first location for this path
+			}
+		}
+	})
+	return p
+}
 func (p *SourceLocations) ProtoInternal(pragma.DoNotImplement) {}
+
+// pathKey is a comparable representation of protoreflect.SourcePath.
+type pathKey struct {
+	arr [16]uint8 // first n-1 path segments; last element is the length
+	str string    // used if the path does not fit in arr
+}
+
+func newPathKey(p pref.SourcePath) (k pathKey) {
+	if len(p) < len(k.arr) {
+		for i, ps := range p {
+			if ps < 0 || math.MaxUint8 <= ps {
+				return pathKey{str: p.String()}
+			}
+			k.arr[i] = uint8(ps)
+		}
+		k.arr[len(k.arr)-1] = uint8(len(p))
+		return k
+	}
+	return pathKey{str: p.String()}
+}
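
SourceLocations gains a path-keyed index (pathKey packs short paths into a fixed array and falls back to the string form for longer ones) plus ByDescriptor, which reconstructs a descriptor's path by walking its parents. Through the public API this means source comments can be looked up straight from a descriptor, for example:

	import "google.golang.org/protobuf/reflect/protoreflect"

	// leadingCommentFor returns the leading comment attached to d in its .proto
	// source, or "" if the file was built without source info.
	func leadingCommentFor(d protoreflect.Descriptor) string {
		loc := d.ParentFile().SourceLocations().ByDescriptor(d)
		return loc.LeadingComments
	}
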
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
index 6a8825e80..30db19fdc 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
@@ -142,6 +142,7 @@ type Fields struct {
 	once   sync.Once
 	byName map[protoreflect.Name]*Field        // protected by once
 	byJSON map[string]*Field                   // protected by once
+	byText map[string]*Field                   // protected by once
 	byNum  map[protoreflect.FieldNumber]*Field // protected by once
 }
 
@@ -163,6 +164,12 @@ func (p *Fields) ByJSONName(s string) protoreflect.FieldDescriptor {
 	}
 	return nil
 }
+func (p *Fields) ByTextName(s string) protoreflect.FieldDescriptor {
+	if d := p.lazyInit().byText[s]; d != nil {
+		return d
+	}
+	return nil
+}
 func (p *Fields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor {
 	if d := p.lazyInit().byNum[n]; d != nil {
 		return d
@@ -178,6 +185,7 @@ func (p *Fields) lazyInit() *Fields {
 		if len(p.List) > 0 {
 			p.byName = make(map[protoreflect.Name]*Field, len(p.List))
 			p.byJSON = make(map[string]*Field, len(p.List))
+			p.byText = make(map[string]*Field, len(p.List))
 			p.byNum = make(map[protoreflect.FieldNumber]*Field, len(p.List))
 			for i := range p.List {
 				d := &p.List[i]
@@ -187,6 +195,9 @@ func (p *Fields) lazyInit() *Fields {
 				if _, ok := p.byJSON[d.JSONName()]; !ok {
 					p.byJSON[d.JSONName()] = d
 				}
+				if _, ok := p.byText[d.TextName()]; !ok {
+					p.byText[d.TextName()] = d
+				}
 				if _, ok := p.byNum[d.Number()]; !ok {
 					p.byNum[d.Number()] = d
 				}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go
index b5974528d..abee5f30e 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go
@@ -167,7 +167,7 @@ func (Export) MessageTypeOf(m message) pref.MessageType {
 	if mv := (Export{}).protoMessageV2Of(m); mv != nil {
 		return mv.ProtoReflect().Type()
 	}
-	return legacyLoadMessageInfo(reflect.TypeOf(m), "")
+	return legacyLoadMessageType(reflect.TypeOf(m), "")
 }
 
 // MessageStringOf returns the message value as a string,
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
index c00744d38..cb4b482d1 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
@@ -10,6 +10,7 @@ import (
 	"sync"
 
 	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/internal/errors"
 	"google.golang.org/protobuf/proto"
 	pref "google.golang.org/protobuf/reflect/protoreflect"
 	preg "google.golang.org/protobuf/reflect/protoregistry"
@@ -20,6 +21,7 @@ type errInvalidUTF8 struct{}
 
 func (errInvalidUTF8) Error() string     { return "string field contains invalid UTF-8" }
 func (errInvalidUTF8) InvalidUTF8() bool { return true }
+func (errInvalidUTF8) Unwrap() error     { return errors.Error }
 
 // initOneofFieldCoders initializes the fast-path functions for the fields in a oneof.
 //
@@ -242,7 +244,7 @@ func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI
 	}
 	v, n := protowire.ConsumeBytes(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	if p.Elem().IsNil() {
 		p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
@@ -276,7 +278,7 @@ func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarsh
 	}
 	v, n := protowire.ConsumeBytes(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
 		Buf:     v,
@@ -420,7 +422,7 @@ func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowir
 	}
 	b, n := protowire.ConsumeGroup(num, b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
 		Buf:     b,
@@ -494,7 +496,7 @@ func consumeMessageSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderF
 	}
 	v, n := protowire.ConsumeBytes(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	m := reflect.New(f.mi.GoReflectType.Elem()).Interface()
 	mp := pointerOfIface(m)
@@ -550,7 +552,7 @@ func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowir
 	}
 	v, n := protowire.ConsumeBytes(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	mp := reflect.New(goType.Elem())
 	o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
@@ -613,7 +615,7 @@ func consumeMessageSliceValue(b []byte, listv pref.Value, _ protowire.Number, wt
 	}
 	v, n := protowire.ConsumeBytes(b)
 	if n < 0 {
-		return pref.Value{}, out, protowire.ParseError(n)
+		return pref.Value{}, out, errDecode
 	}
 	m := list.NewElement()
 	o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
@@ -681,7 +683,7 @@ func consumeGroupSliceValue(b []byte, listv pref.Value, num protowire.Number, wt
 	}
 	b, n := protowire.ConsumeGroup(num, b)
 	if n < 0 {
-		return pref.Value{}, out, protowire.ParseError(n)
+		return pref.Value{}, out, errDecode
 	}
 	m := list.NewElement()
 	o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
@@ -767,7 +769,7 @@ func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire
 	}
 	b, n := protowire.ConsumeGroup(num, b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	mp := reflect.New(goType.Elem())
 	o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
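
Throughout the fast-path coders, protowire.ParseError(n) is replaced with a shared errDecode sentinel, and errInvalidUTF8 now unwraps to the package's base error so callers can classify failures with errors.Is. The surrounding consume-and-check pattern, written against the public protowire package with a local sentinel standing in for the unexported errDecode:

	import (
		"errors"

		"google.golang.org/protobuf/encoding/protowire"
	)

	var errTruncated = errors.New("cannot parse invalid wire-format data")

	// readBytesField consumes one length-delimited value from b, returning the
	// payload and the number of bytes read.
	func readBytesField(b []byte) ([]byte, int, error) {
		v, n := protowire.ConsumeBytes(b)
		if n < 0 {
			// protowire.ParseError(n) would recover the detailed reason;
			// the coders above now return a single sentinel instead.
			return nil, 0, errTruncated
		}
		return v, n, nil
	}
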
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go
index ff198d0a1..1a509b63e 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go
@@ -15,13 +15,13 @@ import (
 )
 
 // sizeBool returns the size of wire encoding a bool pointer as a Bool.
-func sizeBool(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeBool(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := *p.Bool()
 	return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v))
 }
 
 // appendBool wire encodes a bool pointer as a Bool.
-func appendBool(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendBool(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Bool()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendVarint(b, protowire.EncodeBool(v))
@@ -29,7 +29,7 @@ func appendBool(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byt
 }
 
 // consumeBool wire decodes a bool pointer as a Bool.
-func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.VarintType {
 		return out, errUnknown
 	}
@@ -45,7 +45,7 @@ func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*p.Bool() = protowire.DecodeBool(v)
 	out.n = n
@@ -61,7 +61,7 @@ var coderBool = pointerCoderFuncs{
 
 // sizeBoolNoZero returns the size of wire encoding a bool pointer as a Bool.
 // The zero value is not encoded.
-func sizeBoolNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeBoolNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := *p.Bool()
 	if v == false {
 		return 0
@@ -71,7 +71,7 @@ func sizeBoolNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
 
 // appendBoolNoZero wire encodes a bool pointer as a Bool.
 // The zero value is not encoded.
-func appendBoolNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendBoolNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Bool()
 	if v == false {
 		return b, nil
@@ -90,14 +90,14 @@ var coderBoolNoZero = pointerCoderFuncs{
 
 // sizeBoolPtr returns the size of wire encoding a *bool pointer as a Bool.
 // It panics if the pointer is nil.
-func sizeBoolPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeBoolPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := **p.BoolPtr()
 	return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v))
 }
 
 // appendBoolPtr wire encodes a *bool pointer as a Bool.
 // It panics if the pointer is nil.
-func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := **p.BoolPtr()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendVarint(b, protowire.EncodeBool(v))
@@ -105,7 +105,7 @@ func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]
 }
 
 // consumeBoolPtr wire decodes a *bool pointer as a Bool.
-func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.VarintType {
 		return out, errUnknown
 	}
@@ -121,7 +121,7 @@ func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo,
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	vp := p.BoolPtr()
 	if *vp == nil {
@@ -140,7 +140,7 @@ var coderBoolPtr = pointerCoderFuncs{
 }
 
 // sizeBoolSlice returns the size of wire encoding a []bool pointer as a repeated Bool.
-func sizeBoolSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeBoolSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.BoolSlice()
 	for _, v := range s {
 		size += f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v))
@@ -149,7 +149,7 @@ func sizeBoolSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
 }
 
 // appendBoolSlice encodes a []bool pointer as a repeated Bool.
-func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.BoolSlice()
 	for _, v := range s {
 		b = protowire.AppendVarint(b, f.wiretag)
@@ -159,13 +159,13 @@ func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) (
 }
 
 // consumeBoolSlice wire decodes a []bool pointer as a repeated Bool.
-func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.BoolSlice()
 	if wtyp == protowire.BytesType {
 		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return out, protowire.ParseError(n)
+			return out, errDecode
 		}
 		for len(b) > 0 {
 			var v uint64
@@ -180,7 +180,7 @@ func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf
 				v, n = protowire.ConsumeVarint(b)
 			}
 			if n < 0 {
-				return out, protowire.ParseError(n)
+				return out, errDecode
 			}
 			s = append(s, protowire.DecodeBool(v))
 			b = b[n:]
@@ -204,7 +204,7 @@ func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*sp = append(*sp, protowire.DecodeBool(v))
 	out.n = n
@@ -219,7 +219,7 @@ var coderBoolSlice = pointerCoderFuncs{
 }
 
 // sizeBoolPackedSlice returns the size of wire encoding a []bool pointer as a packed repeated Bool.
-func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.BoolSlice()
 	if len(s) == 0 {
 		return 0
@@ -232,7 +232,7 @@ func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size i
 }
 
 // appendBoolPackedSlice encodes a []bool pointer as a packed repeated Bool.
-func appendBoolPackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendBoolPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.BoolSlice()
 	if len(s) == 0 {
 		return b, nil
@@ -257,19 +257,19 @@ var coderBoolPackedSlice = pointerCoderFuncs{
 }
 
 // sizeBoolValue returns the size of wire encoding a bool value as a Bool.
-func sizeBoolValue(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeBoolValue(v protoreflect.Value, tagsize int, opts marshalOptions) int {
 	return tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool()))
 }
 
 // appendBoolValue encodes a bool value as a Bool.
-func appendBoolValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendBoolValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	b = protowire.AppendVarint(b, wiretag)
 	b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool()))
 	return b, nil
 }
 
 // consumeBoolValue decodes a bool value as a Bool.
-func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	if wtyp != protowire.VarintType {
 		return protoreflect.Value{}, out, errUnknown
 	}
@@ -285,7 +285,7 @@ func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp p
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	out.n = n
 	return protoreflect.ValueOfBool(protowire.DecodeBool(v)), out, nil
@@ -299,7 +299,7 @@ var coderBoolValue = valueCoderFuncs{
 }
 
 // sizeBoolSliceValue returns the size of wire encoding a []bool value as a repeated Bool.
-func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -309,7 +309,7 @@ func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions)
 }
 
 // appendBoolSliceValue encodes a []bool value as a repeated Bool.
-func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -320,12 +320,12 @@ func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _
 }
 
 // consumeBoolSliceValue wire decodes a []bool value as a repeated Bool.
-func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	list := listv.List()
 	if wtyp == protowire.BytesType {
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return protoreflect.Value{}, out, protowire.ParseError(n)
+			return protoreflect.Value{}, out, errDecode
 		}
 		for len(b) > 0 {
 			var v uint64
@@ -340,7 +340,7 @@ func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numbe
 				v, n = protowire.ConsumeVarint(b)
 			}
 			if n < 0 {
-				return protoreflect.Value{}, out, protowire.ParseError(n)
+				return protoreflect.Value{}, out, errDecode
 			}
 			list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v)))
 			b = b[n:]
@@ -363,7 +363,7 @@ func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numbe
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v)))
 	out.n = n
@@ -378,7 +378,7 @@ var coderBoolSliceValue = valueCoderFuncs{
 }
 
 // sizeBoolPackedSliceValue returns the size of wire encoding a []bool value as a packed repeated Bool.
-func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -393,7 +393,7 @@ func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOp
 }
 
 // appendBoolPackedSliceValue encodes a []bool value as a packed repeated Bool.
-func appendBoolPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendBoolPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -421,19 +421,19 @@ var coderBoolPackedSliceValue = valueCoderFuncs{
 }
 
 // sizeEnumValue returns the size of wire encoding a  value as a Enum.
-func sizeEnumValue(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeEnumValue(v protoreflect.Value, tagsize int, opts marshalOptions) int {
 	return tagsize + protowire.SizeVarint(uint64(v.Enum()))
 }
 
 // appendEnumValue encodes a  value as a Enum.
-func appendEnumValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendEnumValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	b = protowire.AppendVarint(b, wiretag)
 	b = protowire.AppendVarint(b, uint64(v.Enum()))
 	return b, nil
 }
 
 // consumeEnumValue decodes a  value as a Enum.
-func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	if wtyp != protowire.VarintType {
 		return protoreflect.Value{}, out, errUnknown
 	}
@@ -449,7 +449,7 @@ func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp p
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	out.n = n
 	return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), out, nil
@@ -463,7 +463,7 @@ var coderEnumValue = valueCoderFuncs{
 }
 
 // sizeEnumSliceValue returns the size of wire encoding a [] value as a repeated Enum.
-func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -473,7 +473,7 @@ func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions)
 }
 
 // appendEnumSliceValue encodes a [] value as a repeated Enum.
-func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -484,12 +484,12 @@ func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _
 }
 
 // consumeEnumSliceValue wire decodes a [] value as a repeated Enum.
-func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	list := listv.List()
 	if wtyp == protowire.BytesType {
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return protoreflect.Value{}, out, protowire.ParseError(n)
+			return protoreflect.Value{}, out, errDecode
 		}
 		for len(b) > 0 {
 			var v uint64
@@ -504,7 +504,7 @@ func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numbe
 				v, n = protowire.ConsumeVarint(b)
 			}
 			if n < 0 {
-				return protoreflect.Value{}, out, protowire.ParseError(n)
+				return protoreflect.Value{}, out, errDecode
 			}
 			list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)))
 			b = b[n:]
@@ -527,7 +527,7 @@ func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numbe
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)))
 	out.n = n
@@ -542,7 +542,7 @@ var coderEnumSliceValue = valueCoderFuncs{
 }
 
 // sizeEnumPackedSliceValue returns the size of wire encoding a [] value as a packed repeated Enum.
-func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -557,7 +557,7 @@ func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOp
 }
 
 // appendEnumPackedSliceValue encodes a [] value as a packed repeated Enum.
-func appendEnumPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendEnumPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -585,13 +585,13 @@ var coderEnumPackedSliceValue = valueCoderFuncs{
 }
 
 // sizeInt32 returns the size of wire encoding a int32 pointer as a Int32.
-func sizeInt32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeInt32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := *p.Int32()
 	return f.tagsize + protowire.SizeVarint(uint64(v))
 }
 
 // appendInt32 wire encodes a int32 pointer as a Int32.
-func appendInt32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendInt32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Int32()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendVarint(b, uint64(v))
@@ -599,7 +599,7 @@ func appendInt32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]by
 }
 
 // consumeInt32 wire decodes a int32 pointer as a Int32.
-func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.VarintType {
 		return out, errUnknown
 	}
@@ -615,7 +615,7 @@ func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*p.Int32() = int32(v)
 	out.n = n
@@ -631,7 +631,7 @@ var coderInt32 = pointerCoderFuncs{
 
 // sizeInt32NoZero returns the size of wire encoding a int32 pointer as a Int32.
 // The zero value is not encoded.
-func sizeInt32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeInt32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := *p.Int32()
 	if v == 0 {
 		return 0
@@ -641,7 +641,7 @@ func sizeInt32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int)
 
 // appendInt32NoZero wire encodes a int32 pointer as a Int32.
 // The zero value is not encoded.
-func appendInt32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendInt32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Int32()
 	if v == 0 {
 		return b, nil
@@ -660,14 +660,14 @@ var coderInt32NoZero = pointerCoderFuncs{
 
 // sizeInt32Ptr returns the size of wire encoding a *int32 pointer as a Int32.
 // It panics if the pointer is nil.
-func sizeInt32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeInt32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := **p.Int32Ptr()
 	return f.tagsize + protowire.SizeVarint(uint64(v))
 }
 
 // appendInt32Ptr wire encodes a *int32 pointer as a Int32.
 // It panics if the pointer is nil.
-func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := **p.Int32Ptr()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendVarint(b, uint64(v))
@@ -675,7 +675,7 @@ func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([
 }
 
 // consumeInt32Ptr wire decodes a *int32 pointer as a Int32.
-func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.VarintType {
 		return out, errUnknown
 	}
@@ -691,7 +691,7 @@ func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	vp := p.Int32Ptr()
 	if *vp == nil {
@@ -710,7 +710,7 @@ var coderInt32Ptr = pointerCoderFuncs{
 }
 
 // sizeInt32Slice returns the size of wire encoding a []int32 pointer as a repeated Int32.
-func sizeInt32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeInt32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.Int32Slice()
 	for _, v := range s {
 		size += f.tagsize + protowire.SizeVarint(uint64(v))
@@ -719,7 +719,7 @@ func sizeInt32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
 }
 
 // appendInt32Slice encodes a []int32 pointer as a repeated Int32.
-func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.Int32Slice()
 	for _, v := range s {
 		b = protowire.AppendVarint(b, f.wiretag)
@@ -729,13 +729,13 @@ func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions)
 }
 
 // consumeInt32Slice wire decodes a []int32 pointer as a repeated Int32.
-func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.Int32Slice()
 	if wtyp == protowire.BytesType {
 		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return out, protowire.ParseError(n)
+			return out, errDecode
 		}
 		for len(b) > 0 {
 			var v uint64
@@ -750,7 +750,7 @@ func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn
 				v, n = protowire.ConsumeVarint(b)
 			}
 			if n < 0 {
-				return out, protowire.ParseError(n)
+				return out, errDecode
 			}
 			s = append(s, int32(v))
 			b = b[n:]
@@ -774,7 +774,7 @@ func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*sp = append(*sp, int32(v))
 	out.n = n
@@ -789,7 +789,7 @@ var coderInt32Slice = pointerCoderFuncs{
 }
 
 // sizeInt32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Int32.
-func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.Int32Slice()
 	if len(s) == 0 {
 		return 0
@@ -802,7 +802,7 @@ func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size
 }
 
 // appendInt32PackedSlice encodes a []int32 pointer as a packed repeated Int32.
-func appendInt32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendInt32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.Int32Slice()
 	if len(s) == 0 {
 		return b, nil
@@ -827,19 +827,19 @@ var coderInt32PackedSlice = pointerCoderFuncs{
 }
 
 // sizeInt32Value returns the size of wire encoding a int32 value as a Int32.
-func sizeInt32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeInt32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
 	return tagsize + protowire.SizeVarint(uint64(int32(v.Int())))
 }
 
 // appendInt32Value encodes a int32 value as a Int32.
-func appendInt32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendInt32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	b = protowire.AppendVarint(b, wiretag)
 	b = protowire.AppendVarint(b, uint64(int32(v.Int())))
 	return b, nil
 }
 
 // consumeInt32Value decodes a int32 value as a Int32.
-func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	if wtyp != protowire.VarintType {
 		return protoreflect.Value{}, out, errUnknown
 	}
@@ -855,7 +855,7 @@ func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	out.n = n
 	return protoreflect.ValueOfInt32(int32(v)), out, nil
@@ -869,7 +869,7 @@ var coderInt32Value = valueCoderFuncs{
 }
 
 // sizeInt32SliceValue returns the size of wire encoding a []int32 value as a repeated Int32.
-func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -879,7 +879,7 @@ func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions
 }
 
 // appendInt32SliceValue encodes a []int32 value as a repeated Int32.
-func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -890,12 +890,12 @@ func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _
 }
 
 // consumeInt32SliceValue wire decodes a []int32 value as a repeated Int32.
-func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	list := listv.List()
 	if wtyp == protowire.BytesType {
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return protoreflect.Value{}, out, protowire.ParseError(n)
+			return protoreflect.Value{}, out, errDecode
 		}
 		for len(b) > 0 {
 			var v uint64
@@ -910,7 +910,7 @@ func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb
 				v, n = protowire.ConsumeVarint(b)
 			}
 			if n < 0 {
-				return protoreflect.Value{}, out, protowire.ParseError(n)
+				return protoreflect.Value{}, out, errDecode
 			}
 			list.Append(protoreflect.ValueOfInt32(int32(v)))
 			b = b[n:]
@@ -933,7 +933,7 @@ func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	list.Append(protoreflect.ValueOfInt32(int32(v)))
 	out.n = n
@@ -948,7 +948,7 @@ var coderInt32SliceValue = valueCoderFuncs{
 }
 
 // sizeInt32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Int32.
-func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -963,7 +963,7 @@ func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalO
 }
 
 // appendInt32PackedSliceValue encodes a []int32 value as a packed repeated Int32.
-func appendInt32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendInt32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -991,13 +991,13 @@ var coderInt32PackedSliceValue = valueCoderFuncs{
 }
 
 // sizeSint32 returns the size of wire encoding a int32 pointer as a Sint32.
-func sizeSint32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSint32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := *p.Int32()
 	return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v)))
 }
 
 // appendSint32 wire encodes a int32 pointer as a Sint32.
-func appendSint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSint32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Int32()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v)))
@@ -1005,7 +1005,7 @@ func appendSint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b
 }
 
 // consumeSint32 wire decodes a int32 pointer as a Sint32.
-func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.VarintType {
 		return out, errUnknown
 	}
@@ -1021,7 +1021,7 @@ func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo,
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*p.Int32() = int32(protowire.DecodeZigZag(v & math.MaxUint32))
 	out.n = n
@@ -1037,7 +1037,7 @@ var coderSint32 = pointerCoderFuncs{
 
 // sizeSint32NoZero returns the size of wire encoding a int32 pointer as a Sint32.
 // The zero value is not encoded.
-func sizeSint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSint32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := *p.Int32()
 	if v == 0 {
 		return 0
@@ -1047,7 +1047,7 @@ func sizeSint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int)
 
 // appendSint32NoZero wire encodes a int32 pointer as a Sint32.
 // The zero value is not encoded.
-func appendSint32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSint32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Int32()
 	if v == 0 {
 		return b, nil
@@ -1066,14 +1066,14 @@ var coderSint32NoZero = pointerCoderFuncs{
 
 // sizeSint32Ptr returns the size of wire encoding a *int32 pointer as a Sint32.
 // It panics if the pointer is nil.
-func sizeSint32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSint32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := **p.Int32Ptr()
 	return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v)))
 }
 
 // appendSint32Ptr wire encodes a *int32 pointer as a Sint32.
 // It panics if the pointer is nil.
-func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := **p.Int32Ptr()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v)))
@@ -1081,7 +1081,7 @@ func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) (
 }
 
 // consumeSint32Ptr wire decodes a *int32 pointer as a Sint32.
-func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.VarintType {
 		return out, errUnknown
 	}
@@ -1097,7 +1097,7 @@ func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	vp := p.Int32Ptr()
 	if *vp == nil {
@@ -1116,7 +1116,7 @@ var coderSint32Ptr = pointerCoderFuncs{
 }
 
 // sizeSint32Slice returns the size of wire encoding a []int32 pointer as a repeated Sint32.
-func sizeSint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSint32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.Int32Slice()
 	for _, v := range s {
 		size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v)))
@@ -1125,7 +1125,7 @@ func sizeSint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int)
 }
 
 // appendSint32Slice encodes a []int32 pointer as a repeated Sint32.
-func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.Int32Slice()
 	for _, v := range s {
 		b = protowire.AppendVarint(b, f.wiretag)
@@ -1135,13 +1135,13 @@ func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions)
 }
 
 // consumeSint32Slice wire decodes a []int32 pointer as a repeated Sint32.
-func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.Int32Slice()
 	if wtyp == protowire.BytesType {
 		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return out, protowire.ParseError(n)
+			return out, errDecode
 		}
 		for len(b) > 0 {
 			var v uint64
@@ -1156,7 +1156,7 @@ func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI
 				v, n = protowire.ConsumeVarint(b)
 			}
 			if n < 0 {
-				return out, protowire.ParseError(n)
+				return out, errDecode
 			}
 			s = append(s, int32(protowire.DecodeZigZag(v&math.MaxUint32)))
 			b = b[n:]
@@ -1180,7 +1180,7 @@ func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*sp = append(*sp, int32(protowire.DecodeZigZag(v&math.MaxUint32)))
 	out.n = n
@@ -1195,7 +1195,7 @@ var coderSint32Slice = pointerCoderFuncs{
 }
 
 // sizeSint32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sint32.
-func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.Int32Slice()
 	if len(s) == 0 {
 		return 0
@@ -1208,7 +1208,7 @@ func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size
 }
 
 // appendSint32PackedSlice encodes a []int32 pointer as a packed repeated Sint32.
-func appendSint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.Int32Slice()
 	if len(s) == 0 {
 		return b, nil
@@ -1233,19 +1233,19 @@ var coderSint32PackedSlice = pointerCoderFuncs{
 }
 
 // sizeSint32Value returns the size of wire encoding a int32 value as a Sint32.
-func sizeSint32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeSint32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
 	return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int()))))
 }
 
 // appendSint32Value encodes a int32 value as a Sint32.
-func appendSint32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendSint32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	b = protowire.AppendVarint(b, wiretag)
 	b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int()))))
 	return b, nil
 }
 
 // consumeSint32Value decodes a int32 value as a Sint32.
-func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	if wtyp != protowire.VarintType {
 		return protoreflect.Value{}, out, errUnknown
 	}
@@ -1261,7 +1261,7 @@ func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	out.n = n
 	return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), out, nil
@@ -1275,7 +1275,7 @@ var coderSint32Value = valueCoderFuncs{
 }
 
 // sizeSint32SliceValue returns the size of wire encoding a []int32 value as a repeated Sint32.
-func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -1285,7 +1285,7 @@ func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOption
 }
 
 // appendSint32SliceValue encodes a []int32 value as a repeated Sint32.
-func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -1296,12 +1296,12 @@ func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64,
 }
 
 // consumeSint32SliceValue wire decodes a []int32 value as a repeated Sint32.
-func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	list := listv.List()
 	if wtyp == protowire.BytesType {
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return protoreflect.Value{}, out, protowire.ParseError(n)
+			return protoreflect.Value{}, out, errDecode
 		}
 		for len(b) > 0 {
 			var v uint64
@@ -1316,7 +1316,7 @@ func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num
 				v, n = protowire.ConsumeVarint(b)
 			}
 			if n < 0 {
-				return protoreflect.Value{}, out, protowire.ParseError(n)
+				return protoreflect.Value{}, out, errDecode
 			}
 			list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))))
 			b = b[n:]
@@ -1339,7 +1339,7 @@ func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))))
 	out.n = n
@@ -1354,7 +1354,7 @@ var coderSint32SliceValue = valueCoderFuncs{
 }
 
 // sizeSint32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sint32.
-func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -1369,7 +1369,7 @@ func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal
 }
 
 // appendSint32PackedSliceValue encodes a []int32 value as a packed repeated Sint32.
-func appendSint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendSint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -1397,13 +1397,13 @@ var coderSint32PackedSliceValue = valueCoderFuncs{
 }
 
 // sizeUint32 returns the size of wire encoding a uint32 pointer as a Uint32.
-func sizeUint32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeUint32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := *p.Uint32()
 	return f.tagsize + protowire.SizeVarint(uint64(v))
 }
 
 // appendUint32 wire encodes a uint32 pointer as a Uint32.
-func appendUint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendUint32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Uint32()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendVarint(b, uint64(v))
@@ -1411,7 +1411,7 @@ func appendUint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b
 }
 
 // consumeUint32 wire decodes a uint32 pointer as a Uint32.
-func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.VarintType {
 		return out, errUnknown
 	}
@@ -1427,7 +1427,7 @@ func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo,
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*p.Uint32() = uint32(v)
 	out.n = n
@@ -1443,7 +1443,7 @@ var coderUint32 = pointerCoderFuncs{
 
 // sizeUint32NoZero returns the size of wire encoding a uint32 pointer as a Uint32.
 // The zero value is not encoded.
-func sizeUint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeUint32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := *p.Uint32()
 	if v == 0 {
 		return 0
@@ -1453,7 +1453,7 @@ func sizeUint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int)
 
 // appendUint32NoZero wire encodes a uint32 pointer as a Uint32.
 // The zero value is not encoded.
-func appendUint32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendUint32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Uint32()
 	if v == 0 {
 		return b, nil
@@ -1472,14 +1472,14 @@ var coderUint32NoZero = pointerCoderFuncs{
 
 // sizeUint32Ptr returns the size of wire encoding a *uint32 pointer as a Uint32.
 // It panics if the pointer is nil.
-func sizeUint32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeUint32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := **p.Uint32Ptr()
 	return f.tagsize + protowire.SizeVarint(uint64(v))
 }
 
 // appendUint32Ptr wire encodes a *uint32 pointer as a Uint32.
 // It panics if the pointer is nil.
-func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := **p.Uint32Ptr()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendVarint(b, uint64(v))
@@ -1487,7 +1487,7 @@ func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) (
 }
 
 // consumeUint32Ptr wire decodes a *uint32 pointer as a Uint32.
-func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.VarintType {
 		return out, errUnknown
 	}
@@ -1503,7 +1503,7 @@ func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	vp := p.Uint32Ptr()
 	if *vp == nil {
@@ -1522,7 +1522,7 @@ var coderUint32Ptr = pointerCoderFuncs{
 }
 
 // sizeUint32Slice returns the size of wire encoding a []uint32 pointer as a repeated Uint32.
-func sizeUint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeUint32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.Uint32Slice()
 	for _, v := range s {
 		size += f.tagsize + protowire.SizeVarint(uint64(v))
@@ -1531,7 +1531,7 @@ func sizeUint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int)
 }
 
 // appendUint32Slice encodes a []uint32 pointer as a repeated Uint32.
-func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.Uint32Slice()
 	for _, v := range s {
 		b = protowire.AppendVarint(b, f.wiretag)
@@ -1541,13 +1541,13 @@ func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions)
 }
 
 // consumeUint32Slice wire decodes a []uint32 pointer as a repeated Uint32.
-func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.Uint32Slice()
 	if wtyp == protowire.BytesType {
 		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return out, protowire.ParseError(n)
+			return out, errDecode
 		}
 		for len(b) > 0 {
 			var v uint64
@@ -1562,7 +1562,7 @@ func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI
 				v, n = protowire.ConsumeVarint(b)
 			}
 			if n < 0 {
-				return out, protowire.ParseError(n)
+				return out, errDecode
 			}
 			s = append(s, uint32(v))
 			b = b[n:]
@@ -1586,7 +1586,7 @@ func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*sp = append(*sp, uint32(v))
 	out.n = n
@@ -1601,7 +1601,7 @@ var coderUint32Slice = pointerCoderFuncs{
 }
 
 // sizeUint32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Uint32.
-func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.Uint32Slice()
 	if len(s) == 0 {
 		return 0
@@ -1614,7 +1614,7 @@ func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size
 }
 
 // appendUint32PackedSlice encodes a []uint32 pointer as a packed repeated Uint32.
-func appendUint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendUint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.Uint32Slice()
 	if len(s) == 0 {
 		return b, nil
@@ -1639,19 +1639,19 @@ var coderUint32PackedSlice = pointerCoderFuncs{
 }
 
 // sizeUint32Value returns the size of wire encoding a uint32 value as a Uint32.
-func sizeUint32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeUint32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
 	return tagsize + protowire.SizeVarint(uint64(uint32(v.Uint())))
 }
 
 // appendUint32Value encodes a uint32 value as a Uint32.
-func appendUint32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendUint32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	b = protowire.AppendVarint(b, wiretag)
 	b = protowire.AppendVarint(b, uint64(uint32(v.Uint())))
 	return b, nil
 }
 
 // consumeUint32Value decodes a uint32 value as a Uint32.
-func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	if wtyp != protowire.VarintType {
 		return protoreflect.Value{}, out, errUnknown
 	}
@@ -1667,7 +1667,7 @@ func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	out.n = n
 	return protoreflect.ValueOfUint32(uint32(v)), out, nil
@@ -1681,7 +1681,7 @@ var coderUint32Value = valueCoderFuncs{
 }
 
 // sizeUint32SliceValue returns the size of wire encoding a []uint32 value as a repeated Uint32.
-func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -1691,7 +1691,7 @@ func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOption
 }
 
 // appendUint32SliceValue encodes a []uint32 value as a repeated Uint32.
-func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -1702,12 +1702,12 @@ func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64,
 }
 
 // consumeUint32SliceValue wire decodes a []uint32 value as a repeated Uint32.
-func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	list := listv.List()
 	if wtyp == protowire.BytesType {
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return protoreflect.Value{}, out, protowire.ParseError(n)
+			return protoreflect.Value{}, out, errDecode
 		}
 		for len(b) > 0 {
 			var v uint64
@@ -1722,7 +1722,7 @@ func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num
 				v, n = protowire.ConsumeVarint(b)
 			}
 			if n < 0 {
-				return protoreflect.Value{}, out, protowire.ParseError(n)
+				return protoreflect.Value{}, out, errDecode
 			}
 			list.Append(protoreflect.ValueOfUint32(uint32(v)))
 			b = b[n:]
@@ -1745,7 +1745,7 @@ func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	list.Append(protoreflect.ValueOfUint32(uint32(v)))
 	out.n = n
@@ -1760,7 +1760,7 @@ var coderUint32SliceValue = valueCoderFuncs{
 }
 
 // sizeUint32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Uint32.
-func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -1775,7 +1775,7 @@ func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal
 }
 
 // appendUint32PackedSliceValue encodes a []uint32 value as a packed repeated Uint32.
-func appendUint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendUint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -1803,13 +1803,13 @@ var coderUint32PackedSliceValue = valueCoderFuncs{
 }
 
 // sizeInt64 returns the size of wire encoding a int64 pointer as a Int64.
-func sizeInt64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeInt64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := *p.Int64()
 	return f.tagsize + protowire.SizeVarint(uint64(v))
 }
 
 // appendInt64 wire encodes a int64 pointer as a Int64.
-func appendInt64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendInt64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Int64()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendVarint(b, uint64(v))
@@ -1817,7 +1817,7 @@ func appendInt64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]by
 }
 
 // consumeInt64 wire decodes a int64 pointer as a Int64.
-func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.VarintType {
 		return out, errUnknown
 	}
@@ -1833,7 +1833,7 @@ func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*p.Int64() = int64(v)
 	out.n = n
@@ -1849,7 +1849,7 @@ var coderInt64 = pointerCoderFuncs{
 
 // sizeInt64NoZero returns the size of wire encoding a int64 pointer as a Int64.
 // The zero value is not encoded.
-func sizeInt64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeInt64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := *p.Int64()
 	if v == 0 {
 		return 0
@@ -1859,7 +1859,7 @@ func sizeInt64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int)
 
 // appendInt64NoZero wire encodes a int64 pointer as a Int64.
 // The zero value is not encoded.
-func appendInt64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendInt64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Int64()
 	if v == 0 {
 		return b, nil
@@ -1878,14 +1878,14 @@ var coderInt64NoZero = pointerCoderFuncs{
 
 // sizeInt64Ptr returns the size of wire encoding a *int64 pointer as a Int64.
 // It panics if the pointer is nil.
-func sizeInt64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeInt64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := **p.Int64Ptr()
 	return f.tagsize + protowire.SizeVarint(uint64(v))
 }
 
 // appendInt64Ptr wire encodes a *int64 pointer as a Int64.
 // It panics if the pointer is nil.
-func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := **p.Int64Ptr()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendVarint(b, uint64(v))
@@ -1893,7 +1893,7 @@ func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([
 }
 
 // consumeInt64Ptr wire decodes a *int64 pointer as a Int64.
-func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.VarintType {
 		return out, errUnknown
 	}
@@ -1909,7 +1909,7 @@ func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	vp := p.Int64Ptr()
 	if *vp == nil {
@@ -1928,7 +1928,7 @@ var coderInt64Ptr = pointerCoderFuncs{
 }
 
 // sizeInt64Slice returns the size of wire encoding a []int64 pointer as a repeated Int64.
-func sizeInt64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeInt64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.Int64Slice()
 	for _, v := range s {
 		size += f.tagsize + protowire.SizeVarint(uint64(v))
@@ -1937,7 +1937,7 @@ func sizeInt64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
 }
 
 // appendInt64Slice encodes a []int64 pointer as a repeated Int64.
-func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.Int64Slice()
 	for _, v := range s {
 		b = protowire.AppendVarint(b, f.wiretag)
@@ -1947,13 +1947,13 @@ func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions)
 }
 
 // consumeInt64Slice wire decodes a []int64 pointer as a repeated Int64.
-func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.Int64Slice()
 	if wtyp == protowire.BytesType {
 		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return out, protowire.ParseError(n)
+			return out, errDecode
 		}
 		for len(b) > 0 {
 			var v uint64
@@ -1968,7 +1968,7 @@ func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn
 				v, n = protowire.ConsumeVarint(b)
 			}
 			if n < 0 {
-				return out, protowire.ParseError(n)
+				return out, errDecode
 			}
 			s = append(s, int64(v))
 			b = b[n:]
@@ -1992,7 +1992,7 @@ func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*sp = append(*sp, int64(v))
 	out.n = n
@@ -2007,7 +2007,7 @@ var coderInt64Slice = pointerCoderFuncs{
 }
 
 // sizeInt64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Int64.
-func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.Int64Slice()
 	if len(s) == 0 {
 		return 0
@@ -2020,7 +2020,7 @@ func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size
 }
 
 // appendInt64PackedSlice encodes a []int64 pointer as a packed repeated Int64.
-func appendInt64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendInt64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.Int64Slice()
 	if len(s) == 0 {
 		return b, nil
@@ -2045,19 +2045,19 @@ var coderInt64PackedSlice = pointerCoderFuncs{
 }
 
 // sizeInt64Value returns the size of wire encoding a int64 value as a Int64.
-func sizeInt64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeInt64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
 	return tagsize + protowire.SizeVarint(uint64(v.Int()))
 }
 
 // appendInt64Value encodes a int64 value as a Int64.
-func appendInt64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendInt64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	b = protowire.AppendVarint(b, wiretag)
 	b = protowire.AppendVarint(b, uint64(v.Int()))
 	return b, nil
 }
 
 // consumeInt64Value decodes a int64 value as a Int64.
-func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	if wtyp != protowire.VarintType {
 		return protoreflect.Value{}, out, errUnknown
 	}
@@ -2073,7 +2073,7 @@ func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	out.n = n
 	return protoreflect.ValueOfInt64(int64(v)), out, nil
@@ -2087,7 +2087,7 @@ var coderInt64Value = valueCoderFuncs{
 }
 
 // sizeInt64SliceValue returns the size of wire encoding a []int64 value as a repeated Int64.
-func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -2097,7 +2097,7 @@ func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions
 }
 
 // appendInt64SliceValue encodes a []int64 value as a repeated Int64.
-func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -2108,12 +2108,12 @@ func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _
 }
 
 // consumeInt64SliceValue wire decodes a []int64 value as a repeated Int64.
-func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	list := listv.List()
 	if wtyp == protowire.BytesType {
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return protoreflect.Value{}, out, protowire.ParseError(n)
+			return protoreflect.Value{}, out, errDecode
 		}
 		for len(b) > 0 {
 			var v uint64
@@ -2128,7 +2128,7 @@ func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb
 				v, n = protowire.ConsumeVarint(b)
 			}
 			if n < 0 {
-				return protoreflect.Value{}, out, protowire.ParseError(n)
+				return protoreflect.Value{}, out, errDecode
 			}
 			list.Append(protoreflect.ValueOfInt64(int64(v)))
 			b = b[n:]
@@ -2151,7 +2151,7 @@ func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	list.Append(protoreflect.ValueOfInt64(int64(v)))
 	out.n = n
@@ -2166,7 +2166,7 @@ var coderInt64SliceValue = valueCoderFuncs{
 }
 
 // sizeInt64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Int64.
-func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -2181,7 +2181,7 @@ func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalO
 }
 
 // appendInt64PackedSliceValue encodes a []int64 value as a packed repeated Int64.
-func appendInt64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendInt64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -2209,13 +2209,13 @@ var coderInt64PackedSliceValue = valueCoderFuncs{
 }
 
 // sizeSint64 returns the size of wire encoding a int64 pointer as a Sint64.
-func sizeSint64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSint64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := *p.Int64()
 	return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v))
 }
 
 // appendSint64 wire encodes a int64 pointer as a Sint64.
-func appendSint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSint64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Int64()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendVarint(b, protowire.EncodeZigZag(v))
@@ -2223,7 +2223,7 @@ func appendSint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b
 }
 
 // consumeSint64 wire decodes a int64 pointer as a Sint64.
-func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.VarintType {
 		return out, errUnknown
 	}
@@ -2239,7 +2239,7 @@ func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo,
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*p.Int64() = protowire.DecodeZigZag(v)
 	out.n = n
@@ -2255,7 +2255,7 @@ var coderSint64 = pointerCoderFuncs{
 
 // sizeSint64NoZero returns the size of wire encoding a int64 pointer as a Sint64.
 // The zero value is not encoded.
-func sizeSint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSint64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := *p.Int64()
 	if v == 0 {
 		return 0
@@ -2265,7 +2265,7 @@ func sizeSint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int)
 
 // appendSint64NoZero wire encodes a int64 pointer as a Sint64.
 // The zero value is not encoded.
-func appendSint64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSint64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Int64()
 	if v == 0 {
 		return b, nil
@@ -2284,14 +2284,14 @@ var coderSint64NoZero = pointerCoderFuncs{
 
 // sizeSint64Ptr returns the size of wire encoding a *int64 pointer as a Sint64.
 // It panics if the pointer is nil.
-func sizeSint64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSint64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := **p.Int64Ptr()
 	return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v))
 }
 
 // appendSint64Ptr wire encodes a *int64 pointer as a Sint64.
 // It panics if the pointer is nil.
-func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := **p.Int64Ptr()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendVarint(b, protowire.EncodeZigZag(v))
@@ -2299,7 +2299,7 @@ func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) (
 }
 
 // consumeSint64Ptr wire decodes a *int64 pointer as a Sint64.
-func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.VarintType {
 		return out, errUnknown
 	}
@@ -2315,7 +2315,7 @@ func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	vp := p.Int64Ptr()
 	if *vp == nil {
@@ -2334,7 +2334,7 @@ var coderSint64Ptr = pointerCoderFuncs{
 }
 
 // sizeSint64Slice returns the size of wire encoding a []int64 pointer as a repeated Sint64.
-func sizeSint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSint64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.Int64Slice()
 	for _, v := range s {
 		size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v))
@@ -2343,7 +2343,7 @@ func sizeSint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int)
 }
 
 // appendSint64Slice encodes a []int64 pointer as a repeated Sint64.
-func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.Int64Slice()
 	for _, v := range s {
 		b = protowire.AppendVarint(b, f.wiretag)
@@ -2353,13 +2353,13 @@ func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions)
 }
 
 // consumeSint64Slice wire decodes a []int64 pointer as a repeated Sint64.
-func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.Int64Slice()
 	if wtyp == protowire.BytesType {
 		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return out, protowire.ParseError(n)
+			return out, errDecode
 		}
 		for len(b) > 0 {
 			var v uint64
@@ -2374,7 +2374,7 @@ func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI
 				v, n = protowire.ConsumeVarint(b)
 			}
 			if n < 0 {
-				return out, protowire.ParseError(n)
+				return out, errDecode
 			}
 			s = append(s, protowire.DecodeZigZag(v))
 			b = b[n:]
@@ -2398,7 +2398,7 @@ func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*sp = append(*sp, protowire.DecodeZigZag(v))
 	out.n = n
@@ -2413,7 +2413,7 @@ var coderSint64Slice = pointerCoderFuncs{
 }
 
 // sizeSint64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sint64.
-func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.Int64Slice()
 	if len(s) == 0 {
 		return 0
@@ -2426,7 +2426,7 @@ func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size
 }
 
 // appendSint64PackedSlice encodes a []int64 pointer as a packed repeated Sint64.
-func appendSint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.Int64Slice()
 	if len(s) == 0 {
 		return b, nil
@@ -2451,19 +2451,19 @@ var coderSint64PackedSlice = pointerCoderFuncs{
 }
 
 // sizeSint64Value returns the size of wire encoding a int64 value as a Sint64.
-func sizeSint64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeSint64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
 	return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int()))
 }
 
 // appendSint64Value encodes a int64 value as a Sint64.
-func appendSint64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendSint64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	b = protowire.AppendVarint(b, wiretag)
 	b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int()))
 	return b, nil
 }
 
 // consumeSint64Value decodes a int64 value as a Sint64.
-func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	if wtyp != protowire.VarintType {
 		return protoreflect.Value{}, out, errUnknown
 	}
@@ -2479,7 +2479,7 @@ func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	out.n = n
 	return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), out, nil
@@ -2493,7 +2493,7 @@ var coderSint64Value = valueCoderFuncs{
 }
 
 // sizeSint64SliceValue returns the size of wire encoding a []int64 value as a repeated Sint64.
-func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -2503,7 +2503,7 @@ func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOption
 }
 
 // appendSint64SliceValue encodes a []int64 value as a repeated Sint64.
-func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -2514,12 +2514,12 @@ func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64,
 }
 
 // consumeSint64SliceValue wire decodes a []int64 value as a repeated Sint64.
-func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	list := listv.List()
 	if wtyp == protowire.BytesType {
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return protoreflect.Value{}, out, protowire.ParseError(n)
+			return protoreflect.Value{}, out, errDecode
 		}
 		for len(b) > 0 {
 			var v uint64
@@ -2534,7 +2534,7 @@ func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num
 				v, n = protowire.ConsumeVarint(b)
 			}
 			if n < 0 {
-				return protoreflect.Value{}, out, protowire.ParseError(n)
+				return protoreflect.Value{}, out, errDecode
 			}
 			list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)))
 			b = b[n:]
@@ -2557,7 +2557,7 @@ func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)))
 	out.n = n
@@ -2572,7 +2572,7 @@ var coderSint64SliceValue = valueCoderFuncs{
 }
 
 // sizeSint64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sint64.
-func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -2587,7 +2587,7 @@ func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal
 }
 
 // appendSint64PackedSliceValue encodes a []int64 value as a packed repeated Sint64.
-func appendSint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendSint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -2615,13 +2615,13 @@ var coderSint64PackedSliceValue = valueCoderFuncs{
 }
 
 // sizeUint64 returns the size of wire encoding a uint64 pointer as a Uint64.
-func sizeUint64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeUint64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := *p.Uint64()
 	return f.tagsize + protowire.SizeVarint(v)
 }
 
 // appendUint64 wire encodes a uint64 pointer as a Uint64.
-func appendUint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendUint64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Uint64()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendVarint(b, v)
@@ -2629,7 +2629,7 @@ func appendUint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b
 }
 
 // consumeUint64 wire decodes a uint64 pointer as a Uint64.
-func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.VarintType {
 		return out, errUnknown
 	}
@@ -2645,7 +2645,7 @@ func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo,
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*p.Uint64() = v
 	out.n = n
@@ -2661,7 +2661,7 @@ var coderUint64 = pointerCoderFuncs{
 
 // sizeUint64NoZero returns the size of wire encoding a uint64 pointer as a Uint64.
 // The zero value is not encoded.
-func sizeUint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeUint64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := *p.Uint64()
 	if v == 0 {
 		return 0
@@ -2671,7 +2671,7 @@ func sizeUint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int)
 
 // appendUint64NoZero wire encodes a uint64 pointer as a Uint64.
 // The zero value is not encoded.
-func appendUint64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendUint64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Uint64()
 	if v == 0 {
 		return b, nil
@@ -2690,14 +2690,14 @@ var coderUint64NoZero = pointerCoderFuncs{
 
 // sizeUint64Ptr returns the size of wire encoding a *uint64 pointer as a Uint64.
 // It panics if the pointer is nil.
-func sizeUint64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeUint64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := **p.Uint64Ptr()
 	return f.tagsize + protowire.SizeVarint(v)
 }
 
 // appendUint64Ptr wire encodes a *uint64 pointer as a Uint64.
 // It panics if the pointer is nil.
-func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := **p.Uint64Ptr()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendVarint(b, v)
@@ -2705,7 +2705,7 @@ func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) (
 }
 
 // consumeUint64Ptr wire decodes a *uint64 pointer as a Uint64.
-func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.VarintType {
 		return out, errUnknown
 	}
@@ -2721,7 +2721,7 @@ func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	vp := p.Uint64Ptr()
 	if *vp == nil {
@@ -2740,7 +2740,7 @@ var coderUint64Ptr = pointerCoderFuncs{
 }
 
 // sizeUint64Slice returns the size of wire encoding a []uint64 pointer as a repeated Uint64.
-func sizeUint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeUint64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.Uint64Slice()
 	for _, v := range s {
 		size += f.tagsize + protowire.SizeVarint(v)
@@ -2749,7 +2749,7 @@ func sizeUint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int)
 }
 
 // appendUint64Slice encodes a []uint64 pointer as a repeated Uint64.
-func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.Uint64Slice()
 	for _, v := range s {
 		b = protowire.AppendVarint(b, f.wiretag)
@@ -2759,13 +2759,13 @@ func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions)
 }
 
 // consumeUint64Slice wire decodes a []uint64 pointer as a repeated Uint64.
-func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.Uint64Slice()
 	if wtyp == protowire.BytesType {
 		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return out, protowire.ParseError(n)
+			return out, errDecode
 		}
 		for len(b) > 0 {
 			var v uint64
@@ -2780,7 +2780,7 @@ func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI
 				v, n = protowire.ConsumeVarint(b)
 			}
 			if n < 0 {
-				return out, protowire.ParseError(n)
+				return out, errDecode
 			}
 			s = append(s, v)
 			b = b[n:]
@@ -2804,7 +2804,7 @@ func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*sp = append(*sp, v)
 	out.n = n
@@ -2819,7 +2819,7 @@ var coderUint64Slice = pointerCoderFuncs{
 }
 
 // sizeUint64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Uint64.
-func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.Uint64Slice()
 	if len(s) == 0 {
 		return 0
@@ -2832,7 +2832,7 @@ func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size
 }
 
 // appendUint64PackedSlice encodes a []uint64 pointer as a packed repeated Uint64.
-func appendUint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendUint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.Uint64Slice()
 	if len(s) == 0 {
 		return b, nil
@@ -2857,19 +2857,19 @@ var coderUint64PackedSlice = pointerCoderFuncs{
 }
 
 // sizeUint64Value returns the size of wire encoding a uint64 value as a Uint64.
-func sizeUint64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeUint64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
 	return tagsize + protowire.SizeVarint(v.Uint())
 }
 
 // appendUint64Value encodes a uint64 value as a Uint64.
-func appendUint64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendUint64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	b = protowire.AppendVarint(b, wiretag)
 	b = protowire.AppendVarint(b, v.Uint())
 	return b, nil
 }
 
 // consumeUint64Value decodes a uint64 value as a Uint64.
-func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	if wtyp != protowire.VarintType {
 		return protoreflect.Value{}, out, errUnknown
 	}
@@ -2885,7 +2885,7 @@ func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	out.n = n
 	return protoreflect.ValueOfUint64(v), out, nil
@@ -2899,7 +2899,7 @@ var coderUint64Value = valueCoderFuncs{
 }
 
 // sizeUint64SliceValue returns the size of wire encoding a []uint64 value as a repeated Uint64.
-func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -2909,7 +2909,7 @@ func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOption
 }
 
 // appendUint64SliceValue encodes a []uint64 value as a repeated Uint64.
-func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -2920,12 +2920,12 @@ func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64,
 }
 
 // consumeUint64SliceValue wire decodes a []uint64 value as a repeated Uint64.
-func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	list := listv.List()
 	if wtyp == protowire.BytesType {
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return protoreflect.Value{}, out, protowire.ParseError(n)
+			return protoreflect.Value{}, out, errDecode
 		}
 		for len(b) > 0 {
 			var v uint64
@@ -2940,7 +2940,7 @@ func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num
 				v, n = protowire.ConsumeVarint(b)
 			}
 			if n < 0 {
-				return protoreflect.Value{}, out, protowire.ParseError(n)
+				return protoreflect.Value{}, out, errDecode
 			}
 			list.Append(protoreflect.ValueOfUint64(v))
 			b = b[n:]
@@ -2963,7 +2963,7 @@ func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Num
 		v, n = protowire.ConsumeVarint(b)
 	}
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	list.Append(protoreflect.ValueOfUint64(v))
 	out.n = n
@@ -2978,7 +2978,7 @@ var coderUint64SliceValue = valueCoderFuncs{
 }
 
 // sizeUint64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Uint64.
-func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -2993,7 +2993,7 @@ func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal
 }
 
 // appendUint64PackedSliceValue encodes a []uint64 value as a packed repeated Uint64.
-func appendUint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendUint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -3021,13 +3021,13 @@ var coderUint64PackedSliceValue = valueCoderFuncs{
 }
 
 // sizeSfixed32 returns the size of wire encoding a int32 pointer as a Sfixed32.
-func sizeSfixed32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSfixed32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 
 	return f.tagsize + protowire.SizeFixed32()
 }
 
 // appendSfixed32 wire encodes a int32 pointer as a Sfixed32.
-func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Int32()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendFixed32(b, uint32(v))
@@ -3035,13 +3035,13 @@ func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([
 }
 
 // consumeSfixed32 wire decodes a int32 pointer as a Sfixed32.
-func consumeSfixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeSfixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.Fixed32Type {
 		return out, errUnknown
 	}
 	v, n := protowire.ConsumeFixed32(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*p.Int32() = int32(v)
 	out.n = n
@@ -3057,7 +3057,7 @@ var coderSfixed32 = pointerCoderFuncs{
 
 // sizeSfixed32NoZero returns the size of wire encoding a int32 pointer as a Sfixed32.
 // The zero value is not encoded.
-func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := *p.Int32()
 	if v == 0 {
 		return 0
@@ -3067,7 +3067,7 @@ func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size in
 
 // appendSfixed32NoZero wire encodes a int32 pointer as a Sfixed32.
 // The zero value is not encoded.
-func appendSfixed32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSfixed32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Int32()
 	if v == 0 {
 		return b, nil
@@ -3086,13 +3086,13 @@ var coderSfixed32NoZero = pointerCoderFuncs{
 
 // sizeSfixed32Ptr returns the size of wire encoding a *int32 pointer as a Sfixed32.
 // It panics if the pointer is nil.
-func sizeSfixed32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSfixed32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	return f.tagsize + protowire.SizeFixed32()
 }
 
 // appendSfixed32Ptr wire encodes a *int32 pointer as a Sfixed32.
 // It panics if the pointer is nil.
-func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := **p.Int32Ptr()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendFixed32(b, uint32(v))
@@ -3100,13 +3100,13 @@ func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions)
 }
 
 // consumeSfixed32Ptr wire decodes a *int32 pointer as a Sfixed32.
-func consumeSfixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeSfixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.Fixed32Type {
 		return out, errUnknown
 	}
 	v, n := protowire.ConsumeFixed32(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	vp := p.Int32Ptr()
 	if *vp == nil {
@@ -3125,14 +3125,14 @@ var coderSfixed32Ptr = pointerCoderFuncs{
 }
 
 // sizeSfixed32Slice returns the size of wire encoding a []int32 pointer as a repeated Sfixed32.
-func sizeSfixed32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSfixed32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.Int32Slice()
 	size = len(s) * (f.tagsize + protowire.SizeFixed32())
 	return size
 }
 
 // appendSfixed32Slice encodes a []int32 pointer as a repeated Sfixed32.
-func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.Int32Slice()
 	for _, v := range s {
 		b = protowire.AppendVarint(b, f.wiretag)
@@ -3142,18 +3142,18 @@ func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOption
 }
 
 // consumeSfixed32Slice wire decodes a []int32 pointer as a repeated Sfixed32.
-func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.Int32Slice()
 	if wtyp == protowire.BytesType {
 		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return out, protowire.ParseError(n)
+			return out, errDecode
 		}
 		for len(b) > 0 {
 			v, n := protowire.ConsumeFixed32(b)
 			if n < 0 {
-				return out, protowire.ParseError(n)
+				return out, errDecode
 			}
 			s = append(s, int32(v))
 			b = b[n:]
@@ -3167,7 +3167,7 @@ func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFiel
 	}
 	v, n := protowire.ConsumeFixed32(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*sp = append(*sp, int32(v))
 	out.n = n
@@ -3182,7 +3182,7 @@ var coderSfixed32Slice = pointerCoderFuncs{
 }
 
 // sizeSfixed32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sfixed32.
-func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.Int32Slice()
 	if len(s) == 0 {
 		return 0
@@ -3192,7 +3192,7 @@ func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (si
 }
 
 // appendSfixed32PackedSlice encodes a []int32 pointer as a packed repeated Sfixed32.
-func appendSfixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSfixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.Int32Slice()
 	if len(s) == 0 {
 		return b, nil
@@ -3214,25 +3214,25 @@ var coderSfixed32PackedSlice = pointerCoderFuncs{
 }
 
 // sizeSfixed32Value returns the size of wire encoding a int32 value as a Sfixed32.
-func sizeSfixed32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeSfixed32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
 	return tagsize + protowire.SizeFixed32()
 }
 
 // appendSfixed32Value encodes a int32 value as a Sfixed32.
-func appendSfixed32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendSfixed32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	b = protowire.AppendVarint(b, wiretag)
 	b = protowire.AppendFixed32(b, uint32(v.Int()))
 	return b, nil
 }
 
 // consumeSfixed32Value decodes a int32 value as a Sfixed32.
-func consumeSfixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeSfixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	if wtyp != protowire.Fixed32Type {
 		return protoreflect.Value{}, out, errUnknown
 	}
 	v, n := protowire.ConsumeFixed32(b)
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	out.n = n
 	return protoreflect.ValueOfInt32(int32(v)), out, nil
@@ -3246,14 +3246,14 @@ var coderSfixed32Value = valueCoderFuncs{
 }
 
 // sizeSfixed32SliceValue returns the size of wire encoding a []int32 value as a repeated Sfixed32.
-func sizeSfixed32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeSfixed32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	size = list.Len() * (tagsize + protowire.SizeFixed32())
 	return size
 }
 
 // appendSfixed32SliceValue encodes a []int32 value as a repeated Sfixed32.
-func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -3264,17 +3264,17 @@ func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64
 }
 
 // consumeSfixed32SliceValue wire decodes a []int32 value as a repeated Sfixed32.
-func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	list := listv.List()
 	if wtyp == protowire.BytesType {
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return protoreflect.Value{}, out, protowire.ParseError(n)
+			return protoreflect.Value{}, out, errDecode
 		}
 		for len(b) > 0 {
 			v, n := protowire.ConsumeFixed32(b)
 			if n < 0 {
-				return protoreflect.Value{}, out, protowire.ParseError(n)
+				return protoreflect.Value{}, out, errDecode
 			}
 			list.Append(protoreflect.ValueOfInt32(int32(v)))
 			b = b[n:]
@@ -3287,7 +3287,7 @@ func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.N
 	}
 	v, n := protowire.ConsumeFixed32(b)
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	list.Append(protoreflect.ValueOfInt32(int32(v)))
 	out.n = n
@@ -3302,7 +3302,7 @@ var coderSfixed32SliceValue = valueCoderFuncs{
 }
 
 // sizeSfixed32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sfixed32.
-func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -3313,7 +3313,7 @@ func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marsh
 }
 
 // appendSfixed32PackedSliceValue encodes a []int32 value as a packed repeated Sfixed32.
-func appendSfixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendSfixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -3337,13 +3337,13 @@ var coderSfixed32PackedSliceValue = valueCoderFuncs{
 }
 
 // sizeFixed32 returns the size of wire encoding a uint32 pointer as a Fixed32.
-func sizeFixed32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFixed32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 
 	return f.tagsize + protowire.SizeFixed32()
 }
 
 // appendFixed32 wire encodes a uint32 pointer as a Fixed32.
-func appendFixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFixed32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Uint32()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendFixed32(b, v)
@@ -3351,13 +3351,13 @@ func appendFixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]
 }
 
 // consumeFixed32 wire decodes a uint32 pointer as a Fixed32.
-func consumeFixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeFixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.Fixed32Type {
 		return out, errUnknown
 	}
 	v, n := protowire.ConsumeFixed32(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*p.Uint32() = v
 	out.n = n
@@ -3373,7 +3373,7 @@ var coderFixed32 = pointerCoderFuncs{
 
 // sizeFixed32NoZero returns the size of wire encoding a uint32 pointer as a Fixed32.
 // The zero value is not encoded.
-func sizeFixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFixed32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := *p.Uint32()
 	if v == 0 {
 		return 0
@@ -3383,7 +3383,7 @@ func sizeFixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int
 
 // appendFixed32NoZero wire encodes a uint32 pointer as a Fixed32.
 // The zero value is not encoded.
-func appendFixed32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFixed32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Uint32()
 	if v == 0 {
 		return b, nil
@@ -3402,13 +3402,13 @@ var coderFixed32NoZero = pointerCoderFuncs{
 
 // sizeFixed32Ptr returns the size of wire encoding a *uint32 pointer as a Fixed32.
 // It panics if the pointer is nil.
-func sizeFixed32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFixed32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	return f.tagsize + protowire.SizeFixed32()
 }
 
 // appendFixed32Ptr wire encodes a *uint32 pointer as a Fixed32.
 // It panics if the pointer is nil.
-func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := **p.Uint32Ptr()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendFixed32(b, v)
@@ -3416,13 +3416,13 @@ func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions)
 }
 
 // consumeFixed32Ptr wire decodes a *uint32 pointer as a Fixed32.
-func consumeFixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeFixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.Fixed32Type {
 		return out, errUnknown
 	}
 	v, n := protowire.ConsumeFixed32(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	vp := p.Uint32Ptr()
 	if *vp == nil {
@@ -3441,14 +3441,14 @@ var coderFixed32Ptr = pointerCoderFuncs{
 }
 
 // sizeFixed32Slice returns the size of wire encoding a []uint32 pointer as a repeated Fixed32.
-func sizeFixed32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFixed32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.Uint32Slice()
 	size = len(s) * (f.tagsize + protowire.SizeFixed32())
 	return size
 }
 
 // appendFixed32Slice encodes a []uint32 pointer as a repeated Fixed32.
-func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.Uint32Slice()
 	for _, v := range s {
 		b = protowire.AppendVarint(b, f.wiretag)
@@ -3458,18 +3458,18 @@ func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions
 }
 
 // consumeFixed32Slice wire decodes a []uint32 pointer as a repeated Fixed32.
-func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.Uint32Slice()
 	if wtyp == protowire.BytesType {
 		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return out, protowire.ParseError(n)
+			return out, errDecode
 		}
 		for len(b) > 0 {
 			v, n := protowire.ConsumeFixed32(b)
 			if n < 0 {
-				return out, protowire.ParseError(n)
+				return out, errDecode
 			}
 			s = append(s, v)
 			b = b[n:]
@@ -3483,7 +3483,7 @@ func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderField
 	}
 	v, n := protowire.ConsumeFixed32(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*sp = append(*sp, v)
 	out.n = n
@@ -3498,7 +3498,7 @@ var coderFixed32Slice = pointerCoderFuncs{
 }
 
 // sizeFixed32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Fixed32.
-func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.Uint32Slice()
 	if len(s) == 0 {
 		return 0
@@ -3508,7 +3508,7 @@ func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (siz
 }
 
 // appendFixed32PackedSlice encodes a []uint32 pointer as a packed repeated Fixed32.
-func appendFixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.Uint32Slice()
 	if len(s) == 0 {
 		return b, nil
@@ -3530,25 +3530,25 @@ var coderFixed32PackedSlice = pointerCoderFuncs{
 }
 
 // sizeFixed32Value returns the size of wire encoding a uint32 value as a Fixed32.
-func sizeFixed32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeFixed32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
 	return tagsize + protowire.SizeFixed32()
 }
 
 // appendFixed32Value encodes a uint32 value as a Fixed32.
-func appendFixed32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendFixed32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	b = protowire.AppendVarint(b, wiretag)
 	b = protowire.AppendFixed32(b, uint32(v.Uint()))
 	return b, nil
 }
 
 // consumeFixed32Value decodes a uint32 value as a Fixed32.
-func consumeFixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeFixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	if wtyp != protowire.Fixed32Type {
 		return protoreflect.Value{}, out, errUnknown
 	}
 	v, n := protowire.ConsumeFixed32(b)
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	out.n = n
 	return protoreflect.ValueOfUint32(uint32(v)), out, nil
@@ -3562,14 +3562,14 @@ var coderFixed32Value = valueCoderFuncs{
 }
 
 // sizeFixed32SliceValue returns the size of wire encoding a []uint32 value as a repeated Fixed32.
-func sizeFixed32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeFixed32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	size = list.Len() * (tagsize + protowire.SizeFixed32())
 	return size
 }
 
 // appendFixed32SliceValue encodes a []uint32 value as a repeated Fixed32.
-func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -3580,17 +3580,17 @@ func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64,
 }
 
 // consumeFixed32SliceValue wire decodes a []uint32 value as a repeated Fixed32.
-func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	list := listv.List()
 	if wtyp == protowire.BytesType {
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return protoreflect.Value{}, out, protowire.ParseError(n)
+			return protoreflect.Value{}, out, errDecode
 		}
 		for len(b) > 0 {
 			v, n := protowire.ConsumeFixed32(b)
 			if n < 0 {
-				return protoreflect.Value{}, out, protowire.ParseError(n)
+				return protoreflect.Value{}, out, errDecode
 			}
 			list.Append(protoreflect.ValueOfUint32(uint32(v)))
 			b = b[n:]
@@ -3603,7 +3603,7 @@ func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Nu
 	}
 	v, n := protowire.ConsumeFixed32(b)
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	list.Append(protoreflect.ValueOfUint32(uint32(v)))
 	out.n = n
@@ -3618,7 +3618,7 @@ var coderFixed32SliceValue = valueCoderFuncs{
 }
 
 // sizeFixed32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Fixed32.
-func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -3629,7 +3629,7 @@ func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marsha
 }
 
 // appendFixed32PackedSliceValue encodes a []uint32 value as a packed repeated Fixed32.
-func appendFixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendFixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -3653,13 +3653,13 @@ var coderFixed32PackedSliceValue = valueCoderFuncs{
 }
 
 // sizeFloat returns the size of wire encoding a float32 pointer as a Float.
-func sizeFloat(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFloat(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 
 	return f.tagsize + protowire.SizeFixed32()
 }
 
 // appendFloat wire encodes a float32 pointer as a Float.
-func appendFloat(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFloat(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Float32()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendFixed32(b, math.Float32bits(v))
@@ -3667,13 +3667,13 @@ func appendFloat(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]by
 }
 
 // consumeFloat wire decodes a float32 pointer as a Float.
-func consumeFloat(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeFloat(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.Fixed32Type {
 		return out, errUnknown
 	}
 	v, n := protowire.ConsumeFixed32(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*p.Float32() = math.Float32frombits(v)
 	out.n = n
@@ -3689,7 +3689,7 @@ var coderFloat = pointerCoderFuncs{
 
 // sizeFloatNoZero returns the size of wire encoding a float32 pointer as a Float.
 // The zero value is not encoded.
-func sizeFloatNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFloatNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := *p.Float32()
 	if v == 0 && !math.Signbit(float64(v)) {
 		return 0
@@ -3699,7 +3699,7 @@ func sizeFloatNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int)
 
 // appendFloatNoZero wire encodes a float32 pointer as a Float.
 // The zero value is not encoded.
-func appendFloatNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFloatNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Float32()
 	if v == 0 && !math.Signbit(float64(v)) {
 		return b, nil
@@ -3718,13 +3718,13 @@ var coderFloatNoZero = pointerCoderFuncs{
 
 // sizeFloatPtr returns the size of wire encoding a *float32 pointer as a Float.
 // It panics if the pointer is nil.
-func sizeFloatPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFloatPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	return f.tagsize + protowire.SizeFixed32()
 }
 
 // appendFloatPtr wire encodes a *float32 pointer as a Float.
 // It panics if the pointer is nil.
-func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := **p.Float32Ptr()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendFixed32(b, math.Float32bits(v))
@@ -3732,13 +3732,13 @@ func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([
 }
 
 // consumeFloatPtr wire decodes a *float32 pointer as a Float.
-func consumeFloatPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeFloatPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.Fixed32Type {
 		return out, errUnknown
 	}
 	v, n := protowire.ConsumeFixed32(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	vp := p.Float32Ptr()
 	if *vp == nil {
@@ -3757,14 +3757,14 @@ var coderFloatPtr = pointerCoderFuncs{
 }
 
 // sizeFloatSlice returns the size of wire encoding a []float32 pointer as a repeated Float.
-func sizeFloatSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFloatSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.Float32Slice()
 	size = len(s) * (f.tagsize + protowire.SizeFixed32())
 	return size
 }
 
 // appendFloatSlice encodes a []float32 pointer as a repeated Float.
-func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.Float32Slice()
 	for _, v := range s {
 		b = protowire.AppendVarint(b, f.wiretag)
@@ -3774,18 +3774,18 @@ func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions)
 }
 
 // consumeFloatSlice wire decodes a []float32 pointer as a repeated Float.
-func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.Float32Slice()
 	if wtyp == protowire.BytesType {
 		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return out, protowire.ParseError(n)
+			return out, errDecode
 		}
 		for len(b) > 0 {
 			v, n := protowire.ConsumeFixed32(b)
 			if n < 0 {
-				return out, protowire.ParseError(n)
+				return out, errDecode
 			}
 			s = append(s, math.Float32frombits(v))
 			b = b[n:]
@@ -3799,7 +3799,7 @@ func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldIn
 	}
 	v, n := protowire.ConsumeFixed32(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*sp = append(*sp, math.Float32frombits(v))
 	out.n = n
@@ -3814,7 +3814,7 @@ var coderFloatSlice = pointerCoderFuncs{
 }
 
 // sizeFloatPackedSlice returns the size of wire encoding a []float32 pointer as a packed repeated Float.
-func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.Float32Slice()
 	if len(s) == 0 {
 		return 0
@@ -3824,7 +3824,7 @@ func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size
 }
 
 // appendFloatPackedSlice encodes a []float32 pointer as a packed repeated Float.
-func appendFloatPackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFloatPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.Float32Slice()
 	if len(s) == 0 {
 		return b, nil
@@ -3846,25 +3846,25 @@ var coderFloatPackedSlice = pointerCoderFuncs{
 }
 
 // sizeFloatValue returns the size of wire encoding a float32 value as a Float.
-func sizeFloatValue(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeFloatValue(v protoreflect.Value, tagsize int, opts marshalOptions) int {
 	return tagsize + protowire.SizeFixed32()
 }
 
 // appendFloatValue encodes a float32 value as a Float.
-func appendFloatValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendFloatValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	b = protowire.AppendVarint(b, wiretag)
 	b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float())))
 	return b, nil
 }
 
 // consumeFloatValue decodes a float32 value as a Float.
-func consumeFloatValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeFloatValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	if wtyp != protowire.Fixed32Type {
 		return protoreflect.Value{}, out, errUnknown
 	}
 	v, n := protowire.ConsumeFixed32(b)
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	out.n = n
 	return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), out, nil
@@ -3878,14 +3878,14 @@ var coderFloatValue = valueCoderFuncs{
 }
 
 // sizeFloatSliceValue returns the size of wire encoding a []float32 value as a repeated Float.
-func sizeFloatSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeFloatSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	size = list.Len() * (tagsize + protowire.SizeFixed32())
 	return size
 }
 
 // appendFloatSliceValue encodes a []float32 value as a repeated Float.
-func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -3896,17 +3896,17 @@ func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _
 }
 
 // consumeFloatSliceValue wire decodes a []float32 value as a repeated Float.
-func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	list := listv.List()
 	if wtyp == protowire.BytesType {
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return protoreflect.Value{}, out, protowire.ParseError(n)
+			return protoreflect.Value{}, out, errDecode
 		}
 		for len(b) > 0 {
 			v, n := protowire.ConsumeFixed32(b)
 			if n < 0 {
-				return protoreflect.Value{}, out, protowire.ParseError(n)
+				return protoreflect.Value{}, out, errDecode
 			}
 			list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))))
 			b = b[n:]
@@ -3919,7 +3919,7 @@ func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Numb
 	}
 	v, n := protowire.ConsumeFixed32(b)
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))))
 	out.n = n
@@ -3934,7 +3934,7 @@ var coderFloatSliceValue = valueCoderFuncs{
 }
 
 // sizeFloatPackedSliceValue returns the size of wire encoding a []float32 value as a packed repeated Float.
-func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -3945,7 +3945,7 @@ func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalO
 }
 
 // appendFloatPackedSliceValue encodes a []float32 value as a packed repeated Float.
-func appendFloatPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendFloatPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -3969,13 +3969,13 @@ var coderFloatPackedSliceValue = valueCoderFuncs{
 }
 
 // sizeSfixed64 returns the size of wire encoding a int64 pointer as a Sfixed64.
-func sizeSfixed64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSfixed64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 
 	return f.tagsize + protowire.SizeFixed64()
 }
 
 // appendSfixed64 wire encodes a int64 pointer as a Sfixed64.
-func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Int64()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendFixed64(b, uint64(v))
@@ -3983,13 +3983,13 @@ func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([
 }
 
 // consumeSfixed64 wire decodes a int64 pointer as a Sfixed64.
-func consumeSfixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeSfixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.Fixed64Type {
 		return out, errUnknown
 	}
 	v, n := protowire.ConsumeFixed64(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*p.Int64() = int64(v)
 	out.n = n
@@ -4005,7 +4005,7 @@ var coderSfixed64 = pointerCoderFuncs{
 
 // sizeSfixed64NoZero returns the size of wire encoding a int64 pointer as a Sfixed64.
 // The zero value is not encoded.
-func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := *p.Int64()
 	if v == 0 {
 		return 0
@@ -4015,7 +4015,7 @@ func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size in
 
 // appendSfixed64NoZero wire encodes a int64 pointer as a Sfixed64.
 // The zero value is not encoded.
-func appendSfixed64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSfixed64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Int64()
 	if v == 0 {
 		return b, nil
@@ -4034,13 +4034,13 @@ var coderSfixed64NoZero = pointerCoderFuncs{
 
 // sizeSfixed64Ptr returns the size of wire encoding a *int64 pointer as a Sfixed64.
 // It panics if the pointer is nil.
-func sizeSfixed64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSfixed64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	return f.tagsize + protowire.SizeFixed64()
 }
 
 // appendSfixed64Ptr wire encodes a *int64 pointer as a Sfixed64.
 // It panics if the pointer is nil.
-func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := **p.Int64Ptr()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendFixed64(b, uint64(v))
@@ -4048,13 +4048,13 @@ func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions)
 }
 
 // consumeSfixed64Ptr wire decodes a *int64 pointer as a Sfixed64.
-func consumeSfixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeSfixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.Fixed64Type {
 		return out, errUnknown
 	}
 	v, n := protowire.ConsumeFixed64(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	vp := p.Int64Ptr()
 	if *vp == nil {
@@ -4073,14 +4073,14 @@ var coderSfixed64Ptr = pointerCoderFuncs{
 }
 
 // sizeSfixed64Slice returns the size of wire encoding a []int64 pointer as a repeated Sfixed64.
-func sizeSfixed64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSfixed64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.Int64Slice()
 	size = len(s) * (f.tagsize + protowire.SizeFixed64())
 	return size
 }
 
 // appendSfixed64Slice encodes a []int64 pointer as a repeated Sfixed64.
-func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.Int64Slice()
 	for _, v := range s {
 		b = protowire.AppendVarint(b, f.wiretag)
@@ -4090,18 +4090,18 @@ func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOption
 }
 
 // consumeSfixed64Slice wire decodes a []int64 pointer as a repeated Sfixed64.
-func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.Int64Slice()
 	if wtyp == protowire.BytesType {
 		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return out, protowire.ParseError(n)
+			return out, errDecode
 		}
 		for len(b) > 0 {
 			v, n := protowire.ConsumeFixed64(b)
 			if n < 0 {
-				return out, protowire.ParseError(n)
+				return out, errDecode
 			}
 			s = append(s, int64(v))
 			b = b[n:]
@@ -4115,7 +4115,7 @@ func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFiel
 	}
 	v, n := protowire.ConsumeFixed64(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*sp = append(*sp, int64(v))
 	out.n = n
@@ -4130,7 +4130,7 @@ var coderSfixed64Slice = pointerCoderFuncs{
 }
 
 // sizeSfixed64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sfixed64.
-func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.Int64Slice()
 	if len(s) == 0 {
 		return 0
@@ -4140,7 +4140,7 @@ func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (si
 }
 
 // appendSfixed64PackedSlice encodes a []int64 pointer as a packed repeated Sfixed64.
-func appendSfixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendSfixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.Int64Slice()
 	if len(s) == 0 {
 		return b, nil
@@ -4162,25 +4162,25 @@ var coderSfixed64PackedSlice = pointerCoderFuncs{
 }
 
 // sizeSfixed64Value returns the size of wire encoding a int64 value as a Sfixed64.
-func sizeSfixed64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeSfixed64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
 	return tagsize + protowire.SizeFixed64()
 }
 
 // appendSfixed64Value encodes a int64 value as a Sfixed64.
-func appendSfixed64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendSfixed64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	b = protowire.AppendVarint(b, wiretag)
 	b = protowire.AppendFixed64(b, uint64(v.Int()))
 	return b, nil
 }
 
 // consumeSfixed64Value decodes a int64 value as a Sfixed64.
-func consumeSfixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeSfixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	if wtyp != protowire.Fixed64Type {
 		return protoreflect.Value{}, out, errUnknown
 	}
 	v, n := protowire.ConsumeFixed64(b)
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	out.n = n
 	return protoreflect.ValueOfInt64(int64(v)), out, nil
@@ -4194,14 +4194,14 @@ var coderSfixed64Value = valueCoderFuncs{
 }
 
 // sizeSfixed64SliceValue returns the size of wire encoding a []int64 value as a repeated Sfixed64.
-func sizeSfixed64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeSfixed64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	size = list.Len() * (tagsize + protowire.SizeFixed64())
 	return size
 }
 
 // appendSfixed64SliceValue encodes a []int64 value as a repeated Sfixed64.
-func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -4212,17 +4212,17 @@ func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64
 }
 
 // consumeSfixed64SliceValue wire decodes a []int64 value as a repeated Sfixed64.
-func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	list := listv.List()
 	if wtyp == protowire.BytesType {
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return protoreflect.Value{}, out, protowire.ParseError(n)
+			return protoreflect.Value{}, out, errDecode
 		}
 		for len(b) > 0 {
 			v, n := protowire.ConsumeFixed64(b)
 			if n < 0 {
-				return protoreflect.Value{}, out, protowire.ParseError(n)
+				return protoreflect.Value{}, out, errDecode
 			}
 			list.Append(protoreflect.ValueOfInt64(int64(v)))
 			b = b[n:]
@@ -4235,7 +4235,7 @@ func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.N
 	}
 	v, n := protowire.ConsumeFixed64(b)
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	list.Append(protoreflect.ValueOfInt64(int64(v)))
 	out.n = n
@@ -4250,7 +4250,7 @@ var coderSfixed64SliceValue = valueCoderFuncs{
 }
 
 // sizeSfixed64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sfixed64.
-func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -4261,7 +4261,7 @@ func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marsh
 }
 
 // appendSfixed64PackedSliceValue encodes a []int64 value as a packed repeated Sfixed64.
-func appendSfixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendSfixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -4285,13 +4285,13 @@ var coderSfixed64PackedSliceValue = valueCoderFuncs{
 }
 
 // sizeFixed64 returns the size of wire encoding a uint64 pointer as a Fixed64.
-func sizeFixed64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFixed64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 
 	return f.tagsize + protowire.SizeFixed64()
 }
 
 // appendFixed64 wire encodes a uint64 pointer as a Fixed64.
-func appendFixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFixed64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Uint64()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendFixed64(b, v)
@@ -4299,13 +4299,13 @@ func appendFixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]
 }
 
 // consumeFixed64 wire decodes a uint64 pointer as a Fixed64.
-func consumeFixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeFixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.Fixed64Type {
 		return out, errUnknown
 	}
 	v, n := protowire.ConsumeFixed64(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*p.Uint64() = v
 	out.n = n
@@ -4321,7 +4321,7 @@ var coderFixed64 = pointerCoderFuncs{
 
 // sizeFixed64NoZero returns the size of wire encoding a uint64 pointer as a Fixed64.
 // The zero value is not encoded.
-func sizeFixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFixed64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := *p.Uint64()
 	if v == 0 {
 		return 0
@@ -4331,7 +4331,7 @@ func sizeFixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int
 
 // appendFixed64NoZero wire encodes a uint64 pointer as a Fixed64.
 // The zero value is not encoded.
-func appendFixed64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFixed64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Uint64()
 	if v == 0 {
 		return b, nil
@@ -4350,13 +4350,13 @@ var coderFixed64NoZero = pointerCoderFuncs{
 
 // sizeFixed64Ptr returns the size of wire encoding a *uint64 pointer as a Fixed64.
 // It panics if the pointer is nil.
-func sizeFixed64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFixed64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	return f.tagsize + protowire.SizeFixed64()
 }
 
 // appendFixed64Ptr wire encodes a *uint64 pointer as a Fixed64.
 // It panics if the pointer is nil.
-func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := **p.Uint64Ptr()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendFixed64(b, v)
@@ -4364,13 +4364,13 @@ func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions)
 }
 
 // consumeFixed64Ptr wire decodes a *uint64 pointer as a Fixed64.
-func consumeFixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeFixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.Fixed64Type {
 		return out, errUnknown
 	}
 	v, n := protowire.ConsumeFixed64(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	vp := p.Uint64Ptr()
 	if *vp == nil {
@@ -4389,14 +4389,14 @@ var coderFixed64Ptr = pointerCoderFuncs{
 }
 
 // sizeFixed64Slice returns the size of wire encoding a []uint64 pointer as a repeated Fixed64.
-func sizeFixed64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFixed64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.Uint64Slice()
 	size = len(s) * (f.tagsize + protowire.SizeFixed64())
 	return size
 }
 
 // appendFixed64Slice encodes a []uint64 pointer as a repeated Fixed64.
-func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.Uint64Slice()
 	for _, v := range s {
 		b = protowire.AppendVarint(b, f.wiretag)
@@ -4406,18 +4406,18 @@ func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions
 }
 
 // consumeFixed64Slice wire decodes a []uint64 pointer as a repeated Fixed64.
-func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.Uint64Slice()
 	if wtyp == protowire.BytesType {
 		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return out, protowire.ParseError(n)
+			return out, errDecode
 		}
 		for len(b) > 0 {
 			v, n := protowire.ConsumeFixed64(b)
 			if n < 0 {
-				return out, protowire.ParseError(n)
+				return out, errDecode
 			}
 			s = append(s, v)
 			b = b[n:]
@@ -4431,7 +4431,7 @@ func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderField
 	}
 	v, n := protowire.ConsumeFixed64(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*sp = append(*sp, v)
 	out.n = n
@@ -4446,7 +4446,7 @@ var coderFixed64Slice = pointerCoderFuncs{
 }
 
 // sizeFixed64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Fixed64.
-func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.Uint64Slice()
 	if len(s) == 0 {
 		return 0
@@ -4456,7 +4456,7 @@ func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (siz
 }
 
 // appendFixed64PackedSlice encodes a []uint64 pointer as a packed repeated Fixed64.
-func appendFixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendFixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.Uint64Slice()
 	if len(s) == 0 {
 		return b, nil
@@ -4478,25 +4478,25 @@ var coderFixed64PackedSlice = pointerCoderFuncs{
 }
 
 // sizeFixed64Value returns the size of wire encoding a uint64 value as a Fixed64.
-func sizeFixed64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeFixed64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int {
 	return tagsize + protowire.SizeFixed64()
 }
 
 // appendFixed64Value encodes a uint64 value as a Fixed64.
-func appendFixed64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendFixed64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	b = protowire.AppendVarint(b, wiretag)
 	b = protowire.AppendFixed64(b, v.Uint())
 	return b, nil
 }
 
 // consumeFixed64Value decodes a uint64 value as a Fixed64.
-func consumeFixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeFixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	if wtyp != protowire.Fixed64Type {
 		return protoreflect.Value{}, out, errUnknown
 	}
 	v, n := protowire.ConsumeFixed64(b)
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	out.n = n
 	return protoreflect.ValueOfUint64(v), out, nil
@@ -4510,14 +4510,14 @@ var coderFixed64Value = valueCoderFuncs{
 }
 
 // sizeFixed64SliceValue returns the size of wire encoding a []uint64 value as a repeated Fixed64.
-func sizeFixed64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeFixed64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	size = list.Len() * (tagsize + protowire.SizeFixed64())
 	return size
 }
 
 // appendFixed64SliceValue encodes a []uint64 value as a repeated Fixed64.
-func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -4528,17 +4528,17 @@ func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64,
 }
 
 // consumeFixed64SliceValue wire decodes a []uint64 value as a repeated Fixed64.
-func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	list := listv.List()
 	if wtyp == protowire.BytesType {
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return protoreflect.Value{}, out, protowire.ParseError(n)
+			return protoreflect.Value{}, out, errDecode
 		}
 		for len(b) > 0 {
 			v, n := protowire.ConsumeFixed64(b)
 			if n < 0 {
-				return protoreflect.Value{}, out, protowire.ParseError(n)
+				return protoreflect.Value{}, out, errDecode
 			}
 			list.Append(protoreflect.ValueOfUint64(v))
 			b = b[n:]
@@ -4551,7 +4551,7 @@ func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Nu
 	}
 	v, n := protowire.ConsumeFixed64(b)
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	list.Append(protoreflect.ValueOfUint64(v))
 	out.n = n
@@ -4566,7 +4566,7 @@ var coderFixed64SliceValue = valueCoderFuncs{
 }
 
 // sizeFixed64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Fixed64.
-func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -4577,7 +4577,7 @@ func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marsha
 }
 
 // appendFixed64PackedSliceValue encodes a []uint64 value as a packed repeated Fixed64.
-func appendFixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendFixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -4601,13 +4601,13 @@ var coderFixed64PackedSliceValue = valueCoderFuncs{
 }
 
 // sizeDouble returns the size of wire encoding a float64 pointer as a Double.
-func sizeDouble(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeDouble(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 
 	return f.tagsize + protowire.SizeFixed64()
 }
 
 // appendDouble wire encodes a float64 pointer as a Double.
-func appendDouble(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendDouble(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Float64()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendFixed64(b, math.Float64bits(v))
@@ -4615,13 +4615,13 @@ func appendDouble(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b
 }
 
 // consumeDouble wire decodes a float64 pointer as a Double.
-func consumeDouble(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeDouble(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.Fixed64Type {
 		return out, errUnknown
 	}
 	v, n := protowire.ConsumeFixed64(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*p.Float64() = math.Float64frombits(v)
 	out.n = n
@@ -4637,7 +4637,7 @@ var coderDouble = pointerCoderFuncs{
 
 // sizeDoubleNoZero returns the size of wire encoding a float64 pointer as a Double.
 // The zero value is not encoded.
-func sizeDoubleNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeDoubleNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := *p.Float64()
 	if v == 0 && !math.Signbit(float64(v)) {
 		return 0
@@ -4647,7 +4647,7 @@ func sizeDoubleNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int)
 
 // appendDoubleNoZero wire encodes a float64 pointer as a Double.
 // The zero value is not encoded.
-func appendDoubleNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendDoubleNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Float64()
 	if v == 0 && !math.Signbit(float64(v)) {
 		return b, nil
@@ -4666,13 +4666,13 @@ var coderDoubleNoZero = pointerCoderFuncs{
 
 // sizeDoublePtr returns the size of wire encoding a *float64 pointer as a Double.
 // It panics if the pointer is nil.
-func sizeDoublePtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeDoublePtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	return f.tagsize + protowire.SizeFixed64()
 }
 
 // appendDoublePtr wire encodes a *float64 pointer as a Double.
 // It panics if the pointer is nil.
-func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := **p.Float64Ptr()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendFixed64(b, math.Float64bits(v))
@@ -4680,13 +4680,13 @@ func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) (
 }
 
 // consumeDoublePtr wire decodes a *float64 pointer as a Double.
-func consumeDoublePtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeDoublePtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.Fixed64Type {
 		return out, errUnknown
 	}
 	v, n := protowire.ConsumeFixed64(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	vp := p.Float64Ptr()
 	if *vp == nil {
@@ -4705,14 +4705,14 @@ var coderDoublePtr = pointerCoderFuncs{
 }
 
 // sizeDoubleSlice returns the size of wire encoding a []float64 pointer as a repeated Double.
-func sizeDoubleSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeDoubleSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.Float64Slice()
 	size = len(s) * (f.tagsize + protowire.SizeFixed64())
 	return size
 }
 
 // appendDoubleSlice encodes a []float64 pointer as a repeated Double.
-func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.Float64Slice()
 	for _, v := range s {
 		b = protowire.AppendVarint(b, f.wiretag)
@@ -4722,18 +4722,18 @@ func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions)
 }
 
 // consumeDoubleSlice wire decodes a []float64 pointer as a repeated Double.
-func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.Float64Slice()
 	if wtyp == protowire.BytesType {
 		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return out, protowire.ParseError(n)
+			return out, errDecode
 		}
 		for len(b) > 0 {
 			v, n := protowire.ConsumeFixed64(b)
 			if n < 0 {
-				return out, protowire.ParseError(n)
+				return out, errDecode
 			}
 			s = append(s, math.Float64frombits(v))
 			b = b[n:]
@@ -4747,7 +4747,7 @@ func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldI
 	}
 	v, n := protowire.ConsumeFixed64(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*sp = append(*sp, math.Float64frombits(v))
 	out.n = n
@@ -4762,7 +4762,7 @@ var coderDoubleSlice = pointerCoderFuncs{
 }
 
 // sizeDoublePackedSlice returns the size of wire encoding a []float64 pointer as a packed repeated Double.
-func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.Float64Slice()
 	if len(s) == 0 {
 		return 0
@@ -4772,7 +4772,7 @@ func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size
 }
 
 // appendDoublePackedSlice encodes a []float64 pointer as a packed repeated Double.
-func appendDoublePackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendDoublePackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.Float64Slice()
 	if len(s) == 0 {
 		return b, nil
@@ -4794,25 +4794,25 @@ var coderDoublePackedSlice = pointerCoderFuncs{
 }
 
 // sizeDoubleValue returns the size of wire encoding a float64 value as a Double.
-func sizeDoubleValue(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeDoubleValue(v protoreflect.Value, tagsize int, opts marshalOptions) int {
 	return tagsize + protowire.SizeFixed64()
 }
 
 // appendDoubleValue encodes a float64 value as a Double.
-func appendDoubleValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendDoubleValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	b = protowire.AppendVarint(b, wiretag)
 	b = protowire.AppendFixed64(b, math.Float64bits(v.Float()))
 	return b, nil
 }
 
 // consumeDoubleValue decodes a float64 value as a Double.
-func consumeDoubleValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeDoubleValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	if wtyp != protowire.Fixed64Type {
 		return protoreflect.Value{}, out, errUnknown
 	}
 	v, n := protowire.ConsumeFixed64(b)
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	out.n = n
 	return protoreflect.ValueOfFloat64(math.Float64frombits(v)), out, nil
@@ -4826,14 +4826,14 @@ var coderDoubleValue = valueCoderFuncs{
 }
 
 // sizeDoubleSliceValue returns the size of wire encoding a []float64 value as a repeated Double.
-func sizeDoubleSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeDoubleSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	size = list.Len() * (tagsize + protowire.SizeFixed64())
 	return size
 }
 
 // appendDoubleSliceValue encodes a []float64 value as a repeated Double.
-func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -4844,17 +4844,17 @@ func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64,
 }
 
 // consumeDoubleSliceValue wire decodes a []float64 value as a repeated Double.
-func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	list := listv.List()
 	if wtyp == protowire.BytesType {
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return protoreflect.Value{}, out, protowire.ParseError(n)
+			return protoreflect.Value{}, out, errDecode
 		}
 		for len(b) > 0 {
 			v, n := protowire.ConsumeFixed64(b)
 			if n < 0 {
-				return protoreflect.Value{}, out, protowire.ParseError(n)
+				return protoreflect.Value{}, out, errDecode
 			}
 			list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v)))
 			b = b[n:]
@@ -4867,7 +4867,7 @@ func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Num
 	}
 	v, n := protowire.ConsumeFixed64(b)
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v)))
 	out.n = n
@@ -4882,7 +4882,7 @@ var coderDoubleSliceValue = valueCoderFuncs{
 }
 
 // sizeDoublePackedSliceValue returns the size of wire encoding a []float64 value as a packed repeated Double.
-func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -4893,7 +4893,7 @@ func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, _ marshal
 }
 
 // appendDoublePackedSliceValue encodes a []float64 value as a packed repeated Double.
-func appendDoublePackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendDoublePackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	llen := list.Len()
 	if llen == 0 {
@@ -4917,13 +4917,13 @@ var coderDoublePackedSliceValue = valueCoderFuncs{
 }
 
 // sizeString returns the size of wire encoding a string pointer as a String.
-func sizeString(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeString(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := *p.String()
 	return f.tagsize + protowire.SizeBytes(len(v))
 }
 
 // appendString wire encodes a string pointer as a String.
-func appendString(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendString(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.String()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendString(b, v)
@@ -4931,15 +4931,15 @@ func appendString(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]b
 }
 
 // consumeString wire decodes a string pointer as a String.
-func consumeString(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeString(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.BytesType {
 		return out, errUnknown
 	}
-	v, n := protowire.ConsumeString(b)
+	v, n := protowire.ConsumeBytes(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
-	*p.String() = v
+	*p.String() = string(v)
 	out.n = n
 	return out, nil
 }
@@ -4952,7 +4952,7 @@ var coderString = pointerCoderFuncs{
 }
 
 // appendStringValidateUTF8 wire encodes a string pointer as a String.
-func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.String()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendString(b, v)
@@ -4963,18 +4963,18 @@ func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalO
 }
 
 // consumeStringValidateUTF8 wire decodes a string pointer as a String.
-func consumeStringValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeStringValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.BytesType {
 		return out, errUnknown
 	}
-	v, n := protowire.ConsumeString(b)
+	v, n := protowire.ConsumeBytes(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
-	if !utf8.ValidString(v) {
+	if !utf8.Valid(v) {
 		return out, errInvalidUTF8{}
 	}
-	*p.String() = v
+	*p.String() = string(v)
 	out.n = n
 	return out, nil
 }
@@ -4988,7 +4988,7 @@ var coderStringValidateUTF8 = pointerCoderFuncs{
 
 // sizeStringNoZero returns the size of wire encoding a string pointer as a String.
 // The zero value is not encoded.
-func sizeStringNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeStringNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := *p.String()
 	if len(v) == 0 {
 		return 0
@@ -4998,7 +4998,7 @@ func sizeStringNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int)
 
 // appendStringNoZero wire encodes a string pointer as a String.
 // The zero value is not encoded.
-func appendStringNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendStringNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.String()
 	if len(v) == 0 {
 		return b, nil
@@ -5017,7 +5017,7 @@ var coderStringNoZero = pointerCoderFuncs{
 
 // appendStringNoZeroValidateUTF8 wire encodes a string pointer as a String.
 // The zero value is not encoded.
-func appendStringNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendStringNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.String()
 	if len(v) == 0 {
 		return b, nil
@@ -5039,14 +5039,14 @@ var coderStringNoZeroValidateUTF8 = pointerCoderFuncs{
 
 // sizeStringPtr returns the size of wire encoding a *string pointer as a String.
 // It panics if the pointer is nil.
-func sizeStringPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeStringPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := **p.StringPtr()
 	return f.tagsize + protowire.SizeBytes(len(v))
 }
 
 // appendStringPtr wire encodes a *string pointer as a String.
 // It panics if the pointer is nil.
-func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := **p.StringPtr()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendString(b, v)
@@ -5054,19 +5054,19 @@ func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) (
 }
 
 // consumeStringPtr wire decodes a *string pointer as a String.
-func consumeStringPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeStringPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.BytesType {
 		return out, errUnknown
 	}
-	v, n := protowire.ConsumeString(b)
+	v, n := protowire.ConsumeBytes(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	vp := p.StringPtr()
 	if *vp == nil {
 		*vp = new(string)
 	}
-	**vp = v
+	**vp = string(v)
 	out.n = n
 	return out, nil
 }
@@ -5080,7 +5080,7 @@ var coderStringPtr = pointerCoderFuncs{
 
 // appendStringPtrValidateUTF8 wire encodes a *string pointer as a String.
 // It panics if the pointer is nil.
-func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := **p.StringPtr()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendString(b, v)
@@ -5091,22 +5091,22 @@ func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marsh
 }
 
 // consumeStringPtrValidateUTF8 wire decodes a *string pointer as a String.
-func consumeStringPtrValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeStringPtrValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.BytesType {
 		return out, errUnknown
 	}
-	v, n := protowire.ConsumeString(b)
+	v, n := protowire.ConsumeBytes(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
-	if !utf8.ValidString(v) {
+	if !utf8.Valid(v) {
 		return out, errInvalidUTF8{}
 	}
 	vp := p.StringPtr()
 	if *vp == nil {
 		*vp = new(string)
 	}
-	**vp = v
+	**vp = string(v)
 	out.n = n
 	return out, nil
 }
@@ -5119,7 +5119,7 @@ var coderStringPtrValidateUTF8 = pointerCoderFuncs{
 }
 
 // sizeStringSlice returns the size of wire encoding a []string pointer as a repeated String.
-func sizeStringSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeStringSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.StringSlice()
 	for _, v := range s {
 		size += f.tagsize + protowire.SizeBytes(len(v))
@@ -5128,7 +5128,7 @@ func sizeStringSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int)
 }
 
 // appendStringSlice encodes a []string pointer as a repeated String.
-func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.StringSlice()
 	for _, v := range s {
 		b = protowire.AppendVarint(b, f.wiretag)
@@ -5138,16 +5138,16 @@ func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions)
 }
 
 // consumeStringSlice wire decodes a []string pointer as a repeated String.
-func consumeStringSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeStringSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.StringSlice()
 	if wtyp != protowire.BytesType {
 		return out, errUnknown
 	}
-	v, n := protowire.ConsumeString(b)
+	v, n := protowire.ConsumeBytes(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
-	*sp = append(*sp, v)
+	*sp = append(*sp, string(v))
 	out.n = n
 	return out, nil
 }
@@ -5160,7 +5160,7 @@ var coderStringSlice = pointerCoderFuncs{
 }
 
 // appendStringSliceValidateUTF8 encodes a []string pointer as a repeated String.
-func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.StringSlice()
 	for _, v := range s {
 		b = protowire.AppendVarint(b, f.wiretag)
@@ -5173,19 +5173,19 @@ func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ mar
 }
 
 // consumeStringSliceValidateUTF8 wire decodes a []string pointer as a repeated String.
-func consumeStringSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
-	sp := p.StringSlice()
+func consumeStringSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.BytesType {
 		return out, errUnknown
 	}
-	v, n := protowire.ConsumeString(b)
+	v, n := protowire.ConsumeBytes(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
-	if !utf8.ValidString(v) {
+	if !utf8.Valid(v) {
 		return out, errInvalidUTF8{}
 	}
-	*sp = append(*sp, v)
+	sp := p.StringSlice()
+	*sp = append(*sp, string(v))
 	out.n = n
 	return out, nil
 }
@@ -5198,25 +5198,25 @@ var coderStringSliceValidateUTF8 = pointerCoderFuncs{
 }
 
 // sizeStringValue returns the size of wire encoding a string value as a String.
-func sizeStringValue(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeStringValue(v protoreflect.Value, tagsize int, opts marshalOptions) int {
 	return tagsize + protowire.SizeBytes(len(v.String()))
 }
 
 // appendStringValue encodes a string value as a String.
-func appendStringValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendStringValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	b = protowire.AppendVarint(b, wiretag)
 	b = protowire.AppendString(b, v.String())
 	return b, nil
 }
 
 // consumeStringValue decodes a string value as a String.
-func consumeStringValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeStringValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	if wtyp != protowire.BytesType {
 		return protoreflect.Value{}, out, errUnknown
 	}
-	v, n := protowire.ConsumeString(b)
+	v, n := protowire.ConsumeBytes(b)
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	out.n = n
 	return protoreflect.ValueOfString(string(v)), out, nil
@@ -5230,7 +5230,7 @@ var coderStringValue = valueCoderFuncs{
 }
 
 // appendStringValueValidateUTF8 encodes a string value as a String.
-func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	b = protowire.AppendVarint(b, wiretag)
 	b = protowire.AppendString(b, v.String())
 	if !utf8.ValidString(v.String()) {
@@ -5240,15 +5240,15 @@ func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint6
 }
 
 // consumeStringValueValidateUTF8 decodes a string value as a String.
-func consumeStringValueValidateUTF8(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeStringValueValidateUTF8(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	if wtyp != protowire.BytesType {
 		return protoreflect.Value{}, out, errUnknown
 	}
-	v, n := protowire.ConsumeString(b)
+	v, n := protowire.ConsumeBytes(b)
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
-	if !utf8.ValidString(v) {
+	if !utf8.Valid(v) {
 		return protoreflect.Value{}, out, errInvalidUTF8{}
 	}
 	out.n = n
@@ -5263,7 +5263,7 @@ var coderStringValueValidateUTF8 = valueCoderFuncs{
 }
 
 // sizeStringSliceValue returns the size of wire encoding a []string value as a repeated String.
-func sizeStringSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeStringSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -5273,7 +5273,7 @@ func sizeStringSliceValue(listv protoreflect.Value, tagsize int, _ marshalOption
 }
 
 // appendStringSliceValue encodes a []string value as a repeated String.
-func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -5284,14 +5284,14 @@ func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64,
 }
 
 // consumeStringSliceValue wire decodes a []string value as a repeated String.
-func consumeStringSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeStringSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	list := listv.List()
 	if wtyp != protowire.BytesType {
 		return protoreflect.Value{}, out, errUnknown
 	}
-	v, n := protowire.ConsumeString(b)
+	v, n := protowire.ConsumeBytes(b)
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	list.Append(protoreflect.ValueOfString(string(v)))
 	out.n = n
@@ -5306,13 +5306,13 @@ var coderStringSliceValue = valueCoderFuncs{
 }
 
 // sizeBytes returns the size of wire encoding a []byte pointer as a Bytes.
-func sizeBytes(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeBytes(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := *p.Bytes()
 	return f.tagsize + protowire.SizeBytes(len(v))
 }
 
 // appendBytes wire encodes a []byte pointer as a Bytes.
-func appendBytes(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendBytes(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Bytes()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendBytes(b, v)
@@ -5320,13 +5320,13 @@ func appendBytes(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]by
 }
 
 // consumeBytes wire decodes a []byte pointer as a Bytes.
-func consumeBytes(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeBytes(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.BytesType {
 		return out, errUnknown
 	}
 	v, n := protowire.ConsumeBytes(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*p.Bytes() = append(emptyBuf[:], v...)
 	out.n = n
@@ -5341,7 +5341,7 @@ var coderBytes = pointerCoderFuncs{
 }
 
 // appendBytesValidateUTF8 wire encodes a []byte pointer as a Bytes.
-func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Bytes()
 	b = protowire.AppendVarint(b, f.wiretag)
 	b = protowire.AppendBytes(b, v)
@@ -5352,13 +5352,13 @@ func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOp
 }
 
 // consumeBytesValidateUTF8 wire decodes a []byte pointer as a Bytes.
-func consumeBytesValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeBytesValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.BytesType {
 		return out, errUnknown
 	}
 	v, n := protowire.ConsumeBytes(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	if !utf8.Valid(v) {
 		return out, errInvalidUTF8{}
@@ -5377,7 +5377,7 @@ var coderBytesValidateUTF8 = pointerCoderFuncs{
 
 // sizeBytesNoZero returns the size of wire encoding a []byte pointer as a Bytes.
 // The zero value is not encoded.
-func sizeBytesNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeBytesNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	v := *p.Bytes()
 	if len(v) == 0 {
 		return 0
@@ -5387,7 +5387,7 @@ func sizeBytesNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int)
 
 // appendBytesNoZero wire encodes a []byte pointer as a Bytes.
 // The zero value is not encoded.
-func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Bytes()
 	if len(v) == 0 {
 		return b, nil
@@ -5399,13 +5399,13 @@ func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions)
 
 // consumeBytesNoZero wire decodes a []byte pointer as a Bytes.
 // The zero value is not decoded.
-func consumeBytesNoZero(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeBytesNoZero(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.BytesType {
 		return out, errUnknown
 	}
 	v, n := protowire.ConsumeBytes(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*p.Bytes() = append(([]byte)(nil), v...)
 	out.n = n
@@ -5421,7 +5421,7 @@ var coderBytesNoZero = pointerCoderFuncs{
 
 // appendBytesNoZeroValidateUTF8 wire encodes a []byte pointer as a Bytes.
 // The zero value is not encoded.
-func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	v := *p.Bytes()
 	if len(v) == 0 {
 		return b, nil
@@ -5435,13 +5435,13 @@ func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ mar
 }
 
 // consumeBytesNoZeroValidateUTF8 wire decodes a []byte pointer as a Bytes.
-func consumeBytesNoZeroValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeBytesNoZeroValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.BytesType {
 		return out, errUnknown
 	}
 	v, n := protowire.ConsumeBytes(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	if !utf8.Valid(v) {
 		return out, errInvalidUTF8{}
@@ -5459,7 +5459,7 @@ var coderBytesNoZeroValidateUTF8 = pointerCoderFuncs{
 }
 
 // sizeBytesSlice returns the size of wire encoding a [][]byte pointer as a repeated Bytes.
-func sizeBytesSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+func sizeBytesSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
 	s := *p.BytesSlice()
 	for _, v := range s {
 		size += f.tagsize + protowire.SizeBytes(len(v))
@@ -5468,7 +5468,7 @@ func sizeBytesSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
 }
 
 // appendBytesSlice encodes a [][]byte pointer as a repeated Bytes.
-func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.BytesSlice()
 	for _, v := range s {
 		b = protowire.AppendVarint(b, f.wiretag)
@@ -5478,14 +5478,14 @@ func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions)
 }
 
 // consumeBytesSlice wire decodes a [][]byte pointer as a repeated Bytes.
-func consumeBytesSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+func consumeBytesSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.BytesSlice()
 	if wtyp != protowire.BytesType {
 		return out, errUnknown
 	}
 	v, n := protowire.ConsumeBytes(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	*sp = append(*sp, append(emptyBuf[:], v...))
 	out.n = n
@@ -5500,7 +5500,7 @@ var coderBytesSlice = pointerCoderFuncs{
 }
 
 // appendBytesSliceValidateUTF8 encodes a [][]byte pointer as a repeated Bytes.
-func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
 	s := *p.BytesSlice()
 	for _, v := range s {
 		b = protowire.AppendVarint(b, f.wiretag)
@@ -5513,18 +5513,18 @@ func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ mars
 }
 
 // consumeBytesSliceValidateUTF8 wire decodes a [][]byte pointer as a repeated Bytes.
-func consumeBytesSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
-	sp := p.BytesSlice()
+func consumeBytesSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	if wtyp != protowire.BytesType {
 		return out, errUnknown
 	}
 	v, n := protowire.ConsumeBytes(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	if !utf8.Valid(v) {
 		return out, errInvalidUTF8{}
 	}
+	sp := p.BytesSlice()
 	*sp = append(*sp, append(emptyBuf[:], v...))
 	out.n = n
 	return out, nil
@@ -5538,25 +5538,25 @@ var coderBytesSliceValidateUTF8 = pointerCoderFuncs{
 }
 
 // sizeBytesValue returns the size of wire encoding a []byte value as a Bytes.
-func sizeBytesValue(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+func sizeBytesValue(v protoreflect.Value, tagsize int, opts marshalOptions) int {
 	return tagsize + protowire.SizeBytes(len(v.Bytes()))
 }
 
 // appendBytesValue encodes a []byte value as a Bytes.
-func appendBytesValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendBytesValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	b = protowire.AppendVarint(b, wiretag)
 	b = protowire.AppendBytes(b, v.Bytes())
 	return b, nil
 }
 
 // consumeBytesValue decodes a []byte value as a Bytes.
-func consumeBytesValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeBytesValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	if wtyp != protowire.BytesType {
 		return protoreflect.Value{}, out, errUnknown
 	}
 	v, n := protowire.ConsumeBytes(b)
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	out.n = n
 	return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), out, nil
@@ -5570,7 +5570,7 @@ var coderBytesValue = valueCoderFuncs{
 }
 
 // sizeBytesSliceValue returns the size of wire encoding a [][]byte value as a repeated Bytes.
-func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -5580,7 +5580,7 @@ func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions
 }
 
 // appendBytesSliceValue encodes a [][]byte value as a repeated Bytes.
-func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
 	list := listv.List()
 	for i, llen := 0, list.Len(); i < llen; i++ {
 		v := list.Get(i)
@@ -5591,14 +5591,14 @@ func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _
 }
 
 // consumeBytesSliceValue wire decodes a [][]byte value as a repeated Bytes.
-func consumeBytesSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+func consumeBytesSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
 	list := listv.List()
 	if wtyp != protowire.BytesType {
 		return protoreflect.Value{}, out, errUnknown
 	}
 	v, n := protowire.ConsumeBytes(b)
 	if n < 0 {
-		return protoreflect.Value{}, out, protowire.ParseError(n)
+		return protoreflect.Value{}, out, errDecode
 	}
 	list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...)))
 	out.n = n
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
index 44885a761..c1245fef4 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
@@ -5,7 +5,6 @@
 package impl
 
 import (
-	"errors"
 	"reflect"
 	"sort"
 
@@ -118,7 +117,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo
 	}
 	b, n := protowire.ConsumeBytes(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	var (
 		key = mapi.keyZero
@@ -127,10 +126,10 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo
 	for len(b) > 0 {
 		num, wtyp, n := protowire.ConsumeTag(b)
 		if n < 0 {
-			return out, protowire.ParseError(n)
+			return out, errDecode
 		}
 		if num > protowire.MaxValidNumber {
-			return out, errors.New("invalid field number")
+			return out, errDecode
 		}
 		b = b[n:]
 		err := errUnknown
@@ -157,7 +156,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo
 		if err == errUnknown {
 			n = protowire.ConsumeFieldValue(num, wtyp, b)
 			if n < 0 {
-				return out, protowire.ParseError(n)
+				return out, errDecode
 			}
 		} else if err != nil {
 			return out, err
@@ -175,7 +174,7 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi
 	}
 	b, n := protowire.ConsumeBytes(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	var (
 		key = mapi.keyZero
@@ -184,10 +183,10 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi
 	for len(b) > 0 {
 		num, wtyp, n := protowire.ConsumeTag(b)
 		if n < 0 {
-			return out, protowire.ParseError(n)
+			return out, errDecode
 		}
 		if num > protowire.MaxValidNumber {
-			return out, errors.New("invalid field number")
+			return out, errDecode
 		}
 		b = b[n:]
 		err := errUnknown
@@ -208,7 +207,7 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi
 			var v []byte
 			v, n = protowire.ConsumeBytes(b)
 			if n < 0 {
-				return out, protowire.ParseError(n)
+				return out, errDecode
 			}
 			var o unmarshalOutput
 			o, err = f.mi.unmarshalPointer(v, pointerOfValue(val), 0, opts)
@@ -221,7 +220,7 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi
 		if err == errUnknown {
 			n = protowire.ConsumeFieldValue(num, wtyp, b)
 			if n < 0 {
-				return out, protowire.ParseError(n)
+				return out, errDecode
 			}
 		} else if err != nil {
 			return out, err
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
index 0e176d565..cd40527ff 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
@@ -11,7 +11,7 @@ import (
 
 	"google.golang.org/protobuf/encoding/protowire"
 	"google.golang.org/protobuf/internal/encoding/messageset"
-	"google.golang.org/protobuf/internal/fieldsort"
+	"google.golang.org/protobuf/internal/order"
 	pref "google.golang.org/protobuf/reflect/protoreflect"
 	piface "google.golang.org/protobuf/runtime/protoiface"
 )
@@ -27,6 +27,7 @@ type coderMessageInfo struct {
 	coderFields        map[protowire.Number]*coderFieldInfo
 	sizecacheOffset    offset
 	unknownOffset      offset
+	unknownPtrKind     bool
 	extensionOffset    offset
 	needsInitCheck     bool
 	isMessageSet       bool
@@ -47,9 +48,20 @@ type coderFieldInfo struct {
 }
 
 func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
-	mi.sizecacheOffset = si.sizecacheOffset
-	mi.unknownOffset = si.unknownOffset
-	mi.extensionOffset = si.extensionOffset
+	mi.sizecacheOffset = invalidOffset
+	mi.unknownOffset = invalidOffset
+	mi.extensionOffset = invalidOffset
+
+	if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType {
+		mi.sizecacheOffset = si.sizecacheOffset
+	}
+	if si.unknownOffset.IsValid() && (si.unknownType == unknownFieldsAType || si.unknownType == unknownFieldsBType) {
+		mi.unknownOffset = si.unknownOffset
+		mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr
+	}
+	if si.extensionOffset.IsValid() && si.extensionType == extensionFieldsType {
+		mi.extensionOffset = si.extensionOffset
+	}
 
 	mi.coderFields = make(map[protowire.Number]*coderFieldInfo)
 	fields := mi.Desc.Fields()
@@ -73,6 +85,27 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
 		var funcs pointerCoderFuncs
 		var childMessage *MessageInfo
 		switch {
+		case ft == nil:
+			// This never occurs for generated message types.
+			// It implies that a hand-crafted type has missing Go fields
+			// for specific protobuf message fields.
+			funcs = pointerCoderFuncs{
+				size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+					return 0
+				},
+				marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+					return nil, nil
+				},
+				unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+					panic("missing Go struct field for " + string(fd.FullName()))
+				},
+				isInit: func(p pointer, f *coderFieldInfo) error {
+					panic("missing Go struct field for " + string(fd.FullName()))
+				},
+				merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+					panic("missing Go struct field for " + string(fd.FullName()))
+				},
+			}
 		case isOneof:
 			fieldOffset = offsetOf(fs, mi.Exporter)
 		case fd.IsWeak():
@@ -136,7 +169,7 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
 		sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
 			fi := fields.ByNumber(mi.orderedCoderFields[i].num)
 			fj := fields.ByNumber(mi.orderedCoderFields[j].num)
-			return fieldsort.Less(fi, fj)
+			return order.LegacyFieldOrder(fi, fj)
 		})
 	}
 
@@ -157,3 +190,28 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
 		mi.methods.Merge = mi.merge
 	}
 }
+
+// getUnknownBytes returns a *[]byte for the unknown fields.
+// It is the caller's responsibility to check whether the pointer is nil.
+// This function is specially designed to be inlineable.
+func (mi *MessageInfo) getUnknownBytes(p pointer) *[]byte {
+	if mi.unknownPtrKind {
+		return *p.Apply(mi.unknownOffset).BytesPtr()
+	} else {
+		return p.Apply(mi.unknownOffset).Bytes()
+	}
+}
+
+// mutableUnknownBytes returns a *[]byte for the unknown fields.
+// The returned pointer is guaranteed to not be nil.
+func (mi *MessageInfo) mutableUnknownBytes(p pointer) *[]byte {
+	if mi.unknownPtrKind {
+		bp := p.Apply(mi.unknownOffset).BytesPtr()
+		if *bp == nil {
+			*bp = new([]byte)
+		}
+		return *bp
+	} else {
+		return p.Apply(mi.unknownOffset).Bytes()
+	}
+}
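
The new getUnknownBytes and mutableUnknownBytes helpers exist because a message struct may keep its unknown fields either directly as a []byte or behind a *[]byte that is allocated lazily; callers want a single *[]byte view over both layouts, nil-checked by getUnknownBytes and guaranteed non-nil by mutableUnknownBytes. A reflection-free illustration of that shape difference (the struct and method names are invented for the example; the vendored code reaches the field through its pointer/offset machinery rather than ordinary Go fields):

package main

import "fmt"

// Two hypothetical message layouts: one stores its unknown bytes by value,
// the other behind a pointer that is allocated on first use.
type msgValueUnknown struct {
	unknown []byte
}

type msgPtrUnknown struct {
	unknown *[]byte
}

// mutableUnknown returns a writable *[]byte for either layout, mirroring
// the guarantee mutableUnknownBytes gives its callers: never nil.
func (m *msgValueUnknown) mutableUnknown() *[]byte { return &m.unknown }

func (m *msgPtrUnknown) mutableUnknown() *[]byte {
	if m.unknown == nil {
		m.unknown = new([]byte)
	}
	return m.unknown
}

func main() {
	a := &msgValueUnknown{}
	*a.mutableUnknown() = append(*a.mutableUnknown(), 0x0a, 0x00)

	b := &msgPtrUnknown{}
	*b.mutableUnknown() = append(*b.mutableUnknown(), 0x0a, 0x00)

	fmt.Println(len(a.unknown), len(*b.unknown)) // 2 2
}
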
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
index cfb68e12f..b7a23faf1 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
@@ -29,8 +29,9 @@ func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int)
 		size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts)
 	}
 
-	unknown := *p.Apply(mi.unknownOffset).Bytes()
-	size += messageset.SizeUnknown(unknown)
+	if u := mi.getUnknownBytes(p); u != nil {
+		size += messageset.SizeUnknown(*u)
+	}
 
 	return size
 }
@@ -69,10 +70,12 @@ func marshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts marshalOptions
 		}
 	}
 
-	unknown := *p.Apply(mi.unknownOffset).Bytes()
-	b, err := messageset.AppendUnknown(b, unknown)
-	if err != nil {
-		return b, err
+	if u := mi.getUnknownBytes(p); u != nil {
+		var err error
+		b, err = messageset.AppendUnknown(b, *u)
+		if err != nil {
+			return b, err
+		}
 	}
 
 	return b, nil
@@ -100,13 +103,13 @@ func unmarshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts unmarshalOpt
 		*ep = make(map[int32]ExtensionField)
 	}
 	ext := *ep
-	unknown := p.Apply(mi.unknownOffset).Bytes()
 	initialized := true
 	err = messageset.Unmarshal(b, true, func(num protowire.Number, v []byte) error {
 		o, err := mi.unmarshalExtension(v, num, protowire.BytesType, ext, opts)
 		if err == errUnknown {
-			*unknown = protowire.AppendTag(*unknown, num, protowire.BytesType)
-			*unknown = append(*unknown, v...)
+			u := mi.mutableUnknownBytes(p)
+			*u = protowire.AppendTag(*u, num, protowire.BytesType)
+			*u = append(*u, v...)
 			return nil
 		}
 		if !o.initialized {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
index 86f7dc3c9..90705e3ae 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
@@ -30,7 +30,7 @@ func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _
 	}
 	v, n := protowire.ConsumeVarint(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	p.v.Elem().SetInt(int64(v))
 	out.n = n
@@ -130,12 +130,12 @@ func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf
 	if wtyp == protowire.BytesType {
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return out, protowire.ParseError(n)
+			return out, errDecode
 		}
 		for len(b) > 0 {
 			v, n := protowire.ConsumeVarint(b)
 			if n < 0 {
-				return out, protowire.ParseError(n)
+				return out, errDecode
 			}
 			rv := reflect.New(s.Type().Elem()).Elem()
 			rv.SetInt(int64(v))
@@ -150,7 +150,7 @@ func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf
 	}
 	v, n := protowire.ConsumeVarint(b)
 	if n < 0 {
-		return out, protowire.ParseError(n)
+		return out, errDecode
 	}
 	rv := reflect.New(s.Type().Elem()).Elem()
 	rv.SetInt(int64(v))
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go
index 36a90dff3..acd61bb50 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go
@@ -423,6 +423,13 @@ func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value {
 	if v.Type() != c.goType {
 		panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
 	}
+	if c.isNonPointer() {
+		if v.CanAddr() {
+			v = v.Addr() // T => *T
+		} else {
+			v = reflect.Zero(reflect.PtrTo(v.Type()))
+		}
+	}
 	if m, ok := v.Interface().(pref.ProtoMessage); ok {
 		return pref.ValueOfMessage(m.ProtoReflect())
 	}
@@ -437,6 +444,16 @@ func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value {
 	} else {
 		rv = reflect.ValueOf(m.Interface())
 	}
+	if c.isNonPointer() {
+		if rv.Type() != reflect.PtrTo(c.goType) {
+			panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), reflect.PtrTo(c.goType)))
+		}
+		if !rv.IsNil() {
+			rv = rv.Elem() // *T => T
+		} else {
+			rv = reflect.Zero(rv.Type().Elem())
+		}
+	}
 	if rv.Type() != c.goType {
 		panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), c.goType))
 	}
@@ -451,6 +468,9 @@ func (c *messageConverter) IsValidPB(v pref.Value) bool {
 	} else {
 		rv = reflect.ValueOf(m.Interface())
 	}
+	if c.isNonPointer() {
+		return rv.Type() == reflect.PtrTo(c.goType)
+	}
 	return rv.Type() == c.goType
 }
 
@@ -459,9 +479,18 @@ func (c *messageConverter) IsValidGo(v reflect.Value) bool {
 }
 
 func (c *messageConverter) New() pref.Value {
+	if c.isNonPointer() {
+		return c.PBValueOf(reflect.New(c.goType).Elem())
+	}
 	return c.PBValueOf(reflect.New(c.goType.Elem()))
 }
 
 func (c *messageConverter) Zero() pref.Value {
 	return c.PBValueOf(reflect.Zero(c.goType))
 }
+
+// isNonPointer reports whether the type is a non-pointer type.
+// This never occurs for generated message types.
+func (c *messageConverter) isNonPointer() bool {
+	return c.goType.Kind() != reflect.Ptr
+}
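// A standalone sketch of the T <-> *T normalization that the non-pointer
// branches in PBValueOf and GoValueOf above perform: an addressable T is
// promoted to *T before wrapping, an unaddressable T degrades to a typed nil
// *T, and a *T is dereferenced back to T on the way out. The type M and the
// helpers (toPointer, fromPointer) are illustrative only.
package main

import (
	"fmt"
	"reflect"
)

func toPointer(v reflect.Value) reflect.Value {
	if v.CanAddr() {
		return v.Addr() // T => *T
	}
	return reflect.Zero(reflect.PtrTo(v.Type())) // unaddressable T => nil *T
}

func fromPointer(v reflect.Value) reflect.Value {
	if !v.IsNil() {
		return v.Elem() // *T => T
	}
	return reflect.Zero(v.Type().Elem()) // nil *T => zero T
}

func main() {
	type M struct{ A int }
	m := M{A: 1}

	addressable := reflect.ValueOf(&m).Elem() // addressable M
	p := toPointer(addressable)
	fmt.Println(p.Type(), fromPointer(p).Interface()) // *main.M {1}

	unaddressable := reflect.ValueOf(M{A: 2}) // unaddressable M
	q := toPointer(unaddressable)
	fmt.Println(q.IsNil(), fromPointer(q).Interface()) // true {0}
}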
diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go
index 85ba1d3b3..949dc49a6 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go
@@ -17,6 +17,8 @@ import (
 	piface "google.golang.org/protobuf/runtime/protoiface"
 )
 
+var errDecode = errors.New("cannot parse invalid wire-format data")
+
 type unmarshalOptions struct {
 	flags    protoiface.UnmarshalInputFlags
 	resolver interface {
@@ -100,13 +102,13 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.
 			var n int
 			tag, n = protowire.ConsumeVarint(b)
 			if n < 0 {
-				return out, protowire.ParseError(n)
+				return out, errDecode
 			}
 			b = b[n:]
 		}
 		var num protowire.Number
 		if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
-			return out, errors.New("invalid field number")
+			return out, errDecode
 		} else {
 			num = protowire.Number(n)
 		}
@@ -114,7 +116,7 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.
 
 		if wtyp == protowire.EndGroupType {
 			if num != groupTag {
-				return out, errors.New("mismatching end group marker")
+				return out, errDecode
 			}
 			groupTag = 0
 			break
@@ -170,10 +172,10 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.
 			}
 			n = protowire.ConsumeFieldValue(num, wtyp, b)
 			if n < 0 {
-				return out, protowire.ParseError(n)
+				return out, errDecode
 			}
 			if !opts.DiscardUnknown() && mi.unknownOffset.IsValid() {
-				u := p.Apply(mi.unknownOffset).Bytes()
+				u := mi.mutableUnknownBytes(p)
 				*u = protowire.AppendTag(*u, num, wtyp)
 				*u = append(*u, b[:n]...)
 			}
@@ -181,7 +183,7 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.
 		b = b[n:]
 	}
 	if groupTag != 0 {
-		return out, errors.New("missing end group marker")
+		return out, errDecode
 	}
 	if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) {
 		initialized = false
@@ -221,7 +223,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p
 					return out, nil
 				}
 			case ValidationInvalid:
-				return out, errors.New("invalid wire format")
+				return out, errDecode
 			case ValidationUnknown:
 			}
 		}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go
index 8c8a794c6..845c67d6e 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/encode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go
@@ -79,8 +79,9 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int
 		size += f.funcs.size(fptr, f, opts)
 	}
 	if mi.unknownOffset.IsValid() {
-		u := *p.Apply(mi.unknownOffset).Bytes()
-		size += len(u)
+		if u := mi.getUnknownBytes(p); u != nil {
+			size += len(*u)
+		}
 	}
 	if mi.sizecacheOffset.IsValid() {
 		if size > math.MaxInt32 {
@@ -141,8 +142,9 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt
 		}
 	}
 	if mi.unknownOffset.IsValid() && !mi.isMessageSet {
-		u := *p.Apply(mi.unknownOffset).Bytes()
-		b = append(b, u...)
+		if u := mi.getUnknownBytes(p); u != nil {
+			b = append(b, (*u)...)
+		}
 	}
 	return b, nil
 }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go
index c3d741c2f..e3fb0b578 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go
@@ -30,7 +30,7 @@ func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.M
 	if mv := (Export{}).protoMessageV2Of(m); mv != nil {
 		return mv.ProtoReflect().Type()
 	}
-	return legacyLoadMessageInfo(reflect.TypeOf(m), name)
+	return legacyLoadMessageType(reflect.TypeOf(m), name)
 }
 
 // UnmarshalJSONEnum unmarshals an enum from a JSON-encoded input.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
index 61757ce50..49e723161 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
@@ -154,7 +154,8 @@ func (x placeholderExtension) Number() pref.FieldNumber                   { retu
 func (x placeholderExtension) Cardinality() pref.Cardinality              { return 0 }
 func (x placeholderExtension) Kind() pref.Kind                            { return 0 }
 func (x placeholderExtension) HasJSONName() bool                          { return false }
-func (x placeholderExtension) JSONName() string                           { return "" }
+func (x placeholderExtension) JSONName() string                           { return "[" + string(x.name) + "]" }
+func (x placeholderExtension) TextName() string                           { return "[" + string(x.name) + "]" }
 func (x placeholderExtension) HasPresence() bool                          { return false }
 func (x placeholderExtension) HasOptionalKeyword() bool                   { return false }
 func (x placeholderExtension) IsExtension() bool                          { return true }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
index 06c68e117..3759b010c 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
@@ -24,14 +24,24 @@ import (
 // legacyWrapMessage wraps v as a protoreflect.Message,
 // where v must be a *struct kind and not implement the v2 API already.
 func legacyWrapMessage(v reflect.Value) pref.Message {
-	typ := v.Type()
-	if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct {
+	t := v.Type()
+	if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {
 		return aberrantMessage{v: v}
 	}
-	mt := legacyLoadMessageInfo(typ, "")
+	mt := legacyLoadMessageInfo(t, "")
 	return mt.MessageOf(v.Interface())
 }
 
+// legacyLoadMessageType dynamically loads a protoreflect.Type for t,
+// where t must not already implement the v2 API.
+// The provided name is used if it cannot be determined from the message.
+func legacyLoadMessageType(t reflect.Type, name pref.FullName) protoreflect.MessageType {
+	if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {
+		return aberrantMessageType{t}
+	}
+	return legacyLoadMessageInfo(t, name)
+}
+
 var legacyMessageTypeCache sync.Map // map[reflect.Type]*MessageInfo
 
 // legacyLoadMessageInfo dynamically loads a *MessageInfo for t,
@@ -49,8 +59,9 @@ func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo {
 		GoReflectType: t,
 	}
 
+	var hasMarshal, hasUnmarshal bool
 	v := reflect.Zero(t).Interface()
-	if _, ok := v.(legacyMarshaler); ok {
+	if _, hasMarshal = v.(legacyMarshaler); hasMarshal {
 		mi.methods.Marshal = legacyMarshal
 
 		// We have no way to tell whether the type's Marshal method
@@ -59,10 +70,10 @@ func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo {
 		// calling Marshal methods when present.
 		mi.methods.Flags |= piface.SupportMarshalDeterministic
 	}
-	if _, ok := v.(legacyUnmarshaler); ok {
+	if _, hasUnmarshal = v.(legacyUnmarshaler); hasUnmarshal {
 		mi.methods.Unmarshal = legacyUnmarshal
 	}
-	if _, ok := v.(legacyMerger); ok {
+	if _, hasMerge := v.(legacyMerger); hasMerge || (hasMarshal && hasUnmarshal) {
 		mi.methods.Merge = legacyMerge
 	}
 
@@ -75,7 +86,7 @@ func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo {
 var legacyMessageDescCache sync.Map // map[reflect.Type]protoreflect.MessageDescriptor
 
 // LegacyLoadMessageDesc returns a MessageDescriptor derived from the Go type,
-// which must be a *struct kind and not implement the v2 API already.
+// which should be a *struct kind and must not implement the v2 API already.
 //
 // This is exported for testing purposes.
 func LegacyLoadMessageDesc(t reflect.Type) pref.MessageDescriptor {
@@ -114,17 +125,19 @@ func legacyLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescr
 	// If the Go type has no fields, then this might be a proto3 empty message
 	// from before the size cache was added. If there are any fields, check to
 	// see that at least one of them looks like something we generated.
-	if nfield := t.Elem().NumField(); nfield > 0 {
-		hasProtoField := false
-		for i := 0; i < nfield; i++ {
-			f := t.Elem().Field(i)
-			if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") {
-				hasProtoField = true
-				break
+	if t.Elem().Kind() == reflect.Struct {
+		if nfield := t.Elem().NumField(); nfield > 0 {
+			hasProtoField := false
+			for i := 0; i < nfield; i++ {
+				f := t.Elem().Field(i)
+				if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") {
+					hasProtoField = true
+					break
+				}
+			}
+			if !hasProtoField {
+				return aberrantLoadMessageDesc(t, name)
 			}
-		}
-		if !hasProtoField {
-			return aberrantLoadMessageDesc(t, name)
 		}
 	}
 
@@ -370,7 +383,7 @@ type legacyMerger interface {
 	Merge(protoiface.MessageV1)
 }
 
-var legacyProtoMethods = &piface.Methods{
+var aberrantProtoMethods = &piface.Methods{
 	Marshal:   legacyMarshal,
 	Unmarshal: legacyUnmarshal,
 	Merge:     legacyMerge,
@@ -401,18 +414,40 @@ func legacyUnmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) {
 	v := in.Message.(unwrapper).protoUnwrap()
 	unmarshaler, ok := v.(legacyUnmarshaler)
 	if !ok {
-		return piface.UnmarshalOutput{}, errors.New("%T does not implement Marshal", v)
+		return piface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v)
 	}
 	return piface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf)
 }
 
 func legacyMerge(in piface.MergeInput) piface.MergeOutput {
+	// Check whether this supports the legacy merger.
 	dstv := in.Destination.(unwrapper).protoUnwrap()
 	merger, ok := dstv.(legacyMerger)
+	if ok {
+		merger.Merge(Export{}.ProtoMessageV1Of(in.Source))
+		return piface.MergeOutput{Flags: piface.MergeComplete}
+	}
+
+	// If legacy merger is unavailable, implement merge in terms of
+	// a marshal and unmarshal operation.
+	srcv := in.Source.(unwrapper).protoUnwrap()
+	marshaler, ok := srcv.(legacyMarshaler)
 	if !ok {
 		return piface.MergeOutput{}
 	}
-	merger.Merge(Export{}.ProtoMessageV1Of(in.Source))
+	dstv = in.Destination.(unwrapper).protoUnwrap()
+	unmarshaler, ok := dstv.(legacyUnmarshaler)
+	if !ok {
+		return piface.MergeOutput{}
+	}
+	b, err := marshaler.Marshal()
+	if err != nil {
+		return piface.MergeOutput{}
+	}
+	err = unmarshaler.Unmarshal(b)
+	if err != nil {
+		return piface.MergeOutput{}
+	}
 	return piface.MergeOutput{Flags: piface.MergeComplete}
 }
 
@@ -422,6 +457,9 @@ type aberrantMessageType struct {
 }
 
 func (mt aberrantMessageType) New() pref.Message {
+	if mt.t.Kind() == reflect.Ptr {
+		return aberrantMessage{reflect.New(mt.t.Elem())}
+	}
 	return aberrantMessage{reflect.Zero(mt.t)}
 }
 func (mt aberrantMessageType) Zero() pref.Message {
@@ -443,6 +481,17 @@ type aberrantMessage struct {
 	v reflect.Value
 }
 
+// Reset implements the v1 proto.Message.Reset method.
+func (m aberrantMessage) Reset() {
+	if mr, ok := m.v.Interface().(interface{ Reset() }); ok {
+		mr.Reset()
+		return
+	}
+	if m.v.Kind() == reflect.Ptr && !m.v.IsNil() {
+		m.v.Elem().Set(reflect.Zero(m.v.Type().Elem()))
+	}
+}
+
 func (m aberrantMessage) ProtoReflect() pref.Message {
 	return m
 }
@@ -454,33 +503,40 @@ func (m aberrantMessage) Type() pref.MessageType {
 	return aberrantMessageType{m.v.Type()}
 }
 func (m aberrantMessage) New() pref.Message {
+	if m.v.Type().Kind() == reflect.Ptr {
+		return aberrantMessage{reflect.New(m.v.Type().Elem())}
+	}
 	return aberrantMessage{reflect.Zero(m.v.Type())}
 }
 func (m aberrantMessage) Interface() pref.ProtoMessage {
 	return m
 }
 func (m aberrantMessage) Range(f func(pref.FieldDescriptor, pref.Value) bool) {
+	return
 }
 func (m aberrantMessage) Has(pref.FieldDescriptor) bool {
-	panic("invalid field descriptor")
+	return false
 }
 func (m aberrantMessage) Clear(pref.FieldDescriptor) {
-	panic("invalid field descriptor")
+	panic("invalid Message.Clear on " + string(m.Descriptor().FullName()))
 }
-func (m aberrantMessage) Get(pref.FieldDescriptor) pref.Value {
-	panic("invalid field descriptor")
+func (m aberrantMessage) Get(fd pref.FieldDescriptor) pref.Value {
+	if fd.Default().IsValid() {
+		return fd.Default()
+	}
+	panic("invalid Message.Get on " + string(m.Descriptor().FullName()))
 }
 func (m aberrantMessage) Set(pref.FieldDescriptor, pref.Value) {
-	panic("invalid field descriptor")
+	panic("invalid Message.Set on " + string(m.Descriptor().FullName()))
 }
 func (m aberrantMessage) Mutable(pref.FieldDescriptor) pref.Value {
-	panic("invalid field descriptor")
+	panic("invalid Message.Mutable on " + string(m.Descriptor().FullName()))
 }
 func (m aberrantMessage) NewField(pref.FieldDescriptor) pref.Value {
-	panic("invalid field descriptor")
+	panic("invalid Message.NewField on " + string(m.Descriptor().FullName()))
 }
 func (m aberrantMessage) WhichOneof(pref.OneofDescriptor) pref.FieldDescriptor {
-	panic("invalid oneof descriptor")
+	panic("invalid Message.WhichOneof descriptor on " + string(m.Descriptor().FullName()))
 }
 func (m aberrantMessage) GetUnknown() pref.RawFields {
 	return nil
@@ -489,13 +545,13 @@ func (m aberrantMessage) SetUnknown(pref.RawFields) {
 	// SetUnknown discards its input on messages which don't support unknown field storage.
 }
 func (m aberrantMessage) IsValid() bool {
-	// An invalid message is a read-only, empty message. Since we don't know anything
-	// about the alleged contents of this message, we can't say with confidence that
-	// it is invalid in this sense. Therefore, report it as valid.
-	return true
+	if m.v.Kind() == reflect.Ptr {
+		return !m.v.IsNil()
+	}
+	return false
 }
 func (m aberrantMessage) ProtoMethods() *piface.Methods {
-	return legacyProtoMethods
+	return aberrantProtoMethods
 }
 func (m aberrantMessage) protoUnwrap() interface{} {
 	return m.v.Interface()
diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go
index cdc4267df..c65bbc044 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/merge.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go
@@ -77,9 +77,9 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) {
 		}
 	}
 	if mi.unknownOffset.IsValid() {
-		du := dst.Apply(mi.unknownOffset).Bytes()
-		su := src.Apply(mi.unknownOffset).Bytes()
-		if len(*su) > 0 {
+		su := mi.getUnknownBytes(src)
+		if su != nil && len(*su) > 0 {
+			du := mi.mutableUnknownBytes(dst)
 			*du = append(*du, *su...)
 		}
 	}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go
index c026a9818..a104e28e8 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message.go
@@ -15,6 +15,7 @@ import (
 	"google.golang.org/protobuf/internal/genid"
 	"google.golang.org/protobuf/reflect/protoreflect"
 	pref "google.golang.org/protobuf/reflect/protoreflect"
+	preg "google.golang.org/protobuf/reflect/protoregistry"
 )
 
 // MessageInfo provides protobuf related functionality for a given Go type
@@ -109,22 +110,29 @@ func (mi *MessageInfo) getPointer(m pref.Message) (p pointer, ok bool) {
 type (
 	SizeCache       = int32
 	WeakFields      = map[int32]protoreflect.ProtoMessage
-	UnknownFields   = []byte
+	UnknownFields   = unknownFieldsA // TODO: switch to unknownFieldsB
+	unknownFieldsA  = []byte
+	unknownFieldsB  = *[]byte
 	ExtensionFields = map[int32]ExtensionField
 )
 
 var (
 	sizecacheType       = reflect.TypeOf(SizeCache(0))
 	weakFieldsType      = reflect.TypeOf(WeakFields(nil))
-	unknownFieldsType   = reflect.TypeOf(UnknownFields(nil))
+	unknownFieldsAType  = reflect.TypeOf(unknownFieldsA(nil))
+	unknownFieldsBType  = reflect.TypeOf(unknownFieldsB(nil))
 	extensionFieldsType = reflect.TypeOf(ExtensionFields(nil))
 )
 
 type structInfo struct {
 	sizecacheOffset offset
+	sizecacheType   reflect.Type
 	weakOffset      offset
+	weakType        reflect.Type
 	unknownOffset   offset
+	unknownType     reflect.Type
 	extensionOffset offset
+	extensionType   reflect.Type
 
 	fieldsByNumber        map[pref.FieldNumber]reflect.StructField
 	oneofsByName          map[pref.Name]reflect.StructField
@@ -151,18 +159,22 @@ fieldLoop:
 		case genid.SizeCache_goname, genid.SizeCacheA_goname:
 			if f.Type == sizecacheType {
 				si.sizecacheOffset = offsetOf(f, mi.Exporter)
+				si.sizecacheType = f.Type
 			}
 		case genid.WeakFields_goname, genid.WeakFieldsA_goname:
 			if f.Type == weakFieldsType {
 				si.weakOffset = offsetOf(f, mi.Exporter)
+				si.weakType = f.Type
 			}
 		case genid.UnknownFields_goname, genid.UnknownFieldsA_goname:
-			if f.Type == unknownFieldsType {
+			if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType {
 				si.unknownOffset = offsetOf(f, mi.Exporter)
+				si.unknownType = f.Type
 			}
 		case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname:
 			if f.Type == extensionFieldsType {
 				si.extensionOffset = offsetOf(f, mi.Exporter)
+				si.extensionType = f.Type
 			}
 		default:
 			for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") {
@@ -212,4 +224,53 @@ func (mi *MessageInfo) New() protoreflect.Message {
 func (mi *MessageInfo) Zero() protoreflect.Message {
 	return mi.MessageOf(reflect.Zero(mi.GoReflectType).Interface())
 }
-func (mi *MessageInfo) Descriptor() protoreflect.MessageDescriptor { return mi.Desc }
+func (mi *MessageInfo) Descriptor() protoreflect.MessageDescriptor {
+	return mi.Desc
+}
+func (mi *MessageInfo) Enum(i int) protoreflect.EnumType {
+	mi.init()
+	fd := mi.Desc.Fields().Get(i)
+	return Export{}.EnumTypeOf(mi.fieldTypes[fd.Number()])
+}
+func (mi *MessageInfo) Message(i int) protoreflect.MessageType {
+	mi.init()
+	fd := mi.Desc.Fields().Get(i)
+	switch {
+	case fd.IsWeak():
+		mt, _ := preg.GlobalTypes.FindMessageByName(fd.Message().FullName())
+		return mt
+	case fd.IsMap():
+		return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]}
+	default:
+		return Export{}.MessageTypeOf(mi.fieldTypes[fd.Number()])
+	}
+}
+
+type mapEntryType struct {
+	desc    protoreflect.MessageDescriptor
+	valType interface{} // zero value of enum or message type
+}
+
+func (mt mapEntryType) New() protoreflect.Message {
+	return nil
+}
+func (mt mapEntryType) Zero() protoreflect.Message {
+	return nil
+}
+func (mt mapEntryType) Descriptor() protoreflect.MessageDescriptor {
+	return mt.desc
+}
+func (mt mapEntryType) Enum(i int) protoreflect.EnumType {
+	fd := mt.desc.Fields().Get(i)
+	if fd.Enum() == nil {
+		return nil
+	}
+	return Export{}.EnumTypeOf(mt.valType)
+}
+func (mt mapEntryType) Message(i int) protoreflect.MessageType {
+	fd := mt.desc.Fields().Get(i)
+	if fd.Message() == nil {
+		return nil
+	}
+	return Export{}.MessageTypeOf(mt.valType)
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
index 0f4b8db76..9488b7261 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
@@ -8,6 +8,7 @@ import (
 	"fmt"
 	"reflect"
 
+	"google.golang.org/protobuf/internal/detrand"
 	"google.golang.org/protobuf/internal/pragma"
 	pref "google.golang.org/protobuf/reflect/protoreflect"
 )
@@ -16,6 +17,11 @@ type reflectMessageInfo struct {
 	fields map[pref.FieldNumber]*fieldInfo
 	oneofs map[pref.Name]*oneofInfo
 
+	// fieldTypes contains the zero value of an enum or message field.
+	// For lists, it contains the element type.
+	// For maps, it contains the entry value type.
+	fieldTypes map[pref.FieldNumber]interface{}
+
 	// denseFields is a subset of fields where:
 	//	0 < fieldDesc.Number() < len(denseFields)
 	// It provides faster access to the fieldInfo, but may be incomplete.
@@ -36,6 +42,7 @@ func (mi *MessageInfo) makeReflectFuncs(t reflect.Type, si structInfo) {
 	mi.makeKnownFieldsFunc(si)
 	mi.makeUnknownFieldsFunc(t, si)
 	mi.makeExtensionFieldsFunc(t, si)
+	mi.makeFieldTypes(si)
 }
 
 // makeKnownFieldsFunc generates functions for operations that can be performed
@@ -51,17 +58,23 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) {
 	for i := 0; i < fds.Len(); i++ {
 		fd := fds.Get(i)
 		fs := si.fieldsByNumber[fd.Number()]
+		isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic()
+		if isOneof {
+			fs = si.oneofsByName[fd.ContainingOneof().Name()]
+		}
 		var fi fieldInfo
 		switch {
-		case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
-			fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()])
+		case fs.Type == nil:
+			fi = fieldInfoForMissing(fd) // never occurs for officially generated message types
+		case isOneof:
+			fi = fieldInfoForOneof(fd, fs, mi.Exporter, si.oneofWrappersByNumber[fd.Number()])
 		case fd.IsMap():
 			fi = fieldInfoForMap(fd, fs, mi.Exporter)
 		case fd.IsList():
 			fi = fieldInfoForList(fd, fs, mi.Exporter)
 		case fd.IsWeak():
 			fi = fieldInfoForWeakMessage(fd, si.weakOffset)
-		case fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind:
+		case fd.Message() != nil:
 			fi = fieldInfoForMessage(fd, fs, mi.Exporter)
 		default:
 			fi = fieldInfoForScalar(fd, fs, mi.Exporter)
@@ -92,27 +105,53 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) {
 			i++
 		}
 	}
+
+	// Introduce instability to iteration order, but keep it deterministic.
+	if len(mi.rangeInfos) > 1 && detrand.Bool() {
+		i := detrand.Intn(len(mi.rangeInfos) - 1)
+		mi.rangeInfos[i], mi.rangeInfos[i+1] = mi.rangeInfos[i+1], mi.rangeInfos[i]
+	}
 }
 
 func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) {
-	mi.getUnknown = func(pointer) pref.RawFields { return nil }
-	mi.setUnknown = func(pointer, pref.RawFields) { return }
-	if si.unknownOffset.IsValid() {
+	switch {
+	case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsAType:
+		// Handle as []byte.
 		mi.getUnknown = func(p pointer) pref.RawFields {
 			if p.IsNil() {
 				return nil
 			}
-			rv := p.Apply(si.unknownOffset).AsValueOf(unknownFieldsType)
-			return pref.RawFields(*rv.Interface().(*[]byte))
+			return *p.Apply(mi.unknownOffset).Bytes()
 		}
 		mi.setUnknown = func(p pointer, b pref.RawFields) {
 			if p.IsNil() {
 				panic("invalid SetUnknown on nil Message")
 			}
-			rv := p.Apply(si.unknownOffset).AsValueOf(unknownFieldsType)
-			*rv.Interface().(*[]byte) = []byte(b)
+			*p.Apply(mi.unknownOffset).Bytes() = b
 		}
-	} else {
+	case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsBType:
+		// Handle as *[]byte.
+		mi.getUnknown = func(p pointer) pref.RawFields {
+			if p.IsNil() {
+				return nil
+			}
+			bp := p.Apply(mi.unknownOffset).BytesPtr()
+			if *bp == nil {
+				return nil
+			}
+			return **bp
+		}
+		mi.setUnknown = func(p pointer, b pref.RawFields) {
+			if p.IsNil() {
+				panic("invalid SetUnknown on nil Message")
+			}
+			bp := p.Apply(mi.unknownOffset).BytesPtr()
+			if *bp == nil {
+				*bp = new([]byte)
+			}
+			**bp = b
+		}
+	default:
 		mi.getUnknown = func(pointer) pref.RawFields {
 			return nil
 		}
@@ -139,6 +178,58 @@ func (mi *MessageInfo) makeExtensionFieldsFunc(t reflect.Type, si structInfo) {
 		}
 	}
 }
+func (mi *MessageInfo) makeFieldTypes(si structInfo) {
+	md := mi.Desc
+	fds := md.Fields()
+	for i := 0; i < fds.Len(); i++ {
+		var ft reflect.Type
+		fd := fds.Get(i)
+		fs := si.fieldsByNumber[fd.Number()]
+		isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic()
+		if isOneof {
+			fs = si.oneofsByName[fd.ContainingOneof().Name()]
+		}
+		var isMessage bool
+		switch {
+		case fs.Type == nil:
+			continue // never occurs for officially generated message types
+		case isOneof:
+			if fd.Enum() != nil || fd.Message() != nil {
+				ft = si.oneofWrappersByNumber[fd.Number()].Field(0).Type
+			}
+		case fd.IsMap():
+			if fd.MapValue().Enum() != nil || fd.MapValue().Message() != nil {
+				ft = fs.Type.Elem()
+			}
+			isMessage = fd.MapValue().Message() != nil
+		case fd.IsList():
+			if fd.Enum() != nil || fd.Message() != nil {
+				ft = fs.Type.Elem()
+			}
+			isMessage = fd.Message() != nil
+		case fd.Enum() != nil:
+			ft = fs.Type
+			if fd.HasPresence() && ft.Kind() == reflect.Ptr {
+				ft = ft.Elem()
+			}
+		case fd.Message() != nil:
+			ft = fs.Type
+			if fd.IsWeak() {
+				ft = nil
+			}
+			isMessage = true
+		}
+		if isMessage && ft != nil && ft.Kind() != reflect.Ptr {
+			ft = reflect.PtrTo(ft) // never occurs for officially generated message types
+		}
+		if ft != nil {
+			if mi.fieldTypes == nil {
+				mi.fieldTypes = make(map[pref.FieldNumber]interface{})
+			}
+			mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface()
+		}
+	}
+}
 
 type extensionMap map[int32]ExtensionField
 
@@ -306,7 +397,6 @@ var (
 // pointer to a named Go struct. If the provided type has a ProtoReflect method,
 // it must be implemented by calling this method.
 func (mi *MessageInfo) MessageOf(m interface{}) pref.Message {
-	// TODO: Switch the input to be an opaque Pointer.
 	if reflect.TypeOf(m) != mi.GoReflectType {
 		panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType))
 	}
@@ -320,6 +410,17 @@ func (mi *MessageInfo) MessageOf(m interface{}) pref.Message {
 func (m *messageReflectWrapper) pointer() pointer          { return m.p }
 func (m *messageReflectWrapper) messageInfo() *MessageInfo { return m.mi }
 
+// Reset implements the v1 proto.Message.Reset method.
+func (m *messageIfaceWrapper) Reset() {
+	if mr, ok := m.protoUnwrap().(interface{ Reset() }); ok {
+		mr.Reset()
+		return
+	}
+	rv := reflect.ValueOf(m.protoUnwrap())
+	if rv.Kind() == reflect.Ptr && !rv.IsNil() {
+		rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
+	}
+}
 func (m *messageIfaceWrapper) ProtoReflect() pref.Message {
 	return (*messageReflectWrapper)(m)
 }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
index 23124a86e..343cf8721 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
@@ -28,6 +28,39 @@ type fieldInfo struct {
 	newField   func() pref.Value
 }
 
+func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo {
+	// This never occurs for generated message types.
+	// It implies that a hand-crafted type has missing Go fields
+	// for specific protobuf message fields.
+	return fieldInfo{
+		fieldDesc: fd,
+		has: func(p pointer) bool {
+			return false
+		},
+		clear: func(p pointer) {
+			panic("missing Go struct field for " + string(fd.FullName()))
+		},
+		get: func(p pointer) pref.Value {
+			return fd.Default()
+		},
+		set: func(p pointer, v pref.Value) {
+			panic("missing Go struct field for " + string(fd.FullName()))
+		},
+		mutable: func(p pointer) pref.Value {
+			panic("missing Go struct field for " + string(fd.FullName()))
+		},
+		newMessage: func() pref.Message {
+			panic("missing Go struct field for " + string(fd.FullName()))
+		},
+		newField: func() pref.Value {
+			if v := fd.Default(); v.IsValid() {
+				return v
+			}
+			panic("missing Go struct field for " + string(fd.FullName()))
+		},
+	}
+}
+
 func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo {
 	ft := fs.Type
 	if ft.Kind() != reflect.Interface {
@@ -97,7 +130,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export
 				rv.Set(reflect.New(ot))
 			}
 			rv = rv.Elem().Elem().Field(0)
-			if rv.IsNil() {
+			if rv.Kind() == reflect.Ptr && rv.IsNil() {
 				rv.Set(conv.GoValueOf(pref.ValueOfMessage(conv.New().Message())))
 			}
 			return conv.PBValueOf(rv)
@@ -225,7 +258,10 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor
 	isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8
 	if nullable {
 		if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice {
-			panic(fmt.Sprintf("field %v has invalid type: got %v, want pointer", fd.FullName(), ft))
+			// This never occurs for generated message types.
+			// Despite the protobuf type system specifying presence,
+			// the Go field type cannot represent it.
+			nullable = false
 		}
 		if ft.Kind() == reflect.Ptr {
 			ft = ft.Elem()
@@ -388,6 +424,9 @@ func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x expo
 				return false
 			}
 			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if fs.Type.Kind() != reflect.Ptr {
+				return !isZero(rv)
+			}
 			return !rv.IsNil()
 		},
 		clear: func(p pointer) {
@@ -404,13 +443,13 @@ func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x expo
 		set: func(p pointer, v pref.Value) {
 			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
 			rv.Set(conv.GoValueOf(v))
-			if rv.IsNil() {
+			if fs.Type.Kind() == reflect.Ptr && rv.IsNil() {
 				panic(fmt.Sprintf("field %v has invalid nil pointer", fd.FullName()))
 			}
 		},
 		mutable: func(p pointer) pref.Value {
 			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
-			if rv.IsNil() {
+			if fs.Type.Kind() == reflect.Ptr && rv.IsNil() {
 				rv.Set(conv.GoValueOf(conv.New()))
 			}
 			return conv.PBValueOf(rv)
@@ -464,3 +503,41 @@ func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInf
 	}
 	return oi
 }
+
+// isZero is identical to reflect.Value.IsZero.
+// TODO: Remove this when Go1.13 is the minimally supported Go version.
+func isZero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return math.Float64bits(v.Float()) == 0
+	case reflect.Complex64, reflect.Complex128:
+		c := v.Complex()
+		return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0
+	case reflect.Array:
+		for i := 0; i < v.Len(); i++ {
+			if !isZero(v.Index(i)) {
+				return false
+			}
+		}
+		return true
+	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
+		return v.IsNil()
+	case reflect.String:
+		return v.Len() == 0
+	case reflect.Struct:
+		for i := 0; i < v.NumField(); i++ {
+			if !isZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	default:
+		panic(&reflect.ValueError{"reflect.Value.IsZero", v.Kind()})
+	}
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
index 67b4ede67..9e3ed821e 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
@@ -121,6 +121,7 @@ func (p pointer) String() *string          { return p.v.Interface().(*string) }
 func (p pointer) StringPtr() **string      { return p.v.Interface().(**string) }
 func (p pointer) StringSlice() *[]string   { return p.v.Interface().(*[]string) }
 func (p pointer) Bytes() *[]byte           { return p.v.Interface().(*[]byte) }
+func (p pointer) BytesPtr() **[]byte       { return p.v.Interface().(**[]byte) }
 func (p pointer) BytesSlice() *[][]byte    { return p.v.Interface().(*[][]byte) }
 func (p pointer) WeakFields() *weakFields  { return (*weakFields)(p.v.Interface().(*WeakFields)) }
 func (p pointer) Extensions() *map[int32]ExtensionField {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
index 088aa85d4..9ecf23a85 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
@@ -109,6 +109,7 @@ func (p pointer) String() *string                       { return (*string)(p.p)
 func (p pointer) StringPtr() **string                   { return (**string)(p.p) }
 func (p pointer) StringSlice() *[]string                { return (*[]string)(p.p) }
 func (p pointer) Bytes() *[]byte                        { return (*[]byte)(p.p) }
+func (p pointer) BytesPtr() **[]byte                    { return (**[]byte)(p.p) }
 func (p pointer) BytesSlice() *[][]byte                 { return (*[][]byte)(p.p) }
 func (p pointer) WeakFields() *weakFields               { return (*weakFields)(p.p) }
 func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) }
diff --git a/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go b/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go
deleted file mode 100644
index a3de1cf32..000000000
--- a/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package mapsort provides sorted access to maps.
-package mapsort
-
-import (
-	"sort"
-
-	"google.golang.org/protobuf/reflect/protoreflect"
-)
-
-// Range iterates over every map entry in sorted key order,
-// calling f for each key and value encountered.
-func Range(mapv protoreflect.Map, keyKind protoreflect.Kind, f func(protoreflect.MapKey, protoreflect.Value) bool) {
-	var keys []protoreflect.MapKey
-	mapv.Range(func(key protoreflect.MapKey, _ protoreflect.Value) bool {
-		keys = append(keys, key)
-		return true
-	})
-	sort.Slice(keys, func(i, j int) bool {
-		switch keyKind {
-		case protoreflect.BoolKind:
-			return !keys[i].Bool() && keys[j].Bool()
-		case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind,
-			protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
-			return keys[i].Int() < keys[j].Int()
-		case protoreflect.Uint32Kind, protoreflect.Fixed32Kind,
-			protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
-			return keys[i].Uint() < keys[j].Uint()
-		case protoreflect.StringKind:
-			return keys[i].String() < keys[j].String()
-		default:
-			panic("invalid kind: " + keyKind.String())
-		}
-	})
-	for _, key := range keys {
-		if !f(key, mapv.Get(key)) {
-			break
-		}
-	}
-}
diff --git a/vendor/google.golang.org/protobuf/internal/order/order.go b/vendor/google.golang.org/protobuf/internal/order/order.go
new file mode 100644
index 000000000..2a24953f6
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/order/order.go
@@ -0,0 +1,89 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package order
+
+import (
+	pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// FieldOrder specifies the ordering to visit message fields.
+// It is a function that reports whether x is ordered before y.
+type FieldOrder func(x, y pref.FieldDescriptor) bool
+
+var (
+	// AnyFieldOrder specifies no specific field ordering.
+	AnyFieldOrder FieldOrder = nil
+
+	// LegacyFieldOrder sorts fields in the same ordering as emitted by
+	// wire serialization in the github.com/golang/protobuf implementation.
+	LegacyFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool {
+		ox, oy := x.ContainingOneof(), y.ContainingOneof()
+		inOneof := func(od pref.OneofDescriptor) bool {
+			return od != nil && !od.IsSynthetic()
+		}
+
+		// Extension fields sort before non-extension fields.
+		if x.IsExtension() != y.IsExtension() {
+			return x.IsExtension() && !y.IsExtension()
+		}
+		// Fields not within a oneof sort before those within a oneof.
+		if inOneof(ox) != inOneof(oy) {
+			return !inOneof(ox) && inOneof(oy)
+		}
+		// Fields in disjoint oneof sets are sorted by declaration index.
+		if ox != nil && oy != nil && ox != oy {
+			return ox.Index() < oy.Index()
+		}
+		// Fields sorted by field number.
+		return x.Number() < y.Number()
+	}
+
+	// NumberFieldOrder sorts fields by their field number.
+	NumberFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool {
+		return x.Number() < y.Number()
+	}
+
+	// IndexNameFieldOrder sorts non-extension fields before extension fields.
+	// Non-extensions are sorted according to their declaration index.
+	// Extensions are sorted according to their full name.
+	IndexNameFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool {
+		// Non-extension fields sort before extension fields.
+		if x.IsExtension() != y.IsExtension() {
+			return !x.IsExtension() && y.IsExtension()
+		}
+		// Extensions sorted by fullname.
+		if x.IsExtension() && y.IsExtension() {
+			return x.FullName() < y.FullName()
+		}
+		// Non-extensions sorted by declaration index.
+		return x.Index() < y.Index()
+	}
+)
+
+// KeyOrder specifies the ordering to visit map entries.
+// It is a function that reports whether x is ordered before y.
+type KeyOrder func(x, y pref.MapKey) bool
+
+var (
+	// AnyKeyOrder specifies no specific key ordering.
+	AnyKeyOrder KeyOrder = nil
+
+	// GenericKeyOrder sorts false before true, numeric keys in ascending order,
+	// and strings in lexicographical ordering according to UTF-8 codepoints.
+	GenericKeyOrder KeyOrder = func(x, y pref.MapKey) bool {
+		switch x.Interface().(type) {
+		case bool:
+			return !x.Bool() && y.Bool()
+		case int32, int64:
+			return x.Int() < y.Int()
+		case uint32, uint64:
+			return x.Uint() < y.Uint()
+		case string:
+			return x.String() < y.String()
+		default:
+			panic("invalid map key type")
+		}
+	}
+)
diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go
new file mode 100644
index 000000000..c8090e0c5
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/order/range.go
@@ -0,0 +1,115 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package order provides ordered access to messages and maps.
+package order
+
+import (
+	"sort"
+	"sync"
+
+	pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type messageField struct {
+	fd pref.FieldDescriptor
+	v  pref.Value
+}
+
+var messageFieldPool = sync.Pool{
+	New: func() interface{} { return new([]messageField) },
+}
+
+type (
+	// FieldRanger is an interface for visiting all fields in a message.
+	// The protoreflect.Message type implements this interface.
+	FieldRanger interface{ Range(VisitField) }
+	// VisitField is called every time a message field is visited.
+	VisitField = func(pref.FieldDescriptor, pref.Value) bool
+)
+
+// RangeFields iterates over the fields of fs according to the specified order.
+func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) {
+	if less == nil {
+		fs.Range(fn)
+		return
+	}
+
+	// Obtain a pre-allocated scratch buffer.
+	p := messageFieldPool.Get().(*[]messageField)
+	fields := (*p)[:0]
+	defer func() {
+		if cap(fields) < 1024 {
+			*p = fields
+			messageFieldPool.Put(p)
+		}
+	}()
+
+	// Collect all fields in the message and sort them.
+	fs.Range(func(fd pref.FieldDescriptor, v pref.Value) bool {
+		fields = append(fields, messageField{fd, v})
+		return true
+	})
+	sort.Slice(fields, func(i, j int) bool {
+		return less(fields[i].fd, fields[j].fd)
+	})
+
+	// Visit the fields in the specified ordering.
+	for _, f := range fields {
+		if !fn(f.fd, f.v) {
+			return
+		}
+	}
+}
+
+type mapEntry struct {
+	k pref.MapKey
+	v pref.Value
+}
+
+var mapEntryPool = sync.Pool{
+	New: func() interface{} { return new([]mapEntry) },
+}
+
+type (
+	// EntryRanger is an interface for visiting all entries in a map.
+	// The protoreflect.Map type implements this interface.
+	EntryRanger interface{ Range(VisitEntry) }
+	// VisitEntry is called every time a map entry is visited.
+	VisitEntry = func(pref.MapKey, pref.Value) bool
+)
+
+// RangeEntries iterates over the entries of es according to the specified order.
+func RangeEntries(es EntryRanger, less KeyOrder, fn VisitEntry) {
+	if less == nil {
+		es.Range(fn)
+		return
+	}
+
+	// Obtain a pre-allocated scratch buffer.
+	p := mapEntryPool.Get().(*[]mapEntry)
+	entries := (*p)[:0]
+	defer func() {
+		if cap(entries) < 1024 {
+			*p = entries
+			mapEntryPool.Put(p)
+		}
+	}()
+
+	// Collect all entries in the map and sort them.
+	es.Range(func(k pref.MapKey, v pref.Value) bool {
+		entries = append(entries, mapEntry{k, v})
+		return true
+	})
+	sort.Slice(entries, func(i, j int) bool {
+		return less(entries[i].k, entries[j].k)
+	})
+
+	// Visit the entries in the specified ordering.
+	for _, e := range entries {
+		if !fn(e.k, e.v) {
+			return
+		}
+	}
+}
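// A minimal standalone sketch of the collect-sort-visit pattern that
// RangeFields and RangeEntries above implement, applied to an ordinary Go map
// instead of a protoreflect.Map. The names entry and rangeSorted are
// illustrative; the sync.Pool reuse of the scratch buffer is omitted here.
package main

import (
	"fmt"
	"sort"
)

type entry struct {
	k string
	v int
}

// rangeSorted mirrors RangeEntries: with a nil comparator it visits in native
// (unspecified) map order; otherwise it buffers the entries, sorts them with
// the comparator, and replays the visit callback until it returns false.
func rangeSorted(m map[string]int, less func(x, y string) bool, fn func(string, int) bool) {
	if less == nil {
		for k, v := range m {
			if !fn(k, v) {
				return
			}
		}
		return
	}
	entries := make([]entry, 0, len(m))
	for k, v := range m {
		entries = append(entries, entry{k, v})
	}
	sort.Slice(entries, func(i, j int) bool { return less(entries[i].k, entries[j].k) })
	for _, e := range entries {
		if !fn(e.k, e.v) {
			return
		}
	}
}

func main() {
	m := map[string]int{"b": 2, "c": 3, "a": 1}
	rangeSorted(m, func(x, y string) bool { return x < y }, func(k string, v int) bool {
		fmt.Println(k, v) // prints a 1, b 2, c 3 in order
		return true
	})
}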
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index 72cf770b4..5879131da 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -52,7 +52,7 @@ import (
 //	10. Send out the CL for review and submit it.
 const (
 	Major      = 1
-	Minor      = 25
+	Minor      = 26
 	Patch      = 0
 	PreRelease = ""
 )
diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
index 42fc5195e..49f9b8c88 100644
--- a/vendor/google.golang.org/protobuf/proto/decode.go
+++ b/vendor/google.golang.org/protobuf/proto/decode.go
@@ -45,12 +45,14 @@ type UnmarshalOptions struct {
 }
 
 // Unmarshal parses the wire-format message in b and places the result in m.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
 func Unmarshal(b []byte, m Message) error {
 	_, err := UnmarshalOptions{}.unmarshal(b, m.ProtoReflect())
 	return err
 }
 
 // Unmarshal parses the wire-format message in b and places the result in m.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
 func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error {
 	_, err := o.unmarshal(b, m.ProtoReflect())
 	return err
@@ -116,10 +118,10 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message)
 		// Parse the tag (field number and wire type).
 		num, wtyp, tagLen := protowire.ConsumeTag(b)
 		if tagLen < 0 {
-			return protowire.ParseError(tagLen)
+			return errDecode
 		}
 		if num > protowire.MaxValidNumber {
-			return errors.New("invalid field number")
+			return errDecode
 		}
 
 		// Find the field descriptor for this field number.
@@ -159,7 +161,7 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message)
 			}
 			valLen = protowire.ConsumeFieldValue(num, wtyp, b[tagLen:])
 			if valLen < 0 {
-				return protowire.ParseError(valLen)
+				return errDecode
 			}
 			if !o.DiscardUnknown {
 				m.SetUnknown(append(m.GetUnknown(), b[:tagLen+valLen]...))
@@ -194,7 +196,7 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto
 	}
 	b, n = protowire.ConsumeBytes(b)
 	if n < 0 {
-		return 0, protowire.ParseError(n)
+		return 0, errDecode
 	}
 	var (
 		keyField = fd.MapKey()
@@ -213,10 +215,10 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto
 	for len(b) > 0 {
 		num, wtyp, n := protowire.ConsumeTag(b)
 		if n < 0 {
-			return 0, protowire.ParseError(n)
+			return 0, errDecode
 		}
 		if num > protowire.MaxValidNumber {
-			return 0, errors.New("invalid field number")
+			return 0, errDecode
 		}
 		b = b[n:]
 		err = errUnknown
@@ -246,7 +248,7 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto
 		if err == errUnknown {
 			n = protowire.ConsumeFieldValue(num, wtyp, b)
 			if n < 0 {
-				return 0, protowire.ParseError(n)
+				return 0, errDecode
 			}
 		} else if err != nil {
 			return 0, err
@@ -272,3 +274,5 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto
 // to the unknown field set of a message. It is never returned from an exported
 // function.
 var errUnknown = errors.New("BUG: internal error (unknown)")
+
+var errDecode = errors.New("cannot parse invalid wire-format data")
diff --git a/vendor/google.golang.org/protobuf/proto/decode_gen.go b/vendor/google.golang.org/protobuf/proto/decode_gen.go
index d6dc904dc..301eeb20f 100644
--- a/vendor/google.golang.org/protobuf/proto/decode_gen.go
+++ b/vendor/google.golang.org/protobuf/proto/decode_gen.go
@@ -27,7 +27,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot
 		}
 		v, n := protowire.ConsumeVarint(b)
 		if n < 0 {
-			return val, 0, protowire.ParseError(n)
+			return val, 0, errDecode
 		}
 		return protoreflect.ValueOfBool(protowire.DecodeBool(v)), n, nil
 	case protoreflect.EnumKind:
@@ -36,7 +36,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot
 		}
 		v, n := protowire.ConsumeVarint(b)
 		if n < 0 {
-			return val, 0, protowire.ParseError(n)
+			return val, 0, errDecode
 		}
 		return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), n, nil
 	case protoreflect.Int32Kind:
@@ -45,7 +45,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot
 		}
 		v, n := protowire.ConsumeVarint(b)
 		if n < 0 {
-			return val, 0, protowire.ParseError(n)
+			return val, 0, errDecode
 		}
 		return protoreflect.ValueOfInt32(int32(v)), n, nil
 	case protoreflect.Sint32Kind:
@@ -54,7 +54,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot
 		}
 		v, n := protowire.ConsumeVarint(b)
 		if n < 0 {
-			return val, 0, protowire.ParseError(n)
+			return val, 0, errDecode
 		}
 		return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), n, nil
 	case protoreflect.Uint32Kind:
@@ -63,7 +63,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot
 		}
 		v, n := protowire.ConsumeVarint(b)
 		if n < 0 {
-			return val, 0, protowire.ParseError(n)
+			return val, 0, errDecode
 		}
 		return protoreflect.ValueOfUint32(uint32(v)), n, nil
 	case protoreflect.Int64Kind:
@@ -72,7 +72,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot
 		}
 		v, n := protowire.ConsumeVarint(b)
 		if n < 0 {
-			return val, 0, protowire.ParseError(n)
+			return val, 0, errDecode
 		}
 		return protoreflect.ValueOfInt64(int64(v)), n, nil
 	case protoreflect.Sint64Kind:
@@ -81,7 +81,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot
 		}
 		v, n := protowire.ConsumeVarint(b)
 		if n < 0 {
-			return val, 0, protowire.ParseError(n)
+			return val, 0, errDecode
 		}
 		return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), n, nil
 	case protoreflect.Uint64Kind:
@@ -90,7 +90,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot
 		}
 		v, n := protowire.ConsumeVarint(b)
 		if n < 0 {
-			return val, 0, protowire.ParseError(n)
+			return val, 0, errDecode
 		}
 		return protoreflect.ValueOfUint64(v), n, nil
 	case protoreflect.Sfixed32Kind:
@@ -99,7 +99,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot
 		}
 		v, n := protowire.ConsumeFixed32(b)
 		if n < 0 {
-			return val, 0, protowire.ParseError(n)
+			return val, 0, errDecode
 		}
 		return protoreflect.ValueOfInt32(int32(v)), n, nil
 	case protoreflect.Fixed32Kind:
@@ -108,7 +108,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot
 		}
 		v, n := protowire.ConsumeFixed32(b)
 		if n < 0 {
-			return val, 0, protowire.ParseError(n)
+			return val, 0, errDecode
 		}
 		return protoreflect.ValueOfUint32(uint32(v)), n, nil
 	case protoreflect.FloatKind:
@@ -117,7 +117,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot
 		}
 		v, n := protowire.ConsumeFixed32(b)
 		if n < 0 {
-			return val, 0, protowire.ParseError(n)
+			return val, 0, errDecode
 		}
 		return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), n, nil
 	case protoreflect.Sfixed64Kind:
@@ -126,7 +126,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot
 		}
 		v, n := protowire.ConsumeFixed64(b)
 		if n < 0 {
-			return val, 0, protowire.ParseError(n)
+			return val, 0, errDecode
 		}
 		return protoreflect.ValueOfInt64(int64(v)), n, nil
 	case protoreflect.Fixed64Kind:
@@ -135,7 +135,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot
 		}
 		v, n := protowire.ConsumeFixed64(b)
 		if n < 0 {
-			return val, 0, protowire.ParseError(n)
+			return val, 0, errDecode
 		}
 		return protoreflect.ValueOfUint64(v), n, nil
 	case protoreflect.DoubleKind:
@@ -144,7 +144,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot
 		}
 		v, n := protowire.ConsumeFixed64(b)
 		if n < 0 {
-			return val, 0, protowire.ParseError(n)
+			return val, 0, errDecode
 		}
 		return protoreflect.ValueOfFloat64(math.Float64frombits(v)), n, nil
 	case protoreflect.StringKind:
@@ -153,7 +153,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot
 		}
 		v, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return val, 0, protowire.ParseError(n)
+			return val, 0, errDecode
 		}
 		if strs.EnforceUTF8(fd) && !utf8.Valid(v) {
 			return protoreflect.Value{}, 0, errors.InvalidUTF8(string(fd.FullName()))
@@ -165,7 +165,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot
 		}
 		v, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return val, 0, protowire.ParseError(n)
+			return val, 0, errDecode
 		}
 		return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), n, nil
 	case protoreflect.MessageKind:
@@ -174,7 +174,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot
 		}
 		v, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return val, 0, protowire.ParseError(n)
+			return val, 0, errDecode
 		}
 		return protoreflect.ValueOfBytes(v), n, nil
 	case protoreflect.GroupKind:
@@ -183,7 +183,7 @@ func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd prot
 		}
 		v, n := protowire.ConsumeGroup(fd.Number(), b)
 		if n < 0 {
-			return val, 0, protowire.ParseError(n)
+			return val, 0, errDecode
 		}
 		return protoreflect.ValueOfBytes(v), n, nil
 	default:
@@ -197,12 +197,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		if wtyp == protowire.BytesType {
 			buf, n := protowire.ConsumeBytes(b)
 			if n < 0 {
-				return 0, protowire.ParseError(n)
+				return 0, errDecode
 			}
 			for len(buf) > 0 {
 				v, n := protowire.ConsumeVarint(buf)
 				if n < 0 {
-					return 0, protowire.ParseError(n)
+					return 0, errDecode
 				}
 				buf = buf[n:]
 				list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v)))
@@ -214,7 +214,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		}
 		v, n := protowire.ConsumeVarint(b)
 		if n < 0 {
-			return 0, protowire.ParseError(n)
+			return 0, errDecode
 		}
 		list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v)))
 		return n, nil
@@ -222,12 +222,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		if wtyp == protowire.BytesType {
 			buf, n := protowire.ConsumeBytes(b)
 			if n < 0 {
-				return 0, protowire.ParseError(n)
+				return 0, errDecode
 			}
 			for len(buf) > 0 {
 				v, n := protowire.ConsumeVarint(buf)
 				if n < 0 {
-					return 0, protowire.ParseError(n)
+					return 0, errDecode
 				}
 				buf = buf[n:]
 				list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)))
@@ -239,7 +239,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		}
 		v, n := protowire.ConsumeVarint(b)
 		if n < 0 {
-			return 0, protowire.ParseError(n)
+			return 0, errDecode
 		}
 		list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)))
 		return n, nil
@@ -247,12 +247,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		if wtyp == protowire.BytesType {
 			buf, n := protowire.ConsumeBytes(b)
 			if n < 0 {
-				return 0, protowire.ParseError(n)
+				return 0, errDecode
 			}
 			for len(buf) > 0 {
 				v, n := protowire.ConsumeVarint(buf)
 				if n < 0 {
-					return 0, protowire.ParseError(n)
+					return 0, errDecode
 				}
 				buf = buf[n:]
 				list.Append(protoreflect.ValueOfInt32(int32(v)))
@@ -264,7 +264,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		}
 		v, n := protowire.ConsumeVarint(b)
 		if n < 0 {
-			return 0, protowire.ParseError(n)
+			return 0, errDecode
 		}
 		list.Append(protoreflect.ValueOfInt32(int32(v)))
 		return n, nil
@@ -272,12 +272,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		if wtyp == protowire.BytesType {
 			buf, n := protowire.ConsumeBytes(b)
 			if n < 0 {
-				return 0, protowire.ParseError(n)
+				return 0, errDecode
 			}
 			for len(buf) > 0 {
 				v, n := protowire.ConsumeVarint(buf)
 				if n < 0 {
-					return 0, protowire.ParseError(n)
+					return 0, errDecode
 				}
 				buf = buf[n:]
 				list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))))
@@ -289,7 +289,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		}
 		v, n := protowire.ConsumeVarint(b)
 		if n < 0 {
-			return 0, protowire.ParseError(n)
+			return 0, errDecode
 		}
 		list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))))
 		return n, nil
@@ -297,12 +297,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		if wtyp == protowire.BytesType {
 			buf, n := protowire.ConsumeBytes(b)
 			if n < 0 {
-				return 0, protowire.ParseError(n)
+				return 0, errDecode
 			}
 			for len(buf) > 0 {
 				v, n := protowire.ConsumeVarint(buf)
 				if n < 0 {
-					return 0, protowire.ParseError(n)
+					return 0, errDecode
 				}
 				buf = buf[n:]
 				list.Append(protoreflect.ValueOfUint32(uint32(v)))
@@ -314,7 +314,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		}
 		v, n := protowire.ConsumeVarint(b)
 		if n < 0 {
-			return 0, protowire.ParseError(n)
+			return 0, errDecode
 		}
 		list.Append(protoreflect.ValueOfUint32(uint32(v)))
 		return n, nil
@@ -322,12 +322,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		if wtyp == protowire.BytesType {
 			buf, n := protowire.ConsumeBytes(b)
 			if n < 0 {
-				return 0, protowire.ParseError(n)
+				return 0, errDecode
 			}
 			for len(buf) > 0 {
 				v, n := protowire.ConsumeVarint(buf)
 				if n < 0 {
-					return 0, protowire.ParseError(n)
+					return 0, errDecode
 				}
 				buf = buf[n:]
 				list.Append(protoreflect.ValueOfInt64(int64(v)))
@@ -339,7 +339,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		}
 		v, n := protowire.ConsumeVarint(b)
 		if n < 0 {
-			return 0, protowire.ParseError(n)
+			return 0, errDecode
 		}
 		list.Append(protoreflect.ValueOfInt64(int64(v)))
 		return n, nil
@@ -347,12 +347,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		if wtyp == protowire.BytesType {
 			buf, n := protowire.ConsumeBytes(b)
 			if n < 0 {
-				return 0, protowire.ParseError(n)
+				return 0, errDecode
 			}
 			for len(buf) > 0 {
 				v, n := protowire.ConsumeVarint(buf)
 				if n < 0 {
-					return 0, protowire.ParseError(n)
+					return 0, errDecode
 				}
 				buf = buf[n:]
 				list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)))
@@ -364,7 +364,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		}
 		v, n := protowire.ConsumeVarint(b)
 		if n < 0 {
-			return 0, protowire.ParseError(n)
+			return 0, errDecode
 		}
 		list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)))
 		return n, nil
@@ -372,12 +372,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		if wtyp == protowire.BytesType {
 			buf, n := protowire.ConsumeBytes(b)
 			if n < 0 {
-				return 0, protowire.ParseError(n)
+				return 0, errDecode
 			}
 			for len(buf) > 0 {
 				v, n := protowire.ConsumeVarint(buf)
 				if n < 0 {
-					return 0, protowire.ParseError(n)
+					return 0, errDecode
 				}
 				buf = buf[n:]
 				list.Append(protoreflect.ValueOfUint64(v))
@@ -389,7 +389,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		}
 		v, n := protowire.ConsumeVarint(b)
 		if n < 0 {
-			return 0, protowire.ParseError(n)
+			return 0, errDecode
 		}
 		list.Append(protoreflect.ValueOfUint64(v))
 		return n, nil
@@ -397,12 +397,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		if wtyp == protowire.BytesType {
 			buf, n := protowire.ConsumeBytes(b)
 			if n < 0 {
-				return 0, protowire.ParseError(n)
+				return 0, errDecode
 			}
 			for len(buf) > 0 {
 				v, n := protowire.ConsumeFixed32(buf)
 				if n < 0 {
-					return 0, protowire.ParseError(n)
+					return 0, errDecode
 				}
 				buf = buf[n:]
 				list.Append(protoreflect.ValueOfInt32(int32(v)))
@@ -414,7 +414,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		}
 		v, n := protowire.ConsumeFixed32(b)
 		if n < 0 {
-			return 0, protowire.ParseError(n)
+			return 0, errDecode
 		}
 		list.Append(protoreflect.ValueOfInt32(int32(v)))
 		return n, nil
@@ -422,12 +422,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		if wtyp == protowire.BytesType {
 			buf, n := protowire.ConsumeBytes(b)
 			if n < 0 {
-				return 0, protowire.ParseError(n)
+				return 0, errDecode
 			}
 			for len(buf) > 0 {
 				v, n := protowire.ConsumeFixed32(buf)
 				if n < 0 {
-					return 0, protowire.ParseError(n)
+					return 0, errDecode
 				}
 				buf = buf[n:]
 				list.Append(protoreflect.ValueOfUint32(uint32(v)))
@@ -439,7 +439,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		}
 		v, n := protowire.ConsumeFixed32(b)
 		if n < 0 {
-			return 0, protowire.ParseError(n)
+			return 0, errDecode
 		}
 		list.Append(protoreflect.ValueOfUint32(uint32(v)))
 		return n, nil
@@ -447,12 +447,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		if wtyp == protowire.BytesType {
 			buf, n := protowire.ConsumeBytes(b)
 			if n < 0 {
-				return 0, protowire.ParseError(n)
+				return 0, errDecode
 			}
 			for len(buf) > 0 {
 				v, n := protowire.ConsumeFixed32(buf)
 				if n < 0 {
-					return 0, protowire.ParseError(n)
+					return 0, errDecode
 				}
 				buf = buf[n:]
 				list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))))
@@ -464,7 +464,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		}
 		v, n := protowire.ConsumeFixed32(b)
 		if n < 0 {
-			return 0, protowire.ParseError(n)
+			return 0, errDecode
 		}
 		list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))))
 		return n, nil
@@ -472,12 +472,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		if wtyp == protowire.BytesType {
 			buf, n := protowire.ConsumeBytes(b)
 			if n < 0 {
-				return 0, protowire.ParseError(n)
+				return 0, errDecode
 			}
 			for len(buf) > 0 {
 				v, n := protowire.ConsumeFixed64(buf)
 				if n < 0 {
-					return 0, protowire.ParseError(n)
+					return 0, errDecode
 				}
 				buf = buf[n:]
 				list.Append(protoreflect.ValueOfInt64(int64(v)))
@@ -489,7 +489,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		}
 		v, n := protowire.ConsumeFixed64(b)
 		if n < 0 {
-			return 0, protowire.ParseError(n)
+			return 0, errDecode
 		}
 		list.Append(protoreflect.ValueOfInt64(int64(v)))
 		return n, nil
@@ -497,12 +497,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		if wtyp == protowire.BytesType {
 			buf, n := protowire.ConsumeBytes(b)
 			if n < 0 {
-				return 0, protowire.ParseError(n)
+				return 0, errDecode
 			}
 			for len(buf) > 0 {
 				v, n := protowire.ConsumeFixed64(buf)
 				if n < 0 {
-					return 0, protowire.ParseError(n)
+					return 0, errDecode
 				}
 				buf = buf[n:]
 				list.Append(protoreflect.ValueOfUint64(v))
@@ -514,7 +514,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		}
 		v, n := protowire.ConsumeFixed64(b)
 		if n < 0 {
-			return 0, protowire.ParseError(n)
+			return 0, errDecode
 		}
 		list.Append(protoreflect.ValueOfUint64(v))
 		return n, nil
@@ -522,12 +522,12 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		if wtyp == protowire.BytesType {
 			buf, n := protowire.ConsumeBytes(b)
 			if n < 0 {
-				return 0, protowire.ParseError(n)
+				return 0, errDecode
 			}
 			for len(buf) > 0 {
 				v, n := protowire.ConsumeFixed64(buf)
 				if n < 0 {
-					return 0, protowire.ParseError(n)
+					return 0, errDecode
 				}
 				buf = buf[n:]
 				list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v)))
@@ -539,7 +539,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		}
 		v, n := protowire.ConsumeFixed64(b)
 		if n < 0 {
-			return 0, protowire.ParseError(n)
+			return 0, errDecode
 		}
 		list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v)))
 		return n, nil
@@ -549,7 +549,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		}
 		v, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return 0, protowire.ParseError(n)
+			return 0, errDecode
 		}
 		if strs.EnforceUTF8(fd) && !utf8.Valid(v) {
 			return 0, errors.InvalidUTF8(string(fd.FullName()))
@@ -562,7 +562,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		}
 		v, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return 0, protowire.ParseError(n)
+			return 0, errDecode
 		}
 		list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...)))
 		return n, nil
@@ -572,7 +572,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		}
 		v, n := protowire.ConsumeBytes(b)
 		if n < 0 {
-			return 0, protowire.ParseError(n)
+			return 0, errDecode
 		}
 		m := list.NewElement()
 		if err := o.unmarshalMessage(v, m.Message()); err != nil {
@@ -586,7 +586,7 @@ func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list prot
 		}
 		v, n := protowire.ConsumeGroup(fd.Number(), b)
 		if n < 0 {
-			return 0, protowire.ParseError(n)
+			return 0, errDecode
 		}
 		m := list.NewElement()
 		if err := o.unmarshalMessage(v, m.Message()); err != nil {
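The decode.go hunks above replace every per-call protowire.ParseError(n) value with a single errDecode sentinel, so all wire-format parse failures compare equal. A minimal sketch of that pattern, assuming errDecode is declared elsewhere in the proto package with wording along these lines (the declaration itself is not part of this hunk):

    // Illustration only: the sentinel-error pattern, not the actual proto package code.
    package main

    import (
        "errors"
        "fmt"
    )

    // A single sentinel for every malformed-input failure; the real package
    // presumably declares errDecode similarly (exact message not shown here).
    var errDecode = errors.New("cannot parse invalid wire-format data")

    func consume(n int) error {
        if n < 0 { // protowire consume functions signal errors with a negative length
            return errDecode
        }
        return nil
    }

    func main() {
        err := consume(-1)
        fmt.Println(errors.Is(err, errDecode)) // true: callers can test for one value
    }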
diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go
index 7b47a1180..d18239c23 100644
--- a/vendor/google.golang.org/protobuf/proto/encode.go
+++ b/vendor/google.golang.org/protobuf/proto/encode.go
@@ -5,12 +5,9 @@
 package proto
 
 import (
-	"sort"
-
 	"google.golang.org/protobuf/encoding/protowire"
 	"google.golang.org/protobuf/internal/encoding/messageset"
-	"google.golang.org/protobuf/internal/fieldsort"
-	"google.golang.org/protobuf/internal/mapsort"
+	"google.golang.org/protobuf/internal/order"
 	"google.golang.org/protobuf/internal/pragma"
 	"google.golang.org/protobuf/reflect/protoreflect"
 	"google.golang.org/protobuf/runtime/protoiface"
@@ -211,14 +208,15 @@ func (o MarshalOptions) marshalMessageSlow(b []byte, m protoreflect.Message) ([]
 	if messageset.IsMessageSet(m.Descriptor()) {
 		return o.marshalMessageSet(b, m)
 	}
-	// There are many choices for what order we visit fields in. The default one here
-	// is chosen for reasonable efficiency and simplicity given the protoreflect API.
-	// It is not deterministic, since Message.Range does not return fields in any
-	// defined order.
-	//
-	// When using deterministic serialization, we sort the known fields.
+	fieldOrder := order.AnyFieldOrder
+	if o.Deterministic {
+		// TODO: This should use a more natural ordering like NumberFieldOrder,
+		// but doing so breaks golden tests that make invalid assumptions about
+		// output stability of this implementation.
+		fieldOrder = order.LegacyFieldOrder
+	}
 	var err error
-	o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+	order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
 		b, err = o.marshalField(b, fd, v)
 		return err == nil
 	})
@@ -229,27 +227,6 @@ func (o MarshalOptions) marshalMessageSlow(b []byte, m protoreflect.Message) ([]
 	return b, nil
 }
 
-// rangeFields visits fields in a defined order when deterministic serialization is enabled.
-func (o MarshalOptions) rangeFields(m protoreflect.Message, f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
-	if !o.Deterministic {
-		m.Range(f)
-		return
-	}
-	var fds []protoreflect.FieldDescriptor
-	m.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
-		fds = append(fds, fd)
-		return true
-	})
-	sort.Slice(fds, func(a, b int) bool {
-		return fieldsort.Less(fds[a], fds[b])
-	})
-	for _, fd := range fds {
-		if !f(fd, m.Get(fd)) {
-			break
-		}
-	}
-}
-
 func (o MarshalOptions) marshalField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) {
 	switch {
 	case fd.IsList():
@@ -292,8 +269,12 @@ func (o MarshalOptions) marshalList(b []byte, fd protoreflect.FieldDescriptor, l
 func (o MarshalOptions) marshalMap(b []byte, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) ([]byte, error) {
 	keyf := fd.MapKey()
 	valf := fd.MapValue()
+	keyOrder := order.AnyKeyOrder
+	if o.Deterministic {
+		keyOrder = order.GenericKeyOrder
+	}
 	var err error
-	o.rangeMap(mapv, keyf.Kind(), func(key protoreflect.MapKey, value protoreflect.Value) bool {
+	order.RangeEntries(mapv, keyOrder, func(key protoreflect.MapKey, value protoreflect.Value) bool {
 		b = protowire.AppendTag(b, fd.Number(), protowire.BytesType)
 		var pos int
 		b, pos = appendSpeculativeLength(b)
@@ -312,14 +293,6 @@ func (o MarshalOptions) marshalMap(b []byte, fd protoreflect.FieldDescriptor, ma
 	return b, err
 }
 
-func (o MarshalOptions) rangeMap(mapv protoreflect.Map, kind protoreflect.Kind, f func(protoreflect.MapKey, protoreflect.Value) bool) {
-	if !o.Deterministic {
-		mapv.Range(f)
-		return
-	}
-	mapsort.Range(mapv, kind, f)
-}
-
 // When encoding length-prefixed fields, we speculatively set aside some number of bytes
 // for the length, encode the data, and then encode the length (shifting the data if necessary
 // to make room).
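In encode.go the local rangeFields/rangeMap helpers are dropped in favour of the shared internal/order package: order.RangeFields and order.RangeEntries pick the iteration order, falling back to a legacy-compatible ordering only when Deterministic is set. Callers of the public API are unaffected; a minimal usage sketch (durationpb serves only as a stand-in message):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        msg := &durationpb.Duration{Seconds: 90, Nanos: 5}
        // Deterministic output sorts map entries and uses a stable field order.
        b, err := proto.MarshalOptions{Deterministic: true}.Marshal(msg)
        if err != nil {
            panic(err)
        }
        fmt.Println(len(b))
    }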
diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go
index 10902bd85..4dba2b969 100644
--- a/vendor/google.golang.org/protobuf/proto/equal.go
+++ b/vendor/google.golang.org/protobuf/proto/equal.go
@@ -111,18 +111,31 @@ func equalList(fd pref.FieldDescriptor, x, y pref.List) bool {
 
 // equalValue compares two singular values.
 func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool {
-	switch {
-	case fd.Message() != nil:
-		return equalMessage(x.Message(), y.Message())
-	case fd.Kind() == pref.BytesKind:
-		return bytes.Equal(x.Bytes(), y.Bytes())
-	case fd.Kind() == pref.FloatKind, fd.Kind() == pref.DoubleKind:
+	switch fd.Kind() {
+	case pref.BoolKind:
+		return x.Bool() == y.Bool()
+	case pref.EnumKind:
+		return x.Enum() == y.Enum()
+	case pref.Int32Kind, pref.Sint32Kind,
+		pref.Int64Kind, pref.Sint64Kind,
+		pref.Sfixed32Kind, pref.Sfixed64Kind:
+		return x.Int() == y.Int()
+	case pref.Uint32Kind, pref.Uint64Kind,
+		pref.Fixed32Kind, pref.Fixed64Kind:
+		return x.Uint() == y.Uint()
+	case pref.FloatKind, pref.DoubleKind:
 		fx := x.Float()
 		fy := y.Float()
 		if math.IsNaN(fx) || math.IsNaN(fy) {
 			return math.IsNaN(fx) && math.IsNaN(fy)
 		}
 		return fx == fy
+	case pref.StringKind:
+		return x.String() == y.String()
+	case pref.BytesKind:
+		return bytes.Equal(x.Bytes(), y.Bytes())
+	case pref.MessageKind, pref.GroupKind:
+		return equalMessage(x.Message(), y.Message())
 	default:
 		return x.Interface() == y.Interface()
 	}
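The equalValue rewrite dispatches on the field Kind instead of a few special cases, keeping the NaN handling for floats and bytes.Equal for bytes while making every scalar kind explicit. Behaviour through the public proto.Equal helper is unchanged; for example (durationpb again used only as an example message):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        a := &durationpb.Duration{Seconds: 1, Nanos: 500}
        b := &durationpb.Duration{Seconds: 1, Nanos: 500}
        fmt.Println(proto.Equal(a, b)) // true: compared field by field, not by pointer
    }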
diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go
index 1d692c3a8..312d5d45c 100644
--- a/vendor/google.golang.org/protobuf/proto/messageset.go
+++ b/vendor/google.golang.org/protobuf/proto/messageset.go
@@ -9,6 +9,7 @@ import (
 	"google.golang.org/protobuf/internal/encoding/messageset"
 	"google.golang.org/protobuf/internal/errors"
 	"google.golang.org/protobuf/internal/flags"
+	"google.golang.org/protobuf/internal/order"
 	"google.golang.org/protobuf/reflect/protoreflect"
 	"google.golang.org/protobuf/reflect/protoregistry"
 )
@@ -28,8 +29,12 @@ func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]b
 	if !flags.ProtoLegacy {
 		return b, errors.New("no support for message_set_wire_format")
 	}
+	fieldOrder := order.AnyFieldOrder
+	if o.Deterministic {
+		fieldOrder = order.NumberFieldOrder
+	}
 	var err error
-	o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+	order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
 		b, err = o.marshalMessageSetField(b, fd, v)
 		return err == nil
 	})
diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go
index ca14b09c3..1f0d183b1 100644
--- a/vendor/google.golang.org/protobuf/proto/proto.go
+++ b/vendor/google.golang.org/protobuf/proto/proto.go
@@ -32,3 +32,12 @@ var Error error
 func init() {
 	Error = errors.Error
 }
+
+// MessageName returns the full name of m.
+// If m is nil, it returns an empty string.
+func MessageName(m Message) protoreflect.FullName {
+	if m == nil {
+		return ""
+	}
+	return m.ProtoReflect().Descriptor().FullName()
+}
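proto.MessageName is a new convenience wrapper around m.ProtoReflect().Descriptor().FullName() that tolerates a nil message. A short usage sketch:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        fmt.Println(proto.MessageName(&durationpb.Duration{})) // google.protobuf.Duration
        fmt.Println(proto.MessageName(nil) == "")              // true: nil maps to the empty name
    }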
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
index 37f254d4c..e4dfb1205 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
@@ -144,6 +144,7 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot
 	}
 
 	// Handle source locations.
+	f.L2.Locations.File = f
 	for _, loc := range fd.GetSourceCodeInfo().GetLocation() {
 		var l protoreflect.SourceLocation
 		// TODO: Validate that the path points to an actual declaration?
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
index 673a230e7..37efda1af 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
@@ -135,7 +135,7 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc
 			f.L1.Kind = protoreflect.Kind(fd.GetType())
 		}
 		if fd.JsonName != nil {
-			f.L1.JSONName.Init(fd.GetJsonName())
+			f.L1.StringName.InitJSON(fd.GetJsonName())
 		}
 	}
 	return fs, nil
@@ -175,7 +175,7 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript
 			x.L1.Kind = protoreflect.Kind(xd.GetType())
 		}
 		if xd.JsonName != nil {
-			x.L2.JSONName.Init(xd.GetJsonName())
+			x.L2.StringName.InitJSON(xd.GetJsonName())
 		}
 	}
 	return xs, nil
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
index 2d5fa9936..9af1d5648 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
@@ -239,6 +239,9 @@ func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb.
 			return errors.New("extension field %q has an invalid cardinality: %d", x.FullName(), x.Cardinality())
 		}
 		if xd.JsonName != nil {
+			// A bug in older versions of protoc would always populate the
+			// "json_name" option for extensions when it is meaningless.
+			// When it did so, it would always use the camel-cased field name.
 			if xd.GetJsonName() != strs.JSONCamelCase(string(x.Name())) {
 				return errors.New("extension field %q may not have an explicitly set JSON name: %q", x.FullName(), xd.GetJsonName())
 			}
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
index 00d35e02e..a7c5ceffc 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
@@ -9,6 +9,7 @@ import (
 	"strings"
 
 	"google.golang.org/protobuf/internal/encoding/defval"
+	"google.golang.org/protobuf/internal/strs"
 	"google.golang.org/protobuf/proto"
 	"google.golang.org/protobuf/reflect/protoreflect"
 
@@ -20,9 +21,11 @@ import (
 func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto {
 	p := &descriptorpb.FileDescriptorProto{
 		Name:    proto.String(file.Path()),
-		Package: proto.String(string(file.Package())),
 		Options: proto.Clone(file.Options()).(*descriptorpb.FileOptions),
 	}
+	if file.Package() != "" {
+		p.Package = proto.String(string(file.Package()))
+	}
 	for i, imports := 0, file.Imports(); i < imports.Len(); i++ {
 		imp := imports.Get(i)
 		p.Dependency = append(p.Dependency, imp.Path())
@@ -138,7 +141,14 @@ func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.Fi
 		p.TypeName = fullNameOf(field.Message())
 	}
 	if field.HasJSONName() {
-		p.JsonName = proto.String(field.JSONName())
+		// A bug in older versions of protoc would always populate the
+		// "json_name" option for extensions when it is meaningless.
+		// When it did so, it would always use the camel-cased field name.
+		if field.IsExtension() {
+			p.JsonName = proto.String(strs.JSONCamelCase(string(field.Name())))
+		} else {
+			p.JsonName = proto.String(field.JSONName())
+		}
 	}
 	if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() {
 		p.Proto3Optional = proto.Bool(true)
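The hunks above (together with the matching check in desc_validate.go) work around older protoc releases that emitted a json_name for extension fields even though it is meaningless there; when present it must equal the camel-cased field name. As a rough illustration of that camel-casing rule only, not the internal strs.JSONCamelCase implementation:

    package main

    import "fmt"

    // jsonCamelCase approximates the protoc rule: drop underscores and
    // upper-case the letter that follows each one.
    func jsonCamelCase(s string) string {
        out := make([]byte, 0, len(s))
        up := false
        for i := 0; i < len(s); i++ {
            c := s[i]
            if c == '_' {
                up = true
                continue
            }
            if up && 'a' <= c && c <= 'z' {
                c -= 'a' - 'A'
            }
            up = false
            out = append(out, c)
        }
        return string(out)
    }

    func main() {
        fmt.Println(jsonCamelCase("my_extension_field")) // myExtensionField
    }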
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go
index 32ea3d98c..121ba3a07 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go
@@ -4,6 +4,10 @@
 
 package protoreflect
 
+import (
+	"strconv"
+)
+
 // SourceLocations is a list of source locations.
 type SourceLocations interface {
 	// Len reports the number of source locations in the proto file.
@@ -11,9 +15,20 @@ type SourceLocations interface {
 	// Get returns the ith SourceLocation. It panics if out of bounds.
 	Get(int) SourceLocation
 
-	doNotImplement
+	// ByPath returns the SourceLocation for the given path,
+	// returning the first location if multiple exist for the same path.
+	// When several locations share the same path, the SourceLocation.Next
+	// index identifies the next SourceLocation with that path.
+	// If no location exists for this path, it returns the zero value.
+	ByPath(path SourcePath) SourceLocation
 
-	// TODO: Add ByPath and ByDescriptor helper methods.
+	// ByDescriptor returns the SourceLocation for the given descriptor,
+	// returning the first location if multiple exist for the same path.
+	// If no location exists for this descriptor, it returns the zero value.
+	ByDescriptor(desc Descriptor) SourceLocation
+
+	doNotImplement
 }
 
 // SourceLocation describes a source location and
@@ -39,6 +54,10 @@ type SourceLocation struct {
 	LeadingComments string
 	// TrailingComments is the trailing attached comment for the declaration.
 	TrailingComments string
+
+	// Next is an index into SourceLocations for the next source location that
+	// has the same Path. It is zero if there is no next location.
+	Next int
 }
 
 // SourcePath identifies part of a file descriptor for a source location.
@@ -48,5 +67,62 @@ type SourceLocation struct {
 // See google.protobuf.SourceCodeInfo.Location.path.
 type SourcePath []int32
 
-// TODO: Add SourcePath.String method to pretty-print the path. For example:
-//	".message_type[6].nested_type[15].field[3]"
+// Equal reports whether p1 equals p2.
+func (p1 SourcePath) Equal(p2 SourcePath) bool {
+	if len(p1) != len(p2) {
+		return false
+	}
+	for i := range p1 {
+		if p1[i] != p2[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// String formats the path in a human-readable manner.
+// The output is guaranteed to be deterministic,
+// making it suitable for use as a key into a Go map.
+// It is not guaranteed to be stable as the exact output could change
+// in a future version of this module.
+//
+// Example output:
+//	.message_type[6].nested_type[15].field[3]
+func (p SourcePath) String() string {
+	b := p.appendFileDescriptorProto(nil)
+	for _, i := range p {
+		b = append(b, '.')
+		b = strconv.AppendInt(b, int64(i), 10)
+	}
+	return string(b)
+}
+
+type appendFunc func(*SourcePath, []byte) []byte
+
+func (p *SourcePath) appendSingularField(b []byte, name string, f appendFunc) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	b = append(b, '.')
+	b = append(b, name...)
+	*p = (*p)[1:]
+	if f != nil {
+		b = f(p, b)
+	}
+	return b
+}
+
+func (p *SourcePath) appendRepeatedField(b []byte, name string, f appendFunc) []byte {
+	b = p.appendSingularField(b, name, nil)
+	if len(*p) == 0 || (*p)[0] < 0 {
+		return b
+	}
+	b = append(b, '[')
+	b = strconv.AppendUint(b, uint64((*p)[0]), 10)
+	b = append(b, ']')
+	*p = (*p)[1:]
+	if f != nil {
+		b = f(p, b)
+	}
+	return b
+}
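SourceLocations gains ByPath and ByDescriptor lookups, with SourceLocation.Next chaining locations that share a path, and SourcePath gains Equal and String. A sketch of reading the comments attached to a declaration; note that generated descriptors usually ship without source info, in which case the zero SourceLocation is returned:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        fd := (&durationpb.Duration{}).ProtoReflect().Descriptor().ParentFile()
        md := fd.Messages().Get(0) // google.protobuf.Duration

        loc := fd.SourceLocations().ByDescriptor(md)
        fmt.Printf("%q\n", loc.LeadingComments) // comment block from duration.proto, if source info is present
        fmt.Printf("%q\n", loc.TrailingComments)
    }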
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
new file mode 100644
index 000000000..b03c1223c
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
@@ -0,0 +1,461 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package protoreflect
+
+func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 1:
+		b = p.appendSingularField(b, "name", nil)
+	case 2:
+		b = p.appendSingularField(b, "package", nil)
+	case 3:
+		b = p.appendRepeatedField(b, "dependency", nil)
+	case 10:
+		b = p.appendRepeatedField(b, "public_dependency", nil)
+	case 11:
+		b = p.appendRepeatedField(b, "weak_dependency", nil)
+	case 4:
+		b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto)
+	case 5:
+		b = p.appendRepeatedField(b, "enum_type", (*SourcePath).appendEnumDescriptorProto)
+	case 6:
+		b = p.appendRepeatedField(b, "service", (*SourcePath).appendServiceDescriptorProto)
+	case 7:
+		b = p.appendRepeatedField(b, "extension", (*SourcePath).appendFieldDescriptorProto)
+	case 8:
+		b = p.appendSingularField(b, "options", (*SourcePath).appendFileOptions)
+	case 9:
+		b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo)
+	case 12:
+		b = p.appendSingularField(b, "syntax", nil)
+	}
+	return b
+}
+
+func (p *SourcePath) appendDescriptorProto(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 1:
+		b = p.appendSingularField(b, "name", nil)
+	case 2:
+		b = p.appendRepeatedField(b, "field", (*SourcePath).appendFieldDescriptorProto)
+	case 6:
+		b = p.appendRepeatedField(b, "extension", (*SourcePath).appendFieldDescriptorProto)
+	case 3:
+		b = p.appendRepeatedField(b, "nested_type", (*SourcePath).appendDescriptorProto)
+	case 4:
+		b = p.appendRepeatedField(b, "enum_type", (*SourcePath).appendEnumDescriptorProto)
+	case 5:
+		b = p.appendRepeatedField(b, "extension_range", (*SourcePath).appendDescriptorProto_ExtensionRange)
+	case 8:
+		b = p.appendRepeatedField(b, "oneof_decl", (*SourcePath).appendOneofDescriptorProto)
+	case 7:
+		b = p.appendSingularField(b, "options", (*SourcePath).appendMessageOptions)
+	case 9:
+		b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendDescriptorProto_ReservedRange)
+	case 10:
+		b = p.appendRepeatedField(b, "reserved_name", nil)
+	}
+	return b
+}
+
+func (p *SourcePath) appendEnumDescriptorProto(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 1:
+		b = p.appendSingularField(b, "name", nil)
+	case 2:
+		b = p.appendRepeatedField(b, "value", (*SourcePath).appendEnumValueDescriptorProto)
+	case 3:
+		b = p.appendSingularField(b, "options", (*SourcePath).appendEnumOptions)
+	case 4:
+		b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange)
+	case 5:
+		b = p.appendRepeatedField(b, "reserved_name", nil)
+	}
+	return b
+}
+
+func (p *SourcePath) appendServiceDescriptorProto(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 1:
+		b = p.appendSingularField(b, "name", nil)
+	case 2:
+		b = p.appendRepeatedField(b, "method", (*SourcePath).appendMethodDescriptorProto)
+	case 3:
+		b = p.appendSingularField(b, "options", (*SourcePath).appendServiceOptions)
+	}
+	return b
+}
+
+func (p *SourcePath) appendFieldDescriptorProto(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 1:
+		b = p.appendSingularField(b, "name", nil)
+	case 3:
+		b = p.appendSingularField(b, "number", nil)
+	case 4:
+		b = p.appendSingularField(b, "label", nil)
+	case 5:
+		b = p.appendSingularField(b, "type", nil)
+	case 6:
+		b = p.appendSingularField(b, "type_name", nil)
+	case 2:
+		b = p.appendSingularField(b, "extendee", nil)
+	case 7:
+		b = p.appendSingularField(b, "default_value", nil)
+	case 9:
+		b = p.appendSingularField(b, "oneof_index", nil)
+	case 10:
+		b = p.appendSingularField(b, "json_name", nil)
+	case 8:
+		b = p.appendSingularField(b, "options", (*SourcePath).appendFieldOptions)
+	case 17:
+		b = p.appendSingularField(b, "proto3_optional", nil)
+	}
+	return b
+}
+
+func (p *SourcePath) appendFileOptions(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 1:
+		b = p.appendSingularField(b, "java_package", nil)
+	case 8:
+		b = p.appendSingularField(b, "java_outer_classname", nil)
+	case 10:
+		b = p.appendSingularField(b, "java_multiple_files", nil)
+	case 20:
+		b = p.appendSingularField(b, "java_generate_equals_and_hash", nil)
+	case 27:
+		b = p.appendSingularField(b, "java_string_check_utf8", nil)
+	case 9:
+		b = p.appendSingularField(b, "optimize_for", nil)
+	case 11:
+		b = p.appendSingularField(b, "go_package", nil)
+	case 16:
+		b = p.appendSingularField(b, "cc_generic_services", nil)
+	case 17:
+		b = p.appendSingularField(b, "java_generic_services", nil)
+	case 18:
+		b = p.appendSingularField(b, "py_generic_services", nil)
+	case 42:
+		b = p.appendSingularField(b, "php_generic_services", nil)
+	case 23:
+		b = p.appendSingularField(b, "deprecated", nil)
+	case 31:
+		b = p.appendSingularField(b, "cc_enable_arenas", nil)
+	case 36:
+		b = p.appendSingularField(b, "objc_class_prefix", nil)
+	case 37:
+		b = p.appendSingularField(b, "csharp_namespace", nil)
+	case 39:
+		b = p.appendSingularField(b, "swift_prefix", nil)
+	case 40:
+		b = p.appendSingularField(b, "php_class_prefix", nil)
+	case 41:
+		b = p.appendSingularField(b, "php_namespace", nil)
+	case 44:
+		b = p.appendSingularField(b, "php_metadata_namespace", nil)
+	case 45:
+		b = p.appendSingularField(b, "ruby_package", nil)
+	case 999:
+		b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+	}
+	return b
+}
+
+func (p *SourcePath) appendSourceCodeInfo(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 1:
+		b = p.appendRepeatedField(b, "location", (*SourcePath).appendSourceCodeInfo_Location)
+	}
+	return b
+}
+
+func (p *SourcePath) appendDescriptorProto_ExtensionRange(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 1:
+		b = p.appendSingularField(b, "start", nil)
+	case 2:
+		b = p.appendSingularField(b, "end", nil)
+	case 3:
+		b = p.appendSingularField(b, "options", (*SourcePath).appendExtensionRangeOptions)
+	}
+	return b
+}
+
+func (p *SourcePath) appendOneofDescriptorProto(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 1:
+		b = p.appendSingularField(b, "name", nil)
+	case 2:
+		b = p.appendSingularField(b, "options", (*SourcePath).appendOneofOptions)
+	}
+	return b
+}
+
+func (p *SourcePath) appendMessageOptions(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 1:
+		b = p.appendSingularField(b, "message_set_wire_format", nil)
+	case 2:
+		b = p.appendSingularField(b, "no_standard_descriptor_accessor", nil)
+	case 3:
+		b = p.appendSingularField(b, "deprecated", nil)
+	case 7:
+		b = p.appendSingularField(b, "map_entry", nil)
+	case 999:
+		b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+	}
+	return b
+}
+
+func (p *SourcePath) appendDescriptorProto_ReservedRange(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 1:
+		b = p.appendSingularField(b, "start", nil)
+	case 2:
+		b = p.appendSingularField(b, "end", nil)
+	}
+	return b
+}
+
+func (p *SourcePath) appendEnumValueDescriptorProto(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 1:
+		b = p.appendSingularField(b, "name", nil)
+	case 2:
+		b = p.appendSingularField(b, "number", nil)
+	case 3:
+		b = p.appendSingularField(b, "options", (*SourcePath).appendEnumValueOptions)
+	}
+	return b
+}
+
+func (p *SourcePath) appendEnumOptions(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 2:
+		b = p.appendSingularField(b, "allow_alias", nil)
+	case 3:
+		b = p.appendSingularField(b, "deprecated", nil)
+	case 999:
+		b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+	}
+	return b
+}
+
+func (p *SourcePath) appendEnumDescriptorProto_EnumReservedRange(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 1:
+		b = p.appendSingularField(b, "start", nil)
+	case 2:
+		b = p.appendSingularField(b, "end", nil)
+	}
+	return b
+}
+
+func (p *SourcePath) appendMethodDescriptorProto(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 1:
+		b = p.appendSingularField(b, "name", nil)
+	case 2:
+		b = p.appendSingularField(b, "input_type", nil)
+	case 3:
+		b = p.appendSingularField(b, "output_type", nil)
+	case 4:
+		b = p.appendSingularField(b, "options", (*SourcePath).appendMethodOptions)
+	case 5:
+		b = p.appendSingularField(b, "client_streaming", nil)
+	case 6:
+		b = p.appendSingularField(b, "server_streaming", nil)
+	}
+	return b
+}
+
+func (p *SourcePath) appendServiceOptions(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 33:
+		b = p.appendSingularField(b, "deprecated", nil)
+	case 999:
+		b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+	}
+	return b
+}
+
+func (p *SourcePath) appendFieldOptions(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 1:
+		b = p.appendSingularField(b, "ctype", nil)
+	case 2:
+		b = p.appendSingularField(b, "packed", nil)
+	case 6:
+		b = p.appendSingularField(b, "jstype", nil)
+	case 5:
+		b = p.appendSingularField(b, "lazy", nil)
+	case 3:
+		b = p.appendSingularField(b, "deprecated", nil)
+	case 10:
+		b = p.appendSingularField(b, "weak", nil)
+	case 999:
+		b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+	}
+	return b
+}
+
+func (p *SourcePath) appendUninterpretedOption(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 2:
+		b = p.appendRepeatedField(b, "name", (*SourcePath).appendUninterpretedOption_NamePart)
+	case 3:
+		b = p.appendSingularField(b, "identifier_value", nil)
+	case 4:
+		b = p.appendSingularField(b, "positive_int_value", nil)
+	case 5:
+		b = p.appendSingularField(b, "negative_int_value", nil)
+	case 6:
+		b = p.appendSingularField(b, "double_value", nil)
+	case 7:
+		b = p.appendSingularField(b, "string_value", nil)
+	case 8:
+		b = p.appendSingularField(b, "aggregate_value", nil)
+	}
+	return b
+}
+
+func (p *SourcePath) appendSourceCodeInfo_Location(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 1:
+		b = p.appendRepeatedField(b, "path", nil)
+	case 2:
+		b = p.appendRepeatedField(b, "span", nil)
+	case 3:
+		b = p.appendSingularField(b, "leading_comments", nil)
+	case 4:
+		b = p.appendSingularField(b, "trailing_comments", nil)
+	case 6:
+		b = p.appendRepeatedField(b, "leading_detached_comments", nil)
+	}
+	return b
+}
+
+func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 999:
+		b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+	}
+	return b
+}
+
+func (p *SourcePath) appendOneofOptions(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 999:
+		b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+	}
+	return b
+}
+
+func (p *SourcePath) appendEnumValueOptions(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 1:
+		b = p.appendSingularField(b, "deprecated", nil)
+	case 999:
+		b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+	}
+	return b
+}
+
+func (p *SourcePath) appendMethodOptions(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 33:
+		b = p.appendSingularField(b, "deprecated", nil)
+	case 34:
+		b = p.appendSingularField(b, "idempotency_level", nil)
+	case 999:
+		b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
+	}
+	return b
+}
+
+func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 1:
+		b = p.appendSingularField(b, "name_part", nil)
+	case 2:
+		b = p.appendSingularField(b, "is_extension", nil)
+	}
+	return b
+}
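The generated file above supplies the per-message lookup tables that SourcePath.String uses to turn numeric paths into field names. For example, using the path from the String doc comment:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/reflect/protoreflect"
    )

    func main() {
        p := protoreflect.SourcePath{4, 6, 3, 15, 2, 3}
        fmt.Println(p.String()) // .message_type[6].nested_type[15].field[3]

        q := protoreflect.SourcePath{4, 6, 3, 15, 2, 3}
        fmt.Println(p.Equal(q)) // true
    }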
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
index 5be14a725..8e53c44a9 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
@@ -232,11 +232,15 @@ type MessageDescriptor interface {
 type isMessageDescriptor interface{ ProtoType(MessageDescriptor) }
 
 // MessageType encapsulates a MessageDescriptor with a concrete Go implementation.
+// It is recommended that implementations of this interface also implement the
+// MessageFieldTypes interface.
 type MessageType interface {
 	// New returns a newly allocated empty message.
+	// It may return nil for synthetic messages representing a map entry.
 	New() Message
 
 	// Zero returns an empty, read-only message.
+	// It may return nil for synthetic messages representing a map entry.
 	Zero() Message
 
 	// Descriptor returns the message descriptor.
@@ -245,6 +249,26 @@ type MessageType interface {
 	Descriptor() MessageDescriptor
 }
 
+// MessageFieldTypes extends a MessageType by providing type information
+// regarding enums and messages referenced by the message fields.
+type MessageFieldTypes interface {
+	MessageType
+
+	// Enum returns the EnumType for the ith field in Descriptor.Fields.
+	// It returns nil if the ith field is not an enum kind.
+	// It panics if out of bounds.
+	//
+	// Invariant: mt.Enum(i).Descriptor() == mt.Descriptor().Fields(i).Enum()
+	Enum(i int) EnumType
+
+	// Message returns the MessageType for the ith field in Descriptor.Fields.
+	// It returns nil if the ith field is not a message or group kind.
+	// It panics if out of bounds.
+	//
+	// Invariant: mt.Message(i).Descriptor() == mt.Descriptor().Fields(i).Message()
+	Message(i int) MessageType
+}
+
 // MessageDescriptors is a list of message declarations.
 type MessageDescriptors interface {
 	// Len reports the number of messages.
@@ -279,8 +303,15 @@ type FieldDescriptor interface {
 
 	// JSONName reports the name used for JSON serialization.
 	// It is usually the camel-cased form of the field name.
+	// Extension fields are represented by the full name surrounded by brackets.
 	JSONName() string
 
+	// TextName reports the name used for text serialization.
+	// It is usually the name of the field, except that groups use the name
+	// of the inlined message, and extension fields are represented by the
+	// full name surrounded by brackets.
+	TextName() string
+
 	// HasPresence reports whether the field distinguishes between unpopulated
 	// and default values.
 	HasPresence() bool
@@ -371,6 +402,9 @@ type FieldDescriptors interface {
 	// ByJSONName returns the FieldDescriptor for a field with s as the JSON name.
 	// It returns nil if not found.
 	ByJSONName(s string) FieldDescriptor
+	// ByTextName returns the FieldDescriptor for a field with s as the text name.
+	// It returns nil if not found.
+	ByTextName(s string) FieldDescriptor
 	// ByNumber returns the FieldDescriptor for a field numbered n.
 	// It returns nil if not found.
 	ByNumber(n FieldNumber) FieldDescriptor
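type.go adds TextName and ByTextName (the name used by the text format, which differs from Name only for groups and extensions) next to the existing JSON lookups, plus the optional MessageFieldTypes interface. A small descriptor-lookup sketch, with durationpb once more as the example message:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        md := (&durationpb.Duration{}).ProtoReflect().Descriptor()
        fields := md.Fields()

        fmt.Println(fields.ByTextName("seconds").Number()) // 1
        fmt.Println(fields.ByJSONName("nanos").Number())   // 2
    }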
diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
index 5e5f96716..66dcbcd0d 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
@@ -17,24 +17,49 @@ package protoregistry
 
 import (
 	"fmt"
-	"log"
+	"os"
 	"strings"
 	"sync"
 
+	"google.golang.org/protobuf/internal/encoding/messageset"
 	"google.golang.org/protobuf/internal/errors"
+	"google.golang.org/protobuf/internal/flags"
 	"google.golang.org/protobuf/reflect/protoreflect"
 )
 
+// conflictPolicy configures the policy for handling registration conflicts.
+//
+// It can be overwritten at compile time with a linker-initialized variable:
+//	go build -ldflags "-X google.golang.org/protobuf/reflect/protoregistry.conflictPolicy=warn"
+//
+// It can be overwritten at program execution with an environment variable:
+//	GOLANG_PROTOBUF_REGISTRATION_CONFLICT=warn ./main
+//
+// Neither of the above are covered by the compatibility promise and
+// may be removed in a future release of this module.
+var conflictPolicy = "panic" // "panic" | "warn" | "ignore"
+
 // ignoreConflict reports whether to ignore a registration conflict
 // given the descriptor being registered and the error.
 // It is a variable so that the behavior is easily overridden in another file.
 var ignoreConflict = func(d protoreflect.Descriptor, err error) bool {
-	log.Printf(""+
-		"WARNING: %v\n"+
-		"A future release will panic on registration conflicts. See:\n"+
-		"https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict\n"+
-		"\n", err)
-	return true
+	const env = "GOLANG_PROTOBUF_REGISTRATION_CONFLICT"
+	const faq = "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict"
+	policy := conflictPolicy
+	if v := os.Getenv(env); v != "" {
+		policy = v
+	}
+	switch policy {
+	case "panic":
+		panic(fmt.Sprintf("%v\nSee %v\n", err, faq))
+	case "warn":
+		fmt.Fprintf(os.Stderr, "WARNING: %v\nSee %v\n\n", err, faq)
+		return true
+	case "ignore":
+		return true
+	default:
+		panic("invalid " + env + " value: " + os.Getenv(env))
+	}
 }
 
 var globalMutex sync.RWMutex
@@ -96,38 +121,7 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error {
 	}
 	path := file.Path()
 	if prev := r.filesByPath[path]; prev != nil {
-		// TODO: Remove this after some soak-in period after moving these types.
-		var prevPath string
-		const prevModule = "google.golang.org/genproto"
-		const prevVersion = "cb27e3aa (May 26th, 2020)"
-		switch path {
-		case "google/protobuf/field_mask.proto":
-			prevPath = prevModule + "/protobuf/field_mask"
-		case "google/protobuf/api.proto":
-			prevPath = prevModule + "/protobuf/api"
-		case "google/protobuf/type.proto":
-			prevPath = prevModule + "/protobuf/ptype"
-		case "google/protobuf/source_context.proto":
-			prevPath = prevModule + "/protobuf/source_context"
-		}
-		if r == GlobalFiles && prevPath != "" {
-			pkgName := strings.TrimSuffix(strings.TrimPrefix(path, "google/protobuf/"), ".proto")
-			pkgName = strings.Replace(pkgName, "_", "", -1) + "pb"
-			currPath := "google.golang.org/protobuf/types/known/" + pkgName
-			panic(fmt.Sprintf(""+
-				"duplicate registration of %q\n"+
-				"\n"+
-				"The generated definition for this file has moved:\n"+
-				"\tfrom: %q\n"+
-				"\tto:   %q\n"+
-				"A dependency on the %q module must\n"+
-				"be at version %v or higher.\n"+
-				"\n"+
-				"Upgrade the dependency by running:\n"+
-				"\tgo get -u %v\n",
-				path, prevPath, currPath, prevModule, prevVersion, prevPath))
-		}
-
+		r.checkGenProtoConflict(path)
 		err := errors.New("file %q is already registered", file.Path())
 		err = amendErrorWithCaller(err, prev, file)
 		if r == GlobalFiles && ignoreConflict(file, err) {
@@ -178,6 +172,47 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error {
 	return nil
 }
 
+// Several well-known types were hosted in the google.golang.org/genproto module
+// but were later moved to this module. To avoid a weak dependency on the
+// genproto module (and its relatively large set of transitive dependencies),
+// we rely on a registration conflict to determine whether the genproto version
+// is too old (i.e., does not contain aliases to the new type declarations).
+func (r *Files) checkGenProtoConflict(path string) {
+	if r != GlobalFiles {
+		return
+	}
+	var prevPath string
+	const prevModule = "google.golang.org/genproto"
+	const prevVersion = "cb27e3aa (May 26th, 2020)"
+	switch path {
+	case "google/protobuf/field_mask.proto":
+		prevPath = prevModule + "/protobuf/field_mask"
+	case "google/protobuf/api.proto":
+		prevPath = prevModule + "/protobuf/api"
+	case "google/protobuf/type.proto":
+		prevPath = prevModule + "/protobuf/ptype"
+	case "google/protobuf/source_context.proto":
+		prevPath = prevModule + "/protobuf/source_context"
+	default:
+		return
+	}
+	pkgName := strings.TrimSuffix(strings.TrimPrefix(path, "google/protobuf/"), ".proto")
+	pkgName = strings.Replace(pkgName, "_", "", -1) + "pb" // e.g., "field_mask" => "fieldmaskpb"
+	currPath := "google.golang.org/protobuf/types/known/" + pkgName
+	panic(fmt.Sprintf(""+
+		"duplicate registration of %q\n"+
+		"\n"+
+		"The generated definition for this file has moved:\n"+
+		"\tfrom: %q\n"+
+		"\tto:   %q\n"+
+		"A dependency on the %q module must\n"+
+		"be at version %v or higher.\n"+
+		"\n"+
+		"Upgrade the dependency by running:\n"+
+		"\tgo get -u %v\n",
+		path, prevPath, currPath, prevModule, prevVersion, prevPath))
+}
+
 // FindDescriptorByName looks up a descriptor by the full name.
 //
 // This returns (nil, NotFound) if not found.
@@ -560,13 +595,25 @@ func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumTyp
 	return nil, NotFound
 }
 
-// FindMessageByName looks up a message by its full name.
-// E.g., "google.protobuf.Any"
+// FindMessageByName looks up a message by its full name,
+// e.g. "google.protobuf.Any".
 //
-// This return (nil, NotFound) if not found.
+// This returns (nil, NotFound) if not found.
 func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
-	// The full name by itself is a valid URL.
-	return r.FindMessageByURL(string(message))
+	if r == nil {
+		return nil, NotFound
+	}
+	if r == GlobalTypes {
+		globalMutex.RLock()
+		defer globalMutex.RUnlock()
+	}
+	if v := r.typesByName[message]; v != nil {
+		if mt, _ := v.(protoreflect.MessageType); mt != nil {
+			return mt, nil
+		}
+		return nil, errors.New("found wrong type: got %v, want message", typeName(v))
+	}
+	return nil, NotFound
 }
 
 // FindMessageByURL looks up a message by a URL identifier.
@@ -574,6 +621,8 @@ func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.M
 //
 // This returns (nil, NotFound) if not found.
 func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) {
+	// This function is similar to FindMessageByName but
+	// truncates anything before and including '/' in the URL.
 	if r == nil {
 		return nil, NotFound
 	}
@@ -613,6 +662,26 @@ func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.E
 		if xt, _ := v.(protoreflect.ExtensionType); xt != nil {
 			return xt, nil
 		}
+
+		// MessageSet extensions are special in that the name of the extension
+		// is the name of the message type used to extend the MessageSet.
+		// This naming scheme is used by text and JSON serialization.
+		//
+		// This feature is protected by the ProtoLegacy flag since MessageSets
+		// are a proto1 feature that is long deprecated.
+		if flags.ProtoLegacy {
+			if _, ok := v.(protoreflect.MessageType); ok {
+				field := field.Append(messageset.ExtensionName)
+				if v := r.typesByName[field]; v != nil {
+					if xt, _ := v.(protoreflect.ExtensionType); xt != nil {
+						if messageset.IsMessageSetExtension(xt.TypeDescriptor()) {
+							return xt, nil
+						}
+					}
+				}
+			}
+		}
+
 		return nil, errors.New("found wrong type: got %v, want extension", typeName(v))
 	}
 	return nil, NotFound
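Two behavioural changes in registry.go stand out: duplicate registrations against the global registries now panic by default (tunable through the conflictPolicy linker variable or the GOLANG_PROTOBUF_REGISTRATION_CONFLICT environment variable shown above), and FindMessageByName resolves directly from typesByName instead of going through the URL parser. A usage sketch:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/reflect/protoregistry"
        _ "google.golang.org/protobuf/types/known/durationpb" // registers google.protobuf.Duration
    )

    func main() {
        mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.Duration")
        if err != nil {
            panic(err)
        }
        fmt.Println(mt.Descriptor().FullName()) // google.protobuf.Duration
    }

    // To downgrade registration conflicts from a panic to a warning at run time:
    //   GOLANG_PROTOBUF_REGISTRATION_CONFLICT=warn ./main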
diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index 824237856..f77239fc3 100644
--- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -3557,16 +3557,15 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{
 	0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67,
 	0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12,
 	0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e,
-	0x64, 0x42, 0x8f, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72,
-	0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x3e, 0x67,
-	0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
-	0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
-	0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
-	0x6f, 0x72, 0x3b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0xf8, 0x01, 0x01,
-	0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74,
-	0x69, 0x6f, 0x6e,
+	0x64, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
+	0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64,
+	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02,
+	0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72,
+	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f,
+	0x6e,
 }
 
 var (
diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
index 82a473e26..8c10797b9 100644
--- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -166,10 +166,13 @@ import (
 //  Example 4: Pack and unpack a message in Go
 //
 //      foo := &pb.Foo{...}
-//      any, err := ptypes.MarshalAny(foo)
+//      any, err := anypb.New(foo)
+//      if err != nil {
+//        ...
+//      }
 //      ...
 //      foo := &pb.Foo{}
-//      if err := ptypes.UnmarshalAny(any, foo); err != nil {
+//      if err := any.UnmarshalTo(foo); err != nil {
 //        ...
 //      }
 //
@@ -420,14 +423,15 @@ var file_google_protobuf_any_proto_rawDesc = []byte{
 	0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18,
 	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14,
 	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76,
-	0x61, 0x6c, 0x75, 0x65, 0x42, 0x6f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
 	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79,
-	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x25, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
-	0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0xa2, 0x02,
-	0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e,
-	0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f,
+	0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65,
+	0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x33,
 }
 
 var (
diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
index f7a110994..a583ca2f6 100644
--- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
@@ -303,16 +303,16 @@ var file_google_protobuf_duration_proto_rawDesc = []byte{
 	0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a,
 	0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07,
 	0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73,
-	0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x7c, 0x0a,
-	0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72,
-	0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
-	0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
-	0x6e, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c,
-	0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x33,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01,
+	0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50,
+	0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
+	0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+	0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64,
+	0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47,
+	0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79,
+	0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
 }
 
 var (
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
index c25e4bd7d..c9ae92132 100644
--- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -134,7 +134,16 @@ import (
 //         .setNanos((int) ((millis % 1000) * 1000000)).build();
 //
 //
-// Example 5: Compute Timestamp from current time in Python.
+// Example 5: Compute Timestamp from Java `Instant.now()`.
+//
+//     Instant now = Instant.now();
+//
+//     Timestamp timestamp =
+//         Timestamp.newBuilder().setSeconds(now.getEpochSecond())
+//             .setNanos(now.getNano()).build();
+//
+//
+// Example 6: Compute Timestamp from current time in Python.
 //
 //     timestamp = Timestamp()
 //     timestamp.GetCurrentTime()
@@ -306,15 +315,15 @@ var file_google_protobuf_timestamp_proto_rawDesc = []byte{
 	0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
 	0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e,
 	0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42,
-	0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
-	0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
-	0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65,
-	0x73, 0x74, 0x61, 0x6d, 0x70, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02,
-	0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62,
-	0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+	0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+	0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77,
+	0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01,
+	0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f,
+	0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
 }
 
 var (
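Annotation (not part of the diff): the timestamp.pb.go hunks only touch the upstream doc examples (a new Java Instant.now() example) and the go_package path encoded in the raw descriptor. For orientation, a hedged Go equivalent of those examples using the same vendored package:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Build a Timestamp from the current time, the Go analogue of the
	// Java/Python examples in the updated comment.
	ts := timestamppb.Now()
	fmt.Println(ts.AsTime().UTC())
}
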
diff --git a/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go
index 43ef7cb14..e511ad6f7 100644
--- a/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go
+++ b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go
@@ -411,6 +411,10 @@ type CodeGeneratorResponse_File struct {
 	InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"`
 	// The file contents.
 	Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
+	// Information describing the file content being inserted. If an insertion
+	// point is used, this information will be appropriately offset and inserted
+	// into the code generation metadata for the generated files.
+	GeneratedCodeInfo *descriptorpb.GeneratedCodeInfo `protobuf:"bytes,16,opt,name=generated_code_info,json=generatedCodeInfo" json:"generated_code_info,omitempty"`
 }
 
 func (x *CodeGeneratorResponse_File) Reset() {
@@ -466,6 +470,13 @@ func (x *CodeGeneratorResponse_File) GetContent() string {
 	return ""
 }
 
+func (x *CodeGeneratorResponse_File) GetGeneratedCodeInfo() *descriptorpb.GeneratedCodeInfo {
+	if x != nil {
+		return x.GeneratedCodeInfo
+	}
+	return nil
+}
+
 var File_google_protobuf_compiler_plugin_proto protoreflect.FileDescriptor
 
 var file_google_protobuf_compiler_plugin_proto_rawDesc = []byte{
@@ -496,7 +507,7 @@ var file_google_protobuf_compiler_plugin_proto_rawDesc = []byte{
 	0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
 	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69,
 	0x6c, 0x65, 0x72, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x63, 0x6f, 0x6d,
-	0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xbf, 0x02, 0x0a,
+	0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x94, 0x03, 0x0a,
 	0x15, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65,
 	0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18,
 	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2d, 0x0a, 0x12,
@@ -507,23 +518,27 @@ var file_google_protobuf_compiler_plugin_proto_rawDesc = []byte{
 	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70,
 	0x69, 0x6c, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
 	0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x52,
-	0x04, 0x66, 0x69, 0x6c, 0x65, 0x1a, 0x5d, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a,
-	0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
-	0x65, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70,
-	0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x6e, 0x73, 0x65,
-	0x72, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f,
-	0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6e,
-	0x74, 0x65, 0x6e, 0x74, 0x22, 0x38, 0x0a, 0x07, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12,
-	0x10, 0x0a, 0x0c, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10,
-	0x00, 0x12, 0x1b, 0x0a, 0x17, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x50, 0x52, 0x4f,
-	0x54, 0x4f, 0x33, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x42, 0x67,
-	0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x42, 0x0c,
-	0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x5a, 0x39, 0x67, 0x69,
-	0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d,
-	0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x3b, 0x70, 0x6c,
-	0x75, 0x67, 0x69, 0x6e, 0x5f, 0x67, 0x6f,
+	0x04, 0x66, 0x69, 0x6c, 0x65, 0x1a, 0xb1, 0x01, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12,
+	0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+	0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+	0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x6e, 0x73,
+	0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63,
+	0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f,
+	0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x52, 0x0a, 0x13, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+	0x65, 0x64, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x10, 0x20, 0x01,
+	0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f,
+	0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x11, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65,
+	0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x38, 0x0a, 0x07, 0x46, 0x65, 0x61,
+	0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f,
+	0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52,
+	0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41,
+	0x4c, 0x10, 0x01, 0x42, 0x57, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69,
+	0x6c, 0x65, 0x72, 0x42, 0x0c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+	0x73, 0x5a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
+	0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79,
+	0x70, 0x65, 0x73, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x70, 0x62,
 }
 
 var (
@@ -547,16 +562,18 @@ var file_google_protobuf_compiler_plugin_proto_goTypes = []interface{}{
 	(*CodeGeneratorResponse)(nil),            // 3: google.protobuf.compiler.CodeGeneratorResponse
 	(*CodeGeneratorResponse_File)(nil),       // 4: google.protobuf.compiler.CodeGeneratorResponse.File
 	(*descriptorpb.FileDescriptorProto)(nil), // 5: google.protobuf.FileDescriptorProto
+	(*descriptorpb.GeneratedCodeInfo)(nil),   // 6: google.protobuf.GeneratedCodeInfo
 }
 var file_google_protobuf_compiler_plugin_proto_depIdxs = []int32{
 	5, // 0: google.protobuf.compiler.CodeGeneratorRequest.proto_file:type_name -> google.protobuf.FileDescriptorProto
 	1, // 1: google.protobuf.compiler.CodeGeneratorRequest.compiler_version:type_name -> google.protobuf.compiler.Version
 	4, // 2: google.protobuf.compiler.CodeGeneratorResponse.file:type_name -> google.protobuf.compiler.CodeGeneratorResponse.File
-	3, // [3:3] is the sub-list for method output_type
-	3, // [3:3] is the sub-list for method input_type
-	3, // [3:3] is the sub-list for extension type_name
-	3, // [3:3] is the sub-list for extension extendee
-	0, // [0:3] is the sub-list for field type_name
+	6, // 3: google.protobuf.compiler.CodeGeneratorResponse.File.generated_code_info:type_name -> google.protobuf.GeneratedCodeInfo
+	4, // [4:4] is the sub-list for method output_type
+	4, // [4:4] is the sub-list for method input_type
+	4, // [4:4] is the sub-list for extension type_name
+	4, // [4:4] is the sub-list for extension extendee
+	0, // [0:4] is the sub-list for field type_name
 }
 
 func init() { file_google_protobuf_compiler_plugin_proto_init() }
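Annotation (not part of the diff): the plugin.pb.go changes add the new generated_code_info field (tag 16) to CodeGeneratorResponse.File, along with its getter and the matching descriptor and depIdxs entries. A minimal sketch of a plugin response that populates the new field; the file name and content below are purely illustrative:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/pluginpb"
)

func buildResponse() *pluginpb.CodeGeneratorResponse {
	file := &pluginpb.CodeGeneratorResponse_File{
		Name:    proto.String("example.pb.go"),       // illustrative output name
		Content: proto.String("// generated code\n"), // illustrative content
		// New in this update: source mapping for the generated content.
		GeneratedCodeInfo: &descriptorpb.GeneratedCodeInfo{},
	}
	return &pluginpb.CodeGeneratorResponse{
		File: []*pluginpb.CodeGeneratorResponse_File{file},
	}
}

func main() {
	resp := buildResponse()
	fmt.Println(len(resp.GetFile())) // 1
}
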
diff --git a/vendor/modules.txt b/vendor/modules.txt
index fb8a8859a..1e6eb73c6 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,4 +1,5 @@
-# cloud.google.com/go v0.79.0
+# cloud.google.com/go v0.80.0
+## explicit
 cloud.google.com/go
 cloud.google.com/go/compute/metadata
 cloud.google.com/go/iam
@@ -17,7 +18,7 @@ github.com/VictoriaMetrics/fastcache
 github.com/VictoriaMetrics/fasthttp
 github.com/VictoriaMetrics/fasthttp/fasthttputil
 github.com/VictoriaMetrics/fasthttp/stackless
-# github.com/VictoriaMetrics/metrics v1.17.1
+# github.com/VictoriaMetrics/metrics v1.17.2
 ## explicit
 github.com/VictoriaMetrics/metrics
 # github.com/VictoriaMetrics/metricsql v0.14.0
@@ -26,7 +27,7 @@ github.com/VictoriaMetrics/metricsql
 github.com/VictoriaMetrics/metricsql/binaryop
 # github.com/VividCortex/ewma v1.1.1
 github.com/VividCortex/ewma
-# github.com/aws/aws-sdk-go v1.37.31
+# github.com/aws/aws-sdk-go v1.38.4
 ## explicit
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/arn
@@ -84,7 +85,7 @@ github.com/beorn7/perks/quantile
 # github.com/cespare/xxhash/v2 v2.1.1
 ## explicit
 github.com/cespare/xxhash/v2
-# github.com/cheggaaa/pb/v3 v3.0.6
+# github.com/cheggaaa/pb/v3 v3.0.7
 ## explicit
 github.com/cheggaaa/pb/v3
 github.com/cheggaaa/pb/v3/termutil
@@ -101,7 +102,7 @@ github.com/go-kit/kit/log/level
 github.com/go-logfmt/logfmt
 # github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e
 github.com/golang/groupcache/lru
-# github.com/golang/protobuf v1.4.3
+# github.com/golang/protobuf v1.5.1
 github.com/golang/protobuf/internal/gengogrpc
 github.com/golang/protobuf/proto
 github.com/golang/protobuf/protoc-gen-go
@@ -135,7 +136,7 @@ github.com/klauspost/compress/snappy
 github.com/klauspost/compress/zlib
 github.com/klauspost/compress/zstd
 github.com/klauspost/compress/zstd/internal/xxhash
-# github.com/lithammer/go-jump-consistent-hash v1.0.1
+# github.com/lithammer/go-jump-consistent-hash v1.0.2
 ## explicit
 github.com/lithammer/go-jump-consistent-hash
 # github.com/mattn/go-colorable v0.1.8
@@ -151,19 +152,18 @@ github.com/matttproud/golang_protobuf_extensions/pbutil
 github.com/oklog/ulid
 # github.com/pkg/errors v0.9.1
 github.com/pkg/errors
-# github.com/prometheus/client_golang v1.9.0
+# github.com/prometheus/client_golang v1.10.0
 ## explicit
 github.com/prometheus/client_golang/prometheus
 github.com/prometheus/client_golang/prometheus/internal
 # github.com/prometheus/client_model v0.2.0
 github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.19.0
+# github.com/prometheus/common v0.20.0
 ## explicit
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
 github.com/prometheus/common/model
 # github.com/prometheus/procfs v0.6.0
-## explicit
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util
@@ -240,7 +240,7 @@ golang.org/x/lint/golint
 ## explicit
 golang.org/x/mod/module
 golang.org/x/mod/semver
-# golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4
+# golang.org/x/net v0.0.0-20210324205630-d1beb07c2056
 ## explicit
 golang.org/x/net/context
 golang.org/x/net/context/ctxhttp
@@ -250,7 +250,7 @@ golang.org/x/net/http2/hpack
 golang.org/x/net/idna
 golang.org/x/net/internal/timeseries
 golang.org/x/net/trace
-# golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84
+# golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558
 ## explicit
 golang.org/x/oauth2
 golang.org/x/oauth2/google
@@ -260,7 +260,7 @@ golang.org/x/oauth2/jws
 golang.org/x/oauth2/jwt
 # golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
 golang.org/x/sync/errgroup
-# golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa
+# golang.org/x/sys v0.0.0-20210324051608-47abb6519492
 ## explicit
 golang.org/x/sys/execabs
 golang.org/x/sys/internal/unsafeheader
@@ -287,7 +287,7 @@ golang.org/x/tools/internal/imports
 # golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
 golang.org/x/xerrors
 golang.org/x/xerrors/internal
-# google.golang.org/api v0.42.0
+# google.golang.org/api v0.43.0
 ## explicit
 google.golang.org/api/googleapi
 google.golang.org/api/googleapi/transport
@@ -314,7 +314,7 @@ google.golang.org/appengine/internal/modules
 google.golang.org/appengine/internal/remote_api
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20210315173758-2651cd453018
+# google.golang.org/genproto v0.0.0-20210325141258-5636347f2b14
 ## explicit
 google.golang.org/genproto/googleapis/api/annotations
 google.golang.org/genproto/googleapis/iam/v1
@@ -366,7 +366,7 @@ google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.25.0
+# google.golang.org/protobuf v1.26.0
 google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo
 google.golang.org/protobuf/compiler/protogen
 google.golang.org/protobuf/encoding/prototext
@@ -379,13 +379,12 @@ google.golang.org/protobuf/internal/encoding/messageset
 google.golang.org/protobuf/internal/encoding/tag
 google.golang.org/protobuf/internal/encoding/text
 google.golang.org/protobuf/internal/errors
-google.golang.org/protobuf/internal/fieldsort
 google.golang.org/protobuf/internal/filedesc
 google.golang.org/protobuf/internal/filetype
 google.golang.org/protobuf/internal/flags
 google.golang.org/protobuf/internal/genid
 google.golang.org/protobuf/internal/impl
-google.golang.org/protobuf/internal/mapsort
+google.golang.org/protobuf/internal/order
 google.golang.org/protobuf/internal/pragma
 google.golang.org/protobuf/internal/set
 google.golang.org/protobuf/internal/strs