mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2024-11-21 14:44:00 +00:00
make vendor-update
This commit is contained in:
parent
84b57e8974
commit
293b541784
53 changed files with 3080 additions and 1647 deletions
16
go.mod
16
go.mod
|
@ -5,27 +5,27 @@ require (
|
|||
cloud.google.com/go/storage v1.4.0
|
||||
github.com/VictoriaMetrics/fastcache v1.5.4
|
||||
github.com/VictoriaMetrics/metrics v1.9.2
|
||||
github.com/aws/aws-sdk-go v1.25.43
|
||||
github.com/aws/aws-sdk-go v1.25.48
|
||||
github.com/cespare/xxhash/v2 v2.1.1
|
||||
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 // indirect
|
||||
github.com/golang/snappy v0.0.1
|
||||
github.com/jstemmer/go-junit-report v0.9.1 // indirect
|
||||
github.com/klauspost/compress v1.9.2
|
||||
github.com/klauspost/compress v1.9.4
|
||||
github.com/valyala/fastjson v1.4.1
|
||||
github.com/valyala/fastrand v1.0.0
|
||||
github.com/valyala/gozstd v1.6.3
|
||||
github.com/valyala/histogram v1.0.1
|
||||
github.com/valyala/quicktemplate v1.4.1
|
||||
go.opencensus.io v0.22.2 // indirect
|
||||
golang.org/x/exp v0.0.0-20191127035308-9964a5a80460 // indirect
|
||||
golang.org/x/exp v0.0.0-20191129062945-2f5052295587 // indirect
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f // indirect
|
||||
golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933 // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20191122200657-5d9234df094c // indirect
|
||||
golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2
|
||||
golang.org/x/tools v0.0.0-20191127064951-724660f1afeb // indirect
|
||||
golang.org/x/net v0.0.0-20191207000613-e7e4b65ae663 // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 // indirect
|
||||
golang.org/x/sys v0.0.0-20191206220618-eeba5f6aabab
|
||||
golang.org/x/tools v0.0.0-20191206204035-259af5ff87bd // indirect
|
||||
google.golang.org/api v0.14.0
|
||||
google.golang.org/appengine v1.6.5 // indirect
|
||||
google.golang.org/genproto v0.0.0-20191115221424-83cc0476cb11 // indirect
|
||||
google.golang.org/genproto v0.0.0-20191206224255-0243a4be9c8f // indirect
|
||||
google.golang.org/grpc v1.25.1 // indirect
|
||||
)
|
||||
|
||||
|
|
32
go.sum
32
go.sum
|
@ -26,8 +26,8 @@ github.com/VictoriaMetrics/metrics v1.9.2 h1:+CNV5OOPe1PuHff3AsOa5cbN8qCWkWwZZA7
|
|||
github.com/VictoriaMetrics/metrics v1.9.2/go.mod h1:LU2j9qq7xqZYXz8tF3/RQnB2z2MbZms5TDiIg9/NHiQ=
|
||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
|
||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
|
||||
github.com/aws/aws-sdk-go v1.25.43 h1:R5YqHQFIulYVfgRySz9hvBRTWBjudISa+r0C8XQ1ufg=
|
||||
github.com/aws/aws-sdk-go v1.25.43/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go v1.25.48 h1:J82DYDGZHOKHdhx6hD24Tm30c2C3GchYGfN0mf9iKUk=
|
||||
github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
|
@ -76,8 +76,8 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X
|
|||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.9.2 h1:LfVyl+ZlLlLDeQ/d2AqfGIIH4qEDu0Ed2S5GyhCWIWY=
|
||||
github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.9.4 h1:xhvAeUPQ2drNUhKtrGdTGNvV9nNafHMUkRyLkzxJoB4=
|
||||
github.com/klauspost/compress v1.9.4/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
|
@ -121,8 +121,8 @@ golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxT
|
|||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136 h1:A1gGSx58LAGVHUUsOf7IiR0u8Xb6W51gRwfDBhkdcaw=
|
||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||
golang.org/x/exp v0.0.0-20191127035308-9964a5a80460 h1:zNL062UG4d0GC48Bhm+lEI9lTOMsEHNL0WITb/cw7/s=
|
||||
golang.org/x/exp v0.0.0-20191127035308-9964a5a80460/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20191129062945-2f5052295587 h1:5Uz0rkjCFu9BC9gCRN7EkwVvhNyQgGWb8KNJrPwBoHY=
|
||||
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
|
@ -150,14 +150,14 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn
|
|||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933 h1:e6HwijUxhDe+hPNjZQQn9bA5PW3vNmnN64U2ZW759Lk=
|
||||
golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191207000613-e7e4b65ae663 h1:Dd5RoEW+yQi+9DMybroBctIdyiwuNT7sJFMC27/6KxI=
|
||||
golang.org/x/net v0.0.0-20191207000613-e7e4b65ae663/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191122200657-5d9234df094c h1:HjRaKPaiWks0f5tA6ELVF7ZfqSppfPwOEEAvsrKUTO4=
|
||||
golang.org/x/oauth2 v0.0.0-20191122200657-5d9234df094c/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@ -172,8 +172,8 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2 h1:/J2nHFg1MTqaRLFO7M+J78ASNsJoz3r0cvHBPQ77fsE=
|
||||
golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191206220618-eeba5f6aabab h1:FvshnhkKW+LO3HWHodML8kuVX8rnJTxKm9dFPuI68UM=
|
||||
golang.org/x/sys v0.0.0-20191206220618-eeba5f6aabab/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
|
@ -197,8 +197,8 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn
|
|||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191127064951-724660f1afeb h1:K4JMHRJSgd1q/yXZNrKKyneQJcLm1rn7JsokEs/xE9I=
|
||||
golang.org/x/tools v0.0.0-20191127064951-724660f1afeb/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191206204035-259af5ff87bd h1:Zc7EU2PqpsNeIfOoVA7hvQX4cS3YDJEs5KlfatT3hLo=
|
||||
golang.org/x/tools v0.0.0-20191206204035-259af5ff87bd/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
|
@ -221,8 +221,8 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98
|
|||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191115221424-83cc0476cb11 h1:51D++eCgOHufw5VfDE9Uzqyyc+OyQIjb9hkYy9LN5Fk=
|
||||
google.golang.org/genproto v0.0.0-20191115221424-83cc0476cb11/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191206224255-0243a4be9c8f h1:naitw5DILWPQvG0oG04mR9jF8fmKpRdW3E3zzKA4D0Y=
|
||||
google.golang.org/genproto v0.0.0-20191206224255-0243a4be9c8f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
|
|
92
vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go
generated
vendored
Normal file
92
vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go
generated
vendored
Normal file
|
@ -0,0 +1,92 @@
|
|||
// Package arn provides a parser for interacting with Amazon Resource Names.
|
||||
package arn
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
arnDelimiter = ":"
|
||||
arnSections = 6
|
||||
arnPrefix = "arn:"
|
||||
|
||||
// zero-indexed
|
||||
sectionPartition = 1
|
||||
sectionService = 2
|
||||
sectionRegion = 3
|
||||
sectionAccountID = 4
|
||||
sectionResource = 5
|
||||
|
||||
// errors
|
||||
invalidPrefix = "arn: invalid prefix"
|
||||
invalidSections = "arn: not enough sections"
|
||||
)
|
||||
|
||||
// ARN captures the individual fields of an Amazon Resource Name.
|
||||
// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information.
|
||||
type ARN struct {
|
||||
// The partition that the resource is in. For standard AWS regions, the partition is "aws". If you have resources in
|
||||
// other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China
|
||||
// (Beijing) region is "aws-cn".
|
||||
Partition string
|
||||
|
||||
// The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of
|
||||
// namespaces, see
|
||||
// http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces.
|
||||
Service string
|
||||
|
||||
// The region the resource resides in. Note that the ARNs for some resources do not require a region, so this
|
||||
// component might be omitted.
|
||||
Region string
|
||||
|
||||
// The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the
|
||||
// ARNs for some resources don't require an account number, so this component might be omitted.
|
||||
AccountID string
|
||||
|
||||
// The content of this part of the ARN varies by service. It often includes an indicator of the type of resource —
|
||||
// for example, an IAM user or Amazon RDS database - followed by a slash (/) or a colon (:), followed by the
|
||||
// resource name itself. Some services allows paths for resource names, as described in
|
||||
// http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths.
|
||||
Resource string
|
||||
}
|
||||
|
||||
// Parse parses an ARN into its constituent parts.
|
||||
//
|
||||
// Some example ARNs:
|
||||
// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment
|
||||
// arn:aws:iam::123456789012:user/David
|
||||
// arn:aws:rds:eu-west-1:123456789012:db:mysql-db
|
||||
// arn:aws:s3:::my_corporate_bucket/exampleobject.png
|
||||
func Parse(arn string) (ARN, error) {
|
||||
if !strings.HasPrefix(arn, arnPrefix) {
|
||||
return ARN{}, errors.New(invalidPrefix)
|
||||
}
|
||||
sections := strings.SplitN(arn, arnDelimiter, arnSections)
|
||||
if len(sections) != arnSections {
|
||||
return ARN{}, errors.New(invalidSections)
|
||||
}
|
||||
return ARN{
|
||||
Partition: sections[sectionPartition],
|
||||
Service: sections[sectionService],
|
||||
Region: sections[sectionRegion],
|
||||
AccountID: sections[sectionAccountID],
|
||||
Resource: sections[sectionResource],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// IsARN returns whether the given string is an arn
|
||||
// by looking for whether the string starts with arn:
|
||||
func IsARN(arn string) bool {
|
||||
return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") > arnSections-1
|
||||
}
|
||||
|
||||
// String returns the canonical representation of the ARN
|
||||
func (arn ARN) String() string {
|
||||
return arnPrefix +
|
||||
arn.Partition + arnDelimiter +
|
||||
arn.Service + arnDelimiter +
|
||||
arn.Region + arnDelimiter +
|
||||
arn.AccountID + arnDelimiter +
|
||||
arn.Resource
|
||||
}
|
15
vendor/github.com/aws/aws-sdk-go/aws/config.go
generated
vendored
15
vendor/github.com/aws/aws-sdk-go/aws/config.go
generated
vendored
|
@ -161,6 +161,10 @@ type Config struct {
|
|||
// on GetObject API calls.
|
||||
S3DisableContentMD5Validation *bool
|
||||
|
||||
// Set this to `true` to have the S3 service client to use the region specified
|
||||
// in the ARN, when an ARN is provided as an argument to a bucket parameter.
|
||||
S3UseARNRegion *bool
|
||||
|
||||
// Set this to `true` to disable the EC2Metadata client from overriding the
|
||||
// default http.Client's Timeout. This is helpful if you do not want the
|
||||
// EC2Metadata client to create a new http.Client. This options is only
|
||||
|
@ -385,6 +389,13 @@ func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config {
|
|||
|
||||
}
|
||||
|
||||
// WithS3UseARNRegion sets a config S3UseARNRegion value and
|
||||
// returning a Config pointer for chaining
|
||||
func (c *Config) WithS3UseARNRegion(enable bool) *Config {
|
||||
c.S3UseARNRegion = &enable
|
||||
return c
|
||||
}
|
||||
|
||||
// WithUseDualStack sets a config UseDualStack value returning a Config
|
||||
// pointer for chaining.
|
||||
func (c *Config) WithUseDualStack(enable bool) *Config {
|
||||
|
@ -513,6 +524,10 @@ func mergeInConfig(dst *Config, other *Config) {
|
|||
dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation
|
||||
}
|
||||
|
||||
if other.S3UseARNRegion != nil {
|
||||
dst.S3UseARNRegion = other.S3UseARNRegion
|
||||
}
|
||||
|
||||
if other.UseDualStack != nil {
|
||||
dst.UseDualStack = other.UseDualStack
|
||||
}
|
||||
|
|
57
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
generated
vendored
57
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
generated
vendored
|
@ -2077,6 +2077,27 @@ var awsPartition = partition{
|
|||
},
|
||||
},
|
||||
},
|
||||
"iotsecuredtunneling": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"iotthingsgraph": service{
|
||||
Defaults: endpoint{
|
||||
CredentialScope: credentialScope{
|
||||
|
@ -2733,8 +2754,30 @@ var awsPartition = partition{
|
|||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
"fips-us-east-1": endpoint{
|
||||
Hostname: "pinpoint-fips.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
},
|
||||
"fips-us-west-2": endpoint{
|
||||
Hostname: "pinpoint-fips.us-west-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-2",
|
||||
},
|
||||
},
|
||||
"us-east-1": endpoint{
|
||||
Hostname: "pinpoint.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
},
|
||||
"us-west-2": endpoint{
|
||||
Hostname: "pinpoint.us-west-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"polly": service{
|
||||
|
@ -3316,6 +3359,16 @@ var awsPartition = partition{
|
|||
},
|
||||
},
|
||||
},
|
||||
"schemas": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"sdb": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
|
|
25
vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
generated
vendored
25
vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
generated
vendored
|
@ -4,6 +4,7 @@ import (
|
|||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
|
@ -141,6 +142,12 @@ type envConfig struct {
|
|||
// AWS_S3_US_EAST_1_REGIONAL_ENDPOINT=regional
|
||||
// This can take value as `regional` or `legacy`
|
||||
S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint
|
||||
|
||||
// Specifies if the S3 service should allow ARNs to direct the region
|
||||
// the client's requests are sent to.
|
||||
//
|
||||
// AWS_S3_USE_ARN_REGION=true
|
||||
S3UseARNRegion bool
|
||||
}
|
||||
|
||||
var (
|
||||
|
@ -201,6 +208,9 @@ var (
|
|||
s3UsEast1RegionalEndpoint = []string{
|
||||
"AWS_S3_US_EAST_1_REGIONAL_ENDPOINT",
|
||||
}
|
||||
s3UseARNRegionEnvKey = []string{
|
||||
"AWS_S3_USE_ARN_REGION",
|
||||
}
|
||||
)
|
||||
|
||||
// loadEnvConfig retrieves the SDK's environment configuration.
|
||||
|
@ -307,6 +317,21 @@ func envConfigLoad(enableSharedConfig bool) (envConfig, error) {
|
|||
}
|
||||
}
|
||||
|
||||
var s3UseARNRegion string
|
||||
setFromEnvVal(&s3UseARNRegion, s3UseARNRegionEnvKey)
|
||||
if len(s3UseARNRegion) != 0 {
|
||||
switch {
|
||||
case strings.EqualFold(s3UseARNRegion, "false"):
|
||||
cfg.S3UseARNRegion = false
|
||||
case strings.EqualFold(s3UseARNRegion, "true"):
|
||||
cfg.S3UseARNRegion = true
|
||||
default:
|
||||
return envConfig{}, fmt.Errorf(
|
||||
"invalid value for environment variable, %s=%s, need true or false",
|
||||
s3UseARNRegionEnvKey[0], s3UseARNRegion)
|
||||
}
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
|
|
9
vendor/github.com/aws/aws-sdk-go/aws/session/session.go
generated
vendored
9
vendor/github.com/aws/aws-sdk-go/aws/session/session.go
generated
vendored
|
@ -580,6 +580,14 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config,
|
|||
cfg.Credentials = creds
|
||||
}
|
||||
|
||||
cfg.S3UseARNRegion = userCfg.S3UseARNRegion
|
||||
if cfg.S3UseARNRegion == nil {
|
||||
cfg.S3UseARNRegion = &envCfg.S3UseARNRegion
|
||||
}
|
||||
if cfg.S3UseARNRegion == nil {
|
||||
cfg.S3UseARNRegion = &sharedCfg.S3UseARNRegion
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -643,6 +651,7 @@ func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Confi
|
|||
return client.Config{
|
||||
Config: s.Config,
|
||||
Handlers: s.Handlers,
|
||||
PartitionID: resolved.PartitionID,
|
||||
Endpoint: resolved.URL,
|
||||
SigningRegion: resolved.SigningRegion,
|
||||
SigningNameDerived: resolved.SigningNameDerived,
|
||||
|
|
21
vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
generated
vendored
21
vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
generated
vendored
|
@ -51,6 +51,9 @@ const (
|
|||
// loading configuration from the config files if another profile name
|
||||
// is not provided.
|
||||
DefaultSharedConfigProfile = `default`
|
||||
|
||||
// S3 ARN Region Usage
|
||||
s3UseARNRegionKey = "s3_use_arn_region"
|
||||
)
|
||||
|
||||
// sharedConfig represents the configuration fields of the SDK config files.
|
||||
|
@ -89,6 +92,7 @@ type sharedConfig struct {
|
|||
//
|
||||
// endpoint_discovery_enabled = true
|
||||
EnableEndpointDiscovery *bool
|
||||
|
||||
// CSM Options
|
||||
CSMEnabled *bool
|
||||
CSMHost string
|
||||
|
@ -106,6 +110,12 @@ type sharedConfig struct {
|
|||
// s3_us_east_1_regional_endpoint = regional
|
||||
// This can take value as `LegacyS3UsEast1Endpoint` or `RegionalS3UsEast1Endpoint`
|
||||
S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint
|
||||
|
||||
// Specifies if the S3 service should allow ARNs to direct the region
|
||||
// the client's requests are sent to.
|
||||
//
|
||||
// s3_use_arn_region=true
|
||||
S3UseARNRegion bool
|
||||
}
|
||||
|
||||
type sharedConfigFile struct {
|
||||
|
@ -306,6 +316,8 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
|
|||
updateString(&cfg.CSMPort, section, csmPortKey)
|
||||
updateString(&cfg.CSMClientID, section, csmClientIDKey)
|
||||
|
||||
updateBool(&cfg.S3UseARNRegion, section, s3UseARNRegionKey)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -398,6 +410,15 @@ func updateString(dst *string, section ini.Section, key string) {
|
|||
*dst = section.String(key)
|
||||
}
|
||||
|
||||
// updateBool will only update the dst with the value in the section key, key
|
||||
// is present in the section.
|
||||
func updateBool(dst *bool, section ini.Section, key string) {
|
||||
if !section.Has(key) {
|
||||
return
|
||||
}
|
||||
*dst = section.Bool(key)
|
||||
}
|
||||
|
||||
// updateBoolPtr will only update the dst with the value in the section key,
|
||||
// key is present in the section.
|
||||
func updateBoolPtr(dst **bool, section ini.Section, key string) {
|
||||
|
|
2
vendor/github.com/aws/aws-sdk-go/aws/version.go
generated
vendored
2
vendor/github.com/aws/aws-sdk-go/aws/version.go
generated
vendored
|
@ -5,4 +5,4 @@ package aws
|
|||
const SDKName = "aws-sdk-go"
|
||||
|
||||
// SDKVersion is the version of this SDK
|
||||
const SDKVersion = "1.25.43"
|
||||
const SDKVersion = "1.25.48"
|
||||
|
|
2276
vendor/github.com/aws/aws-sdk-go/service/s3/api.go
generated
vendored
2276
vendor/github.com/aws/aws-sdk-go/service/s3/api.go
generated
vendored
File diff suppressed because it is too large
Load diff
10
vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
generated
vendored
10
vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
generated
vendored
|
@ -4,6 +4,7 @@ import (
|
|||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/internal/s3err"
|
||||
"github.com/aws/aws-sdk-go/service/s3/internal/arn"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
@ -13,7 +14,7 @@ func init() {
|
|||
|
||||
func defaultInitClientFn(c *client.Client) {
|
||||
// Support building custom endpoints based on config
|
||||
c.Handlers.Build.PushFront(updateEndpointForS3Config)
|
||||
c.Handlers.Build.PushFront(endpointHandler)
|
||||
|
||||
// Require SSL when using SSE keys
|
||||
c.Handlers.Validate.PushBack(validateSSERequiresSSL)
|
||||
|
@ -27,7 +28,7 @@ func defaultInitClientFn(c *client.Client) {
|
|||
}
|
||||
|
||||
func defaultInitRequestFn(r *request.Request) {
|
||||
// Add reuest handlers for specific platforms.
|
||||
// Add request handlers for specific platforms.
|
||||
// e.g. 100-continue support for PUT requests using Go 1.6
|
||||
platformRequestHandlers(r)
|
||||
|
||||
|
@ -73,3 +74,8 @@ type sseCustomerKeyGetter interface {
|
|||
type copySourceSSECustomerKeyGetter interface {
|
||||
getCopySourceSSECustomerKey() string
|
||||
}
|
||||
|
||||
type endpointARNGetter interface {
|
||||
getEndpointARN() (arn.Resource, error)
|
||||
hasEndpointARN() bool
|
||||
}
|
||||
|
|
233
vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go
generated
vendored
Normal file
233
vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go
generated
vendored
Normal file
|
@ -0,0 +1,233 @@
|
|||
package s3
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
awsarn "github.com/aws/aws-sdk-go/aws/arn"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/endpoints"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/private/protocol"
|
||||
"github.com/aws/aws-sdk-go/service/s3/internal/arn"
|
||||
)
|
||||
|
||||
// Used by shapes with members decorated as endpoint ARN.
|
||||
func parseEndpointARN(v string) (arn.Resource, error) {
|
||||
return arn.ParseResource(v, accessPointResourceParser)
|
||||
}
|
||||
|
||||
func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) {
|
||||
resParts := arn.SplitResource(a.Resource)
|
||||
switch resParts[0] {
|
||||
case "accesspoint":
|
||||
return arn.ParseAccessPointResource(a, resParts[1:])
|
||||
default:
|
||||
return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"}
|
||||
}
|
||||
}
|
||||
|
||||
func endpointHandler(req *request.Request) {
|
||||
endpoint, ok := req.Params.(endpointARNGetter)
|
||||
if !ok || !endpoint.hasEndpointARN() {
|
||||
updateBucketEndpointFromParams(req)
|
||||
return
|
||||
}
|
||||
|
||||
resource, err := endpoint.getEndpointARN()
|
||||
if err != nil {
|
||||
req.Error = newInvalidARNError(nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
resReq := resourceRequest{
|
||||
Resource: resource,
|
||||
Request: req,
|
||||
}
|
||||
|
||||
if resReq.IsCrossPartition() {
|
||||
req.Error = newClientPartitionMismatchError(resource,
|
||||
req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
|
||||
return
|
||||
}
|
||||
|
||||
if !resReq.AllowCrossRegion() && resReq.IsCrossRegion() {
|
||||
req.Error = newClientRegionMismatchError(resource,
|
||||
req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
|
||||
return
|
||||
}
|
||||
|
||||
if resReq.HasCustomEndpoint() {
|
||||
req.Error = newInvalidARNWithCustomEndpointError(resource, nil)
|
||||
return
|
||||
}
|
||||
|
||||
switch tv := resource.(type) {
|
||||
case arn.AccessPointARN:
|
||||
err = updateRequestAccessPointEndpoint(req, tv)
|
||||
if err != nil {
|
||||
req.Error = err
|
||||
}
|
||||
default:
|
||||
req.Error = newInvalidARNError(resource, nil)
|
||||
}
|
||||
}
|
||||
|
||||
type resourceRequest struct {
|
||||
Resource arn.Resource
|
||||
Request *request.Request
|
||||
}
|
||||
|
||||
func (r resourceRequest) ARN() awsarn.ARN {
|
||||
return r.Resource.GetARN()
|
||||
}
|
||||
|
||||
func (r resourceRequest) AllowCrossRegion() bool {
|
||||
return aws.BoolValue(r.Request.Config.S3UseARNRegion)
|
||||
}
|
||||
|
||||
func (r resourceRequest) UseFIPS() bool {
|
||||
return isFIPS(aws.StringValue(r.Request.Config.Region))
|
||||
}
|
||||
|
||||
func (r resourceRequest) IsCrossPartition() bool {
|
||||
return r.Request.ClientInfo.PartitionID != r.Resource.GetARN().Partition
|
||||
}
|
||||
|
||||
func (r resourceRequest) IsCrossRegion() bool {
|
||||
return isCrossRegion(r.Request, r.Resource.GetARN().Region)
|
||||
}
|
||||
|
||||
func (r resourceRequest) HasCustomEndpoint() bool {
|
||||
return len(aws.StringValue(r.Request.Config.Endpoint)) > 0
|
||||
}
|
||||
|
||||
func isFIPS(clientRegion string) bool {
|
||||
return strings.HasPrefix(clientRegion, "fips-") || strings.HasSuffix(clientRegion, "-fips")
|
||||
}
|
||||
func isCrossRegion(req *request.Request, otherRegion string) bool {
|
||||
return req.ClientInfo.SigningRegion != otherRegion
|
||||
}
|
||||
|
||||
func updateBucketEndpointFromParams(r *request.Request) {
|
||||
bucket, ok := bucketNameFromReqParams(r.Params)
|
||||
if !ok {
|
||||
// Ignore operation requests if the bucket name was not provided
|
||||
// if this is an input validation error the validation handler
|
||||
// will report it.
|
||||
return
|
||||
}
|
||||
updateEndpointForS3Config(r, bucket)
|
||||
}
|
||||
|
||||
func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.AccessPointARN) error {
|
||||
// Accelerate not supported
|
||||
if aws.BoolValue(req.Config.S3UseAccelerate) {
|
||||
return newClientConfiguredForAccelerateError(accessPoint,
|
||||
req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
|
||||
}
|
||||
|
||||
// Ignore the disable host prefix for access points since custom endpoints
|
||||
// are not supported.
|
||||
req.Config.DisableEndpointHostPrefix = aws.Bool(false)
|
||||
|
||||
if err := accessPointEndpointBuilder(accessPoint).Build(req); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
removeBucketFromPath(req.HTTPRequest.URL)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func removeBucketFromPath(u *url.URL) {
|
||||
u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
|
||||
if u.Path == "" {
|
||||
u.Path = "/"
|
||||
}
|
||||
}
|
||||
|
||||
type accessPointEndpointBuilder arn.AccessPointARN
|
||||
|
||||
const (
|
||||
accessPointPrefixLabel = "accesspoint"
|
||||
accountIDPrefixLabel = "accountID"
|
||||
accesPointPrefixTemplate = "{" + accessPointPrefixLabel + "}-{" + accountIDPrefixLabel + "}."
|
||||
)
|
||||
|
||||
func (a accessPointEndpointBuilder) Build(req *request.Request) error {
|
||||
resolveRegion := arn.AccessPointARN(a).Region
|
||||
cfgRegion := aws.StringValue(req.Config.Region)
|
||||
|
||||
if isFIPS(cfgRegion) {
|
||||
if aws.BoolValue(req.Config.S3UseARNRegion) && isCrossRegion(req, resolveRegion) {
|
||||
// FIPS with cross region is not supported, the SDK must fail
|
||||
// because there is no well defined method for SDK to construct a
|
||||
// correct FIPS endpoint.
|
||||
return newClientConfiguredForCrossRegionFIPSError(arn.AccessPointARN(a),
|
||||
req.ClientInfo.PartitionID, cfgRegion, nil)
|
||||
}
|
||||
resolveRegion = cfgRegion
|
||||
}
|
||||
|
||||
endpoint, err := resolveRegionalEndpoint(req, resolveRegion)
|
||||
if err != nil {
|
||||
return newFailedToResolveEndpointError(arn.AccessPointARN(a),
|
||||
req.ClientInfo.PartitionID, cfgRegion, err)
|
||||
}
|
||||
|
||||
if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
const serviceEndpointLabel = "s3-accesspoint"
|
||||
|
||||
// dualstack provided by endpoint resolver
|
||||
cfgHost := req.HTTPRequest.URL.Host
|
||||
if strings.HasPrefix(cfgHost, "s3") {
|
||||
req.HTTPRequest.URL.Host = serviceEndpointLabel + cfgHost[2:]
|
||||
}
|
||||
|
||||
protocol.HostPrefixBuilder{
|
||||
Prefix: accesPointPrefixTemplate,
|
||||
LabelsFn: a.hostPrefixLabelValues,
|
||||
}.Build(req)
|
||||
|
||||
req.ClientInfo.SigningName = endpoint.SigningName
|
||||
req.ClientInfo.SigningRegion = endpoint.SigningRegion
|
||||
|
||||
err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host)
|
||||
if err != nil {
|
||||
return newInvalidARNError(arn.AccessPointARN(a), err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string {
|
||||
return map[string]string{
|
||||
accessPointPrefixLabel: arn.AccessPointARN(a).AccessPointName,
|
||||
accountIDPrefixLabel: arn.AccessPointARN(a).AccountID,
|
||||
}
|
||||
}
|
||||
|
||||
func resolveRegionalEndpoint(r *request.Request, region string) (endpoints.ResolvedEndpoint, error) {
|
||||
return r.Config.EndpointResolver.EndpointFor(EndpointsID, region, func(opts *endpoints.Options) {
|
||||
opts.DisableSSL = aws.BoolValue(r.Config.DisableSSL)
|
||||
opts.UseDualStack = aws.BoolValue(r.Config.UseDualStack)
|
||||
opts.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint
|
||||
})
|
||||
}
|
||||
|
||||
func updateRequestEndpoint(r *request.Request, endpoint string) (err error) {
|
||||
endpoint = endpoints.AddScheme(endpoint, aws.BoolValue(r.Config.DisableSSL))
|
||||
|
||||
r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath)
|
||||
if err != nil {
|
||||
return awserr.New(request.ErrCodeSerialization,
|
||||
"failed to parse endpoint URL", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
151
vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_errors.go
generated
vendored
Normal file
151
vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_errors.go
generated
vendored
Normal file
|
@ -0,0 +1,151 @@
|
|||
package s3
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/s3/internal/arn"
|
||||
)
|
||||
|
||||
const (
|
||||
invalidARNErrorErrCode = "InvalidARNError"
|
||||
configurationErrorErrCode = "ConfigurationError"
|
||||
)
|
||||
|
||||
type invalidARNError struct {
|
||||
message string
|
||||
resource arn.Resource
|
||||
origErr error
|
||||
}
|
||||
|
||||
func (e invalidARNError) Error() string {
|
||||
var extra string
|
||||
if e.resource != nil {
|
||||
extra = "ARN: " + e.resource.String()
|
||||
}
|
||||
return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr)
|
||||
}
|
||||
|
||||
func (e invalidARNError) Code() string {
|
||||
return invalidARNErrorErrCode
|
||||
}
|
||||
|
||||
func (e invalidARNError) Message() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
func (e invalidARNError) OrigErr() error {
|
||||
return e.origErr
|
||||
}
|
||||
|
||||
func newInvalidARNError(resource arn.Resource, err error) invalidARNError {
|
||||
return invalidARNError{
|
||||
message: "invalid ARN",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
}
|
||||
}
|
||||
|
||||
func newInvalidARNWithCustomEndpointError(resource arn.Resource, err error) invalidARNError {
|
||||
return invalidARNError{
|
||||
message: "resource ARN not supported with custom client endpoints",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
}
|
||||
}
|
||||
|
||||
// ARN not supported for the target partition
|
||||
func newInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) invalidARNError {
|
||||
return invalidARNError{
|
||||
message: "resource ARN not supported for the target ARN partition",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
}
|
||||
}
|
||||
|
||||
type configurationError struct {
|
||||
message string
|
||||
resource arn.Resource
|
||||
clientPartitionID string
|
||||
clientRegion string
|
||||
origErr error
|
||||
}
|
||||
|
||||
func (e configurationError) Error() string {
|
||||
extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s",
|
||||
e.resource, e.clientPartitionID, e.clientRegion)
|
||||
|
||||
return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr)
|
||||
}
|
||||
|
||||
func (e configurationError) Code() string {
|
||||
return configurationErrorErrCode
|
||||
}
|
||||
|
||||
func (e configurationError) Message() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
func (e configurationError) OrigErr() error {
|
||||
return e.origErr
|
||||
}
|
||||
|
||||
func newClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
|
||||
return configurationError{
|
||||
message: "client partition does not match provided ARN partition",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
clientPartitionID: clientPartitionID,
|
||||
clientRegion: clientRegion,
|
||||
}
|
||||
}
|
||||
|
||||
func newClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
|
||||
return configurationError{
|
||||
message: "client region does not match provided ARN region",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
clientPartitionID: clientPartitionID,
|
||||
clientRegion: clientRegion,
|
||||
}
|
||||
}
|
||||
|
||||
func newFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
|
||||
return configurationError{
|
||||
message: "endpoint resolver failed to find an endpoint for the provided ARN region",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
clientPartitionID: clientPartitionID,
|
||||
clientRegion: clientRegion,
|
||||
}
|
||||
}
|
||||
|
||||
func newClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
|
||||
return configurationError{
|
||||
message: "client configured for fips but cross-region resource ARN provided",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
clientPartitionID: clientPartitionID,
|
||||
clientRegion: clientRegion,
|
||||
}
|
||||
}
|
||||
|
||||
func newClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
|
||||
return configurationError{
|
||||
message: "client configured for S3 Accelerate but is supported with resource ARN",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
clientPartitionID: clientPartitionID,
|
||||
clientRegion: clientRegion,
|
||||
}
|
||||
}
|
||||
|
||||
func newClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
|
||||
return configurationError{
|
||||
message: "client configured for FIPS with cross-region enabled but is supported with cross-region resource ARN",
|
||||
origErr: err,
|
||||
resource: resource,
|
||||
clientPartitionID: clientPartitionID,
|
||||
clientRegion: clientRegion,
|
||||
}
|
||||
}
|
8
vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
generated
vendored
8
vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
generated
vendored
|
@ -15,9 +15,9 @@ const (
|
|||
// "BucketAlreadyOwnedByYou".
|
||||
//
|
||||
// The bucket you tried to create already exists, and you own it. Amazon S3
|
||||
// returns this error in all AWS Regions except in the North Virginia region.
|
||||
// returns this error in all AWS Regions except in the North Virginia Region.
|
||||
// For legacy compatibility, if you re-create an existing bucket that you already
|
||||
// own in the North Virginia region, Amazon S3 returns 200 OK and resets the
|
||||
// own in the North Virginia Region, Amazon S3 returns 200 OK and resets the
|
||||
// bucket access control lists (ACLs).
|
||||
ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou"
|
||||
|
||||
|
@ -42,13 +42,13 @@ const (
|
|||
// ErrCodeObjectAlreadyInActiveTierError for service response error code
|
||||
// "ObjectAlreadyInActiveTierError".
|
||||
//
|
||||
// This operation is not allowed against this storage tier
|
||||
// This operation is not allowed against this storage tier.
|
||||
ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError"
|
||||
|
||||
// ErrCodeObjectNotInActiveTierError for service response error code
|
||||
// "ObjectNotInActiveTierError".
|
||||
//
|
||||
// The source object of the COPY operation is not in the active tier and is
|
||||
// only stored in Amazon Glacier.
|
||||
// only stored in Amazon S3 Glacier.
|
||||
ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError"
|
||||
)
|
||||
|
|
43
vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
generated
vendored
43
vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
generated
vendored
|
@ -30,10 +30,10 @@ var accelerateOpBlacklist = operationBlacklist{
|
|||
opListBuckets, opCreateBucket, opDeleteBucket,
|
||||
}
|
||||
|
||||
// Request handler to automatically add the bucket name to the endpoint domain
|
||||
// Automatically add the bucket name to the endpoint domain
|
||||
// if possible. This style of bucket is valid for all bucket names which are
|
||||
// DNS compatible and do not contain "."
|
||||
func updateEndpointForS3Config(r *request.Request) {
|
||||
func updateEndpointForS3Config(r *request.Request, bucketName string) {
|
||||
forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle)
|
||||
accelerate := aws.BoolValue(r.Config.S3UseAccelerate)
|
||||
|
||||
|
@ -43,45 +43,29 @@ func updateEndpointForS3Config(r *request.Request) {
|
|||
r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.")
|
||||
}
|
||||
}
|
||||
updateEndpointForAccelerate(r)
|
||||
updateEndpointForAccelerate(r, bucketName)
|
||||
} else if !forceHostStyle && r.Operation.Name != opGetBucketLocation {
|
||||
updateEndpointForHostStyle(r)
|
||||
updateEndpointForHostStyle(r, bucketName)
|
||||
}
|
||||
}
|
||||
|
||||
func updateEndpointForHostStyle(r *request.Request) {
|
||||
bucket, ok := bucketNameFromReqParams(r.Params)
|
||||
if !ok {
|
||||
// Ignore operation requests if the bucketname was not provided
|
||||
// if this is an input validation error the validation handler
|
||||
// will report it.
|
||||
return
|
||||
}
|
||||
|
||||
if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
|
||||
func updateEndpointForHostStyle(r *request.Request, bucketName string) {
|
||||
if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) {
|
||||
// bucket name must be valid to put into the host
|
||||
return
|
||||
}
|
||||
|
||||
moveBucketToHost(r.HTTPRequest.URL, bucket)
|
||||
moveBucketToHost(r.HTTPRequest.URL, bucketName)
|
||||
}
|
||||
|
||||
var (
|
||||
accelElem = []byte("s3-accelerate.dualstack.")
|
||||
)
|
||||
|
||||
func updateEndpointForAccelerate(r *request.Request) {
|
||||
bucket, ok := bucketNameFromReqParams(r.Params)
|
||||
if !ok {
|
||||
// Ignore operation requests if the bucketname was not provided
|
||||
// if this is an input validation error the validation handler
|
||||
// will report it.
|
||||
return
|
||||
}
|
||||
|
||||
if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
|
||||
func updateEndpointForAccelerate(r *request.Request, bucketName string) {
|
||||
if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) {
|
||||
r.Error = awserr.New("InvalidParameterException",
|
||||
fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucket),
|
||||
fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucketName),
|
||||
nil)
|
||||
return
|
||||
}
|
||||
|
@ -106,7 +90,7 @@ func updateEndpointForAccelerate(r *request.Request) {
|
|||
|
||||
r.HTTPRequest.URL.Host = strings.Join(parts, ".")
|
||||
|
||||
moveBucketToHost(r.HTTPRequest.URL, bucket)
|
||||
moveBucketToHost(r.HTTPRequest.URL, bucketName)
|
||||
}
|
||||
|
||||
// Attempts to retrieve the bucket name from the request input parameters.
|
||||
|
@ -148,8 +132,5 @@ func dnsCompatibleBucketName(bucket string) bool {
|
|||
// moveBucketToHost moves the bucket name from the URI path to URL host.
|
||||
func moveBucketToHost(u *url.URL, bucket string) {
|
||||
u.Host = bucket + "." + u.Host
|
||||
u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
|
||||
if u.Path == "" {
|
||||
u.Path = "/"
|
||||
}
|
||||
removeBucketFromPath(u)
|
||||
}
|
||||
|
|
45
vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/accesspoint_arn.go
generated
vendored
Normal file
45
vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/accesspoint_arn.go
generated
vendored
Normal file
|
@ -0,0 +1,45 @@
|
|||
package arn
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/arn"
|
||||
)
|
||||
|
||||
// AccessPointARN provides representation
|
||||
type AccessPointARN struct {
|
||||
arn.ARN
|
||||
AccessPointName string
|
||||
}
|
||||
|
||||
// GetARN returns the base ARN for the Access Point resource
|
||||
func (a AccessPointARN) GetARN() arn.ARN {
|
||||
return a.ARN
|
||||
}
|
||||
|
||||
// ParseAccessPointResource attempts to parse the ARN's resource as an
|
||||
// AccessPoint resource.
|
||||
func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) {
|
||||
if len(a.Region) == 0 {
|
||||
return AccessPointARN{}, InvalidARNError{a, "region not set"}
|
||||
}
|
||||
if len(a.AccountID) == 0 {
|
||||
return AccessPointARN{}, InvalidARNError{a, "account-id not set"}
|
||||
}
|
||||
if len(resParts) == 0 {
|
||||
return AccessPointARN{}, InvalidARNError{a, "resource-id not set"}
|
||||
}
|
||||
if len(resParts) > 1 {
|
||||
return AccessPointARN{}, InvalidARNError{a, "sub resource not supported"}
|
||||
}
|
||||
|
||||
resID := resParts[0]
|
||||
if len(strings.TrimSpace(resID)) == 0 {
|
||||
return AccessPointARN{}, InvalidARNError{a, "resource-id not set"}
|
||||
}
|
||||
|
||||
return AccessPointARN{
|
||||
ARN: a,
|
||||
AccessPointName: resID,
|
||||
}, nil
|
||||
}
|
71
vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/arn.go
generated
vendored
Normal file
71
vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/arn.go
generated
vendored
Normal file
|
@ -0,0 +1,71 @@
|
|||
package arn
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/arn"
|
||||
)
|
||||
|
||||
// Resource provides the interfaces abstracting ARNs of specific resource
|
||||
// types.
|
||||
type Resource interface {
|
||||
GetARN() arn.ARN
|
||||
String() string
|
||||
}
|
||||
|
||||
// ResourceParser provides the function for parsing an ARN's resource
|
||||
// component into a typed resource.
|
||||
type ResourceParser func(arn.ARN) (Resource, error)
|
||||
|
||||
// ParseResource parses an AWS ARN into a typed resource for the S3 API.
|
||||
func ParseResource(s string, resParser ResourceParser) (resARN Resource, err error) {
|
||||
a, err := arn.Parse(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(a.Partition) == 0 {
|
||||
return nil, InvalidARNError{a, "partition not set"}
|
||||
}
|
||||
if a.Service != "s3" {
|
||||
return nil, InvalidARNError{a, "service is not S3"}
|
||||
}
|
||||
if len(a.Resource) == 0 {
|
||||
return nil, InvalidARNError{a, "resource not set"}
|
||||
}
|
||||
|
||||
return resParser(a)
|
||||
}
|
||||
|
||||
// SplitResource splits the resource components by the ARN resource delimiters.
|
||||
func SplitResource(v string) []string {
|
||||
var parts []string
|
||||
var offset int
|
||||
|
||||
for offset <= len(v) {
|
||||
idx := strings.IndexAny(v[offset:], "/:")
|
||||
if idx < 0 {
|
||||
parts = append(parts, v[offset:])
|
||||
break
|
||||
}
|
||||
parts = append(parts, v[offset:idx+offset])
|
||||
offset += idx + 1
|
||||
}
|
||||
|
||||
return parts
|
||||
}
|
||||
|
||||
// IsARN returns whether the given string is an ARN
|
||||
func IsARN(s string) bool {
|
||||
return arn.IsARN(s)
|
||||
}
|
||||
|
||||
// InvalidARNError provides the error for an invalid ARN error.
|
||||
type InvalidARNError struct {
|
||||
ARN arn.ARN
|
||||
Reason string
|
||||
}
|
||||
|
||||
func (e InvalidARNError) Error() string {
|
||||
return "invalid Amazon S3 ARN, " + e.Reason + ", " + e.ARN.String()
|
||||
}
|
33
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go
generated
vendored
33
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go
generated
vendored
|
@ -21,7 +21,14 @@ type UploadInput struct {
|
|||
// The readable body payload to send to S3.
|
||||
Body io.Reader
|
||||
|
||||
// Name of the bucket to which the PUT operation was initiated.
|
||||
// Bucket name to which the PUT operation was initiated.
|
||||
//
|
||||
// When using this API with an access point, you must direct requests to the
|
||||
// access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
|
||||
// When using this operation using an access point through the AWS SDKs, you
|
||||
// provide the access point ARN in place of the bucket name. For more information
|
||||
// about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
|
||||
// in the Amazon Simple Storage Service Developer Guide.
|
||||
//
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -92,23 +99,25 @@ type UploadInput struct {
|
|||
|
||||
// Confirms that the requester knows that she or he will be charged for the
|
||||
// request. Bucket owners need not specify this parameter in their requests.
|
||||
// Documentation on downloading objects from requester pays buckets can be found
|
||||
// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
|
||||
// For information about downloading objects from Requester Pays buckets, see
|
||||
// Downloading Objects in Requestor Pays Buckets (https://docs.aws.amazon.com/http:/docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
|
||||
// in the Amazon S3 Developer Guide.
|
||||
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
|
||||
|
||||
// Specifies the algorithm to use to when encrypting the object (e.g., AES256).
|
||||
// Specifies the algorithm to use to when encrypting the object (for example,
|
||||
// AES256).
|
||||
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
|
||||
|
||||
// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
|
||||
// data. This value is used to store the object and then it is discarded; Amazon
|
||||
// does not store the encryption key. The key must be appropriate for use with
|
||||
// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
|
||||
// S3 does not store the encryption key. The key must be appropriate for use
|
||||
// with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
|
||||
// header.
|
||||
SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
|
||||
|
||||
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
|
||||
// Amazon S3 uses this header for a message integrity check to ensure the encryption
|
||||
// key was transmitted without error.
|
||||
// Amazon S3 uses this header for a message integrity check to ensure that the
|
||||
// encryption key was transmitted without error.
|
||||
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
|
||||
|
||||
// Specifies the AWS KMS Encryption Context to use for object encryption. The
|
||||
|
@ -116,7 +125,7 @@ type UploadInput struct {
|
|||
// encryption context key-value pairs.
|
||||
SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
|
||||
|
||||
// If the x-amz-server-side-encryption is present and has the value of aws:kms,
|
||||
// If x-amz-server-side-encryption is present and has the value of aws:kms,
|
||||
// this header specifies the ID of the AWS Key Management Service (AWS KMS)
|
||||
// customer master key (CMK) that was used for the object.
|
||||
//
|
||||
|
@ -126,8 +135,8 @@ type UploadInput struct {
|
|||
// Amazon S3 uses the AWS managed CMK in AWS to protect the data.
|
||||
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
|
||||
|
||||
// The Server-side encryption algorithm used when storing this object in S3
|
||||
// (e.g., AES256, aws:kms).
|
||||
// The server-side encryption algorithm used when storing this object in Amazon
|
||||
// S3 (for example, AES256, aws:kms).
|
||||
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
|
||||
|
||||
// If you don't specify, Standard is the default storage class. Amazon S3 supports
|
||||
|
@ -141,7 +150,7 @@ type UploadInput struct {
|
|||
// If the bucket is configured as a website, redirects requests for this object
|
||||
// to another object in the same bucket or to an external URL. Amazon S3 stores
|
||||
// the value of this header in the object metadata. For information about object
|
||||
// metadata, see .
|
||||
// metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html).
|
||||
//
|
||||
// In the following example, the request header sets the redirect to an object
|
||||
// (anotherPage.html) in the same bucket:
|
||||
|
|
119
vendor/github.com/klauspost/compress/huff0/decompress.go
generated
vendored
119
vendor/github.com/klauspost/compress/huff0/decompress.go
generated
vendored
|
@ -15,8 +15,7 @@ type dTable struct {
|
|||
|
||||
// single-symbols decoding
|
||||
type dEntrySingle struct {
|
||||
byte uint8
|
||||
nBits uint8
|
||||
entry uint16
|
||||
}
|
||||
|
||||
// double-symbols decoding
|
||||
|
@@ -76,14 +75,15 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
}

// collect weight stats
var rankStats [tableLogMax + 1]uint32
var rankStats [16]uint32
weightTotal := uint32(0)
for _, v := range s.huffWeight[:s.symbolLen] {
if v > tableLogMax {
return s, nil, errors.New("corrupt input: weight too large")
}
rankStats[v]++
weightTotal += (1 << (v & 15)) >> 1
v2 := v & 15
rankStats[v2]++
weightTotal += (1 << v2) >> 1
}
if weightTotal == 0 {
return s, nil, errors.New("corrupt input: weights zero")
@@ -134,15 +134,17 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
if len(s.dt.single) != tSize {
s.dt.single = make([]dEntrySingle, tSize)
}

for n, w := range s.huffWeight[:s.symbolLen] {
if w == 0 {
continue
}
length := (uint32(1) << w) >> 1
d := dEntrySingle{
byte: uint8(n),
nBits: s.actualTableLog + 1 - w,
entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8),
}
for u := rankStats[w]; u < rankStats[w]+length; u++ {
s.dt.single[u] = d
single := s.dt.single[rankStats[w] : rankStats[w]+length]
for i := range single {
single[i] = d
}
rankStats[w] += length
}
@@ -167,12 +169,12 @@ func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) {
decode := func() byte {
val := br.peekBitsFast(s.actualTableLog) /* note : actualTableLog >= 1 */
v := s.dt.single[val]
br.bitsRead += v.nBits
return v.byte
br.bitsRead += uint8(v.entry)
return uint8(v.entry >> 8)
}
hasDec := func(v dEntrySingle) byte {
br.bitsRead += v.nBits
return v.byte
br.bitsRead += uint8(v.entry)
return uint8(v.entry >> 8)
}

// Avoid bounds check by always having full sized table.
@@ -269,8 +271,8 @@ func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) {
decode := func(br *bitReader) byte {
val := br.peekBitsFast(s.actualTableLog) /* note : actualTableLog >= 1 */
v := single[val&tlMask]
br.bitsRead += v.nBits
return v.byte
br.bitsRead += uint8(v.entry)
return uint8(v.entry >> 8)
}

// Use temp table to avoid bound checks/append penalty.
@@ -283,20 +285,67 @@ func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) {
bigloop:
for {
for i := range br {
if br[i].off < 4 {
br := &br[i]
if br.off < 4 {
break bigloop
}
br[i].fillFast()
br.fillFast()
}
tmp[off] = decode(&br[0])
tmp[off+bufoff] = decode(&br[1])
tmp[off+bufoff*2] = decode(&br[2])
tmp[off+bufoff*3] = decode(&br[3])
tmp[off+1] = decode(&br[0])
tmp[off+1+bufoff] = decode(&br[1])
tmp[off+1+bufoff*2] = decode(&br[2])
tmp[off+1+bufoff*3] = decode(&br[3])

{
const stream = 0
val := br[stream].peekBitsFast(s.actualTableLog)
v := single[val&tlMask]
br[stream].bitsRead += uint8(v.entry)

val2 := br[stream].peekBitsFast(s.actualTableLog)
v2 := single[val2&tlMask]
tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8)
tmp[off+bufoff*stream] = uint8(v.entry >> 8)
br[stream].bitsRead += uint8(v2.entry)
}

{
const stream = 1
val := br[stream].peekBitsFast(s.actualTableLog)
v := single[val&tlMask]
br[stream].bitsRead += uint8(v.entry)

val2 := br[stream].peekBitsFast(s.actualTableLog)
v2 := single[val2&tlMask]
tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8)
tmp[off+bufoff*stream] = uint8(v.entry >> 8)
br[stream].bitsRead += uint8(v2.entry)
}

{
const stream = 2
val := br[stream].peekBitsFast(s.actualTableLog)
v := single[val&tlMask]
br[stream].bitsRead += uint8(v.entry)

val2 := br[stream].peekBitsFast(s.actualTableLog)
v2 := single[val2&tlMask]
tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8)
tmp[off+bufoff*stream] = uint8(v.entry >> 8)
br[stream].bitsRead += uint8(v2.entry)
}

{
const stream = 3
val := br[stream].peekBitsFast(s.actualTableLog)
v := single[val&tlMask]
br[stream].bitsRead += uint8(v.entry)

val2 := br[stream].peekBitsFast(s.actualTableLog)
v2 := single[val2&tlMask]
tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8)
tmp[off+bufoff*stream] = uint8(v.entry >> 8)
br[stream].bitsRead += uint8(v2.entry)
}

off += 2

if off == bufoff {
if bufoff > dstEvery {
return nil, errors.New("corruption detected: stream overrun 1")
@@ -367,7 +416,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
broken++
if enc.nBits == 0 {
for _, dec := range dt {
if dec.byte == byte(sym) {
if uint8(dec.entry>>8) == byte(sym) {
fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
errs++
break
@@ -383,12 +432,12 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
top := enc.val << ub
// decoder looks at top bits.
dec := dt[top]
if dec.nBits != enc.nBits {
fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, dec.nBits)
if uint8(dec.entry) != enc.nBits {
fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
errs++
}
if dec.byte != uint8(sym) {
fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, dec.byte)
if uint8(dec.entry>>8) != uint8(sym) {
fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
errs++
}
if errs > 0 {
@@ -399,12 +448,12 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
for i := uint16(0); i < (1 << ub); i++ {
vval := top | i
dec := dt[vval]
if dec.nBits != enc.nBits {
fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, dec.nBits)
if uint8(dec.entry) != enc.nBits {
fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry))
errs++
}
if dec.byte != uint8(sym) {
fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, dec.byte)
if uint8(dec.entry>>8) != uint8(sym) {
fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8))
errs++
}
if errs > 20 {
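Taken together, both the 1X and 4X decode paths now index an array of packed uint16 entries. A round-trip sketch using the package's public API (Compress1X, ReadTable, and Decompress1X, all visible in this diff); the input is a placeholder and the reuse/error handling is simplified.

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/huff0"
)

func main() {
	in := bytes.Repeat([]byte("abcabcabd"), 100) // compressible placeholder input

	var c huff0.Scratch
	comp, _, err := huff0.Compress1X(in, &c)
	if err != nil {
		log.Fatal(err) // the real API may also report incompressible/RLE input here
	}

	// The decoder first reads the Huffman table, then decodes the remaining stream.
	var d huff0.Scratch
	s, remain, err := huff0.ReadTable(comp, &d)
	if err != nil {
		log.Fatal(err)
	}
	out, err := s.Decompress1X(remain)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(in, out))
}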
14 vendor/github.com/klauspost/compress/zstd/README.md (generated, vendored)
@@ -26,8 +26,12 @@ Godoc Documentation: https://godoc.org/github.com/klauspost/compress/zstd
### Status:

BETA - there may still be subtle bugs, but a wide variety of content has been tested.
There may still be implementation specific stuff in regards to error handling that could lead to edge cases.
STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively
used by several projects. This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
kindly supplied by [fuzzit.dev](https://fuzzit.dev/).

There may still be specific combinations of data types/size/settings that could lead to edge cases,
so as always, testing is recommended.

For now, a high speed (fastest) and medium-fast (default) compressor has been implemented.
@@ -251,8 +255,12 @@ The converter `s` can be reused to avoid allocations, even after errors.
## Decompressor

STATUS: Release Candidate - there may still be subtle bugs, but a wide variety of content has been tested.
Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.

This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder,
or run it past its limits with ANY input provided.

### Usage
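The Decompressor section now points to fuzz testing and usage. A minimal streaming decode sketch against the public API; stdin/stdout stand in for any io.Reader/io.Writer pair carrying a zstd frame.

package main

import (
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	dec, err := zstd.NewReader(os.Stdin)
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close() // release decoder goroutines

	if _, err := io.Copy(os.Stdout, dec); err != nil {
		log.Fatal(err)
	}
}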
4 vendor/github.com/klauspost/compress/zstd/blockdec.go (generated, vendored)
@@ -89,6 +89,7 @@ type blockDec struct {
sequenceBuf []seq
tmp [4]byte
err error
decWG sync.WaitGroup
}

func (b *blockDec) String() string {
@@ -105,6 +106,7 @@ func newBlockDec(lowMem bool) *blockDec {
input: make(chan struct{}, 1),
history: make(chan *history, 1),
}
b.decWG.Add(1)
go b.startDecoder()
return &b
}
@@ -183,11 +185,13 @@ func (b *blockDec) Close() {
close(b.input)
close(b.history)
close(b.result)
b.decWG.Wait()
}

// decodeAsync will prepare decoding the block when it receives input.
// This will separate output and history.
func (b *blockDec) startDecoder() {
defer b.decWG.Done()
for range b.input {
//println("blockDec: Got block input")
switch b.Type {
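The new decWG WaitGroup lets Close block until the startDecoder goroutine has drained its input channel before the channels are torn down. A generic sketch of that shutdown pattern; the names here are illustrative, not the package's own.

package main

import (
	"fmt"
	"sync"
)

type worker struct {
	input chan int
	wg    sync.WaitGroup
}

func newWorker() *worker {
	w := &worker{input: make(chan int, 1)}
	w.wg.Add(1)
	go func() {
		defer w.wg.Done()
		for v := range w.input { // exits when input is closed
			fmt.Println("processed", v)
		}
	}()
	return w
}

// Close mirrors blockDec.Close: close the channel, then wait for the goroutine.
func (w *worker) Close() {
	close(w.input)
	w.wg.Wait()
}

func main() {
	w := newWorker()
	w.input <- 42
	w.Close()
}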
12 vendor/github.com/klauspost/compress/zstd/blockenc.go (generated, vendored)
@@ -300,13 +300,13 @@ func (b *blockEnc) encodeRaw(a []byte) {
}

// encodeLits can be used if the block is only litLen.
func (b *blockEnc) encodeLits() error {
func (b *blockEnc) encodeLits(raw bool) error {
var bh blockHeader
bh.setLast(b.last)
bh.setSize(uint32(len(b.literals)))

// Don't compress extremely small blocks
if len(b.literals) < 32 {
if len(b.literals) < 32 || raw {
if debug {
println("Adding RAW block, length", len(b.literals))
}
@@ -438,9 +438,9 @@ func fuzzFseEncoder(data []byte) int {
}

// encode will encode the block and put the output in b.output.
func (b *blockEnc) encode() error {
func (b *blockEnc) encode(raw bool) error {
if len(b.sequences) == 0 {
return b.encodeLits()
return b.encodeLits(raw)
}
// We want some difference
if len(b.literals) > (b.size - (b.size >> 5)) {
@@ -458,10 +458,10 @@ func (b *blockEnc) encode() error {
reUsed, single bool
err error
)
if len(b.literals) >= 1024 {
if len(b.literals) >= 1024 && !raw {
// Use 4 Streams.
out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
} else if len(b.literals) > 32 {
} else if len(b.literals) > 32 && !raw {
// Use 1 stream
single = true
out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
4 vendor/github.com/klauspost/compress/zstd/encoder.go (generated, vendored)
@@ -262,7 +262,7 @@ func (e *Encoder) nextBlock(final bool) error {
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
err = blk.encode()
err = blk.encode(e.o.noEntropy)
}
switch err {
case errIncompressible:
@@ -473,7 +473,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
err = blk.encode()
err = blk.encode(e.o.noEntropy)
}

switch err {
11 vendor/github.com/klauspost/compress/zstd/encoder_options.go (generated, vendored)
@@ -20,6 +20,7 @@ type encoderOptions struct {
windowSize int
level EncoderLevel
fullZero bool
noEntropy bool
}

func (o *encoderOptions) setDefault() {
@@ -202,6 +203,16 @@ func WithZeroFrames(b bool) EOption {
}
}

// WithNoEntropyCompression will always skip entropy compression of literals.
// This can be useful if content has matches, but unlikely to benefit from entropy
// compression. Usually the slight speed improvement is not worth enabling this.
func WithNoEntropyCompression(b bool) EOption {
return func(o *encoderOptions) error {
o.noEntropy = b
return nil
}
}

// WithSingleSegment will set the "single segment" flag when EncodeAll is used.
// If this flag is set, data must be regenerated within a single continuous memory segment.
// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present.
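The new option is plumbed through encoderOptions.noEntropy and, as the encoder.go hunks above show, passed down to blk.encode. A sketch of enabling it on a writer; the sample data and the absence of other options are arbitrary choices.

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer
	// Skip Huffman/FSE coding of literals; matches are still found and encoded.
	enc, err := zstd.NewWriter(&buf, zstd.WithNoEntropyCompression(true))
	if err != nil {
		log.Fatal(err)
	}
	if _, err := enc.Write(bytes.Repeat([]byte("0123456789"), 1000)); err != nil {
		log.Fatal(err)
	}
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("compressed size:", buf.Len())
}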
5 vendor/github.com/klauspost/compress/zstd/framedec.go (generated, vendored)
@@ -478,11 +478,12 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
if err == nil {
if n != len(dst)-crcStart {
err = io.ErrShortWrite
}
}
} else {
err = d.checkCRC()
}
}
}
}
d.history.b = saved
return dst, err
}
8 vendor/github.com/klauspost/compress/zstd/snappy.go (generated, vendored)
@@ -111,7 +111,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
// Add empty last block
r.block.reset(nil)
r.block.last = true
err := r.block.encodeLits()
err := r.block.encodeLits(false)
if err != nil {
return written, err
}
@@ -178,7 +178,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
r.err = ErrSnappyCorrupt
return written, r.err
}
err = r.block.encode()
err = r.block.encode(false)
switch err {
case errIncompressible:
r.block.popOffsets()
@@ -188,7 +188,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
println("snappy.Decode:", err)
return written, err
}
err = r.block.encodeLits()
err = r.block.encodeLits(false)
if err != nil {
return written, err
}
@@ -235,7 +235,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
r.err = ErrSnappyCorrupt
return written, r.err
}
err := r.block.encodeLits()
err := r.block.encodeLits(false)
if err != nil {
return written, err
}
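SnappyConverter.Convert now calls the raw-capable encodeLits/encode variants with raw=false, so its behaviour is unchanged. A usage sketch that re-frames a snappy-compressed stream as zstd; stdin and stdout stand in for real streams.

package main

import (
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Reads a snappy-framed stream and writes an equivalent zstd stream.
	var conv zstd.SnappyConverter
	n, err := conv.Convert(os.Stdin, os.Stdout)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("wrote %d bytes", n)
}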
79 vendor/golang.org/x/oauth2/transport.go (generated, vendored)
@@ -6,7 +6,7 @@ package oauth2
import (
"errors"
"io"
"log"
"net/http"
"sync"
)
@@ -25,9 +25,6 @@ type Transport struct {
// Base is the base RoundTripper used to make HTTP requests.
// If nil, http.DefaultTransport is used.
Base http.RoundTripper

mu sync.Mutex // guards modReq
modReq map[*http.Request]*http.Request // original -> modified
}

// RoundTrip authorizes and authenticates the request with an
@@ -52,35 +49,22 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
req2 := cloneRequest(req) // per RoundTripper contract
token.SetAuthHeader(req2)
t.setModReq(req, req2)
res, err := t.base().RoundTrip(req2)

// req.Body is assumed to have been closed by the base RoundTripper.
// req.Body is assumed to be closed by the base RoundTripper.
reqBodyClosed = true

if err != nil {
t.setModReq(req, nil)
return nil, err
}
res.Body = &onEOFReader{
rc: res.Body,
fn: func() { t.setModReq(req, nil) },
}
return res, nil
return t.base().RoundTrip(req2)
}

// CancelRequest cancels an in-flight request by closing its connection.
var cancelOnce sync.Once

// CancelRequest does nothing. It used to be a legacy cancellation mechanism
// but now it only logs on first use to warn that it's deprecated.
//
// Deprecated: use contexts for cancellation instead.
func (t *Transport) CancelRequest(req *http.Request) {
type canceler interface {
CancelRequest(*http.Request)
}
if cr, ok := t.base().(canceler); ok {
t.mu.Lock()
modReq := t.modReq[req]
delete(t.modReq, req)
t.mu.Unlock()
cr.CancelRequest(modReq)
}
cancelOnce.Do(func() {
log.Printf("deprecated: golang.org/x/oauth2: Transport.CancelRequest no longer does anything; use contexts")
})
}

func (t *Transport) base() http.RoundTripper {
@@ -90,19 +74,6 @@ func (t *Transport) base() http.RoundTripper {
return http.DefaultTransport
}

func (t *Transport) setModReq(orig, mod *http.Request) {
t.mu.Lock()
defer t.mu.Unlock()
if t.modReq == nil {
t.modReq = make(map[*http.Request]*http.Request)
}
if mod == nil {
delete(t.modReq, orig)
} else {
t.modReq[orig] = mod
}
}

// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request {
@@ -116,29 +87,3 @@ func cloneRequest(r *http.Request) *http.Request {
}
return r2
}

type onEOFReader struct {
rc io.ReadCloser
fn func()
}

func (r *onEOFReader) Read(p []byte) (n int, err error) {
n, err = r.rc.Read(p)
if err == io.EOF {
r.runFunc()
}
return
}

func (r *onEOFReader) Close() error {
err := r.rc.Close()
r.runFunc()
return err
}

func (r *onEOFReader) runFunc() {
if fn := r.fn; fn != nil {
fn()
r.fn = nil
}
}
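With CancelRequest reduced to a deprecation log, cancellation now goes through the request context. A sketch of the replacement pattern; the token value and URL are placeholders.

package main

import (
	"context"
	"log"
	"net/http"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "example-token"}) // placeholder
	client := &http.Client{Transport: &oauth2.Transport{Source: src}}

	// Cancel the in-flight request via its context rather than Transport.CancelRequest.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "https://api.example.com/v1/items", nil)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.Do(req)
	if err != nil {
		log.Fatal(err) // e.g. context deadline exceeded if the server is too slow
	}
	defer resp.Body.Close()
}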
2 vendor/golang.org/x/sys/unix/mkall.sh (generated, vendored)
@@ -50,7 +50,7 @@ if [[ "$GOOS" = "linux" ]]; then
# Use the Docker-based build system
# Files generated through docker (use $cmd so you can Ctl-C the build or run)
$cmd docker build --tag generate:$GOOS $GOOS
$cmd docker run --interactive --tty --volume $(dirname "$(readlink -f "$0")"):/build generate:$GOOS
$cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")" && /bin/pwd):/build generate:$GOOS
exit
fi
10 vendor/golang.org/x/sys/unix/syscall_netbsd.go (generated, vendored)
@@ -249,6 +249,14 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
return sendfile(outfd, infd, offset, count)
}

func Fstatvfs(fd int, buf *Statvfs_t) (err error) {
return Fstatvfs1(fd, buf, ST_WAIT)
}

func Statvfs(path string, buf *Statvfs_t) (err error) {
return Statvfs1(path, buf, ST_WAIT)
}

/*
 * Exposed directly
 */
@@ -287,6 +295,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
//sys	Fpathconf(fd int, name int) (val int, err error)
//sys	Fstat(fd int, stat *Stat_t) (err error)
//sys	Fstatat(fd int, path string, stat *Stat_t, flags int) (err error)
//sys	Fstatvfs1(fd int, buf *Statvfs_t) (err error) = SYS_FSTATVFS1
//sys	Fsync(fd int) (err error)
//sys	Ftruncate(fd int, length int64) (err error)
//sysnb	Getegid() (egid int)
@@ -343,6 +352,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
//sysnb	Settimeofday(tp *Timeval) (err error)
//sysnb	Setuid(uid int) (err error)
//sys	Stat(path string, stat *Stat_t) (err error)
//sys	Statvfs1(path string, buf *Statvfs_t) (err error) = SYS_STATVFS1
//sys	Symlink(path string, link string) (err error)
//sys	Symlinkat(oldpath string, newdirfd int, newpath string) (err error)
//sys	Sync() (err error)
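The new Fstatvfs/Statvfs wrappers default to ST_WAIT and fill the Statvfs_t type added in the ztypes files below. A NetBSD-only sketch reporting free space on the root filesystem; the mount point is a placeholder.

// +build netbsd

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var st unix.Statvfs_t
	// Statvfs wraps Statvfs1 with ST_WAIT, so counters are synced before returning.
	if err := unix.Statvfs("/", &st); err != nil {
		log.Fatal(err)
	}
	free := uint64(st.Frsize) * st.Bavail // fragment size × blocks available to non-root
	fmt.Printf("/: %d bytes free of %d\n", free, uint64(st.Frsize)*st.Blocks)
}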
@@ -1,4 +1,4 @@
// Code generated by linux/mkall.go generatePtracePair(arm, arm64). DO NOT EDIT.
// Code generated by linux/mkall.go generatePtracePair("arm", "arm64"). DO NOT EDIT.

// +build linux
// +build arm arm64
@@ -39,3 +39,15 @@ func PtraceGetRegsArm64(pid int, regsout *PtraceRegsArm64) error {
func PtraceSetRegsArm64(pid int, regs *PtraceRegsArm64) error {
return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
}

// PtraceGetRegSetArm64 fetches the registers used by arm64 binaries.
func PtraceGetRegSetArm64(pid, addr int, regsout *PtraceRegsArm64) error {
iovec := Iovec{(*byte)(unsafe.Pointer(regsout)), uint64(unsafe.Sizeof(*regsout))}
return ptrace(PTRACE_GETREGSET, pid, uintptr(addr), uintptr(unsafe.Pointer(&iovec)))
}

// PtraceSetRegSetArm64 sets the registers used by arm64 binaries.
func PtraceSetRegSetArm64(pid, addr int, regs *PtraceRegsArm64) error {
iovec := Iovec{(*byte)(unsafe.Pointer(regs)), uint64(unsafe.Sizeof(*regs))}
return ptrace(PTRACE_SETREGSET, pid, uintptr(addr), uintptr(unsafe.Pointer(&iovec)))
}
@@ -1,4 +1,4 @@
// Code generated by linux/mkall.go generatePtracePair(mips, mips64). DO NOT EDIT.
// Code generated by linux/mkall.go generatePtracePair("mips", "mips64"). DO NOT EDIT.

// +build linux
// +build mips mips64
@@ -1,4 +1,4 @@
// Code generated by linux/mkall.go generatePtracePair(mipsle, mips64le). DO NOT EDIT.
// Code generated by linux/mkall.go generatePtracePair("mipsle", "mips64le"). DO NOT EDIT.

// +build linux
// +build mipsle mips64le
@@ -1,4 +1,4 @@
// Code generated by linux/mkall.go generatePtracePair(386, amd64). DO NOT EDIT.
// Code generated by linux/mkall.go generatePtracePair("386", "amd64"). DO NOT EDIT.

// +build linux
// +build 386 amd64
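PtraceGetRegSetArm64/PtraceSetRegSetArm64 wrap PTRACE_GETREGSET/PTRACE_SETREGSET with an Iovec, which is the regset-based way to read an arm64 tracee's registers. A hedged sketch of reading and rewriting the register set of an already-attached, stopped tracee; the pid is a placeholder and NT_PRSTATUS is defined locally rather than assumed to be exported by the package.

// +build linux
// +build arm arm64

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

// ELF note type selecting the general-purpose register set (assumed value).
const _NT_PRSTATUS = 1

// bumpPC adds 4 to the tracee's program counter. pid must refer to a traced,
// stopped process (for example after unix.PtraceAttach and a wait).
func bumpPC(pid int) error {
	var regs unix.PtraceRegsArm64
	if err := unix.PtraceGetRegSetArm64(pid, _NT_PRSTATUS, &regs); err != nil {
		return err
	}
	regs.Pc += 4
	return unix.PtraceSetRegSetArm64(pid, _NT_PRSTATUS, &regs)
}

func main() {
	if err := bumpPC(12345); err != nil { // placeholder pid
		log.Fatal(err)
	}
}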
25 vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go (generated, vendored)
@@ -926,6 +926,16 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func Fstatvfs1(fd int, buf *Statvfs_t, flags int) (err error) {
_, _, e1 := Syscall(SYS_FSTATVFS1, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(flags))
if e1 != 0 {
err = errnoErr(e1)
}
return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func Fsync(fd int) (err error) {
_, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
if e1 != 0 {
@@ -1635,6 +1645,21 @@ func Stat(path string, stat *Stat_t) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func Statvfs1(path string, buf *Statvfs_t, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_STATVFS1, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), uintptr(flags))
if e1 != 0 {
err = errnoErr(e1)
}
return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func Symlink(path string, link string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
25 vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go (generated, vendored)
Same additions as zsyscall_netbsd_386.go above: the generated Fstatvfs1 and Statvfs1 syscall wrappers.
25 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go (generated, vendored)
Same additions as zsyscall_netbsd_386.go above: the generated Fstatvfs1 and Statvfs1 syscall wrappers.
25 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go (generated, vendored)
Same additions as zsyscall_netbsd_386.go above: the generated Fstatvfs1 and Statvfs1 syscall wrappers.
32 vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go (generated, vendored)
@@ -78,6 +78,33 @@ type Stat_t struct {
type Statfs_t [0]byte

type Statvfs_t struct {
Flag uint32
Bsize uint32
Frsize uint32
Iosize uint32
Blocks uint64
Bfree uint64
Bavail uint64
Bresvd uint64
Files uint64
Ffree uint64
Favail uint64
Fresvd uint64
Syncreads uint64
Syncwrites uint64
Asyncreads uint64
Asyncwrites uint64
Fsidx Fsid
Fsid uint32
Namemax uint32
Owner uint32
Spare [4]uint32
Fstypename [32]byte
Mntonname [1024]byte
Mntfromname [1024]byte
}

type Flock_t struct {
Start int64
Len int64
@@ -103,6 +130,11 @@ const (
PathMax = 0x400
)

const (
ST_WAIT = 0x1
ST_NOWAIT = 0x2
)

const (
FADV_NORMAL = 0x0
FADV_RANDOM = 0x1
33 vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go (generated, vendored)
@@ -82,6 +82,34 @@ type Stat_t struct {
type Statfs_t [0]byte

type Statvfs_t struct {
Flag uint64
Bsize uint64
Frsize uint64
Iosize uint64
Blocks uint64
Bfree uint64
Bavail uint64
Bresvd uint64
Files uint64
Ffree uint64
Favail uint64
Fresvd uint64
Syncreads uint64
Syncwrites uint64
Asyncreads uint64
Asyncwrites uint64
Fsidx Fsid
Fsid uint64
Namemax uint64
Owner uint32
Spare [4]uint32
Fstypename [32]byte
Mntonname [1024]byte
Mntfromname [1024]byte
_ [4]byte
}

type Flock_t struct {
Start int64
Len int64
@@ -107,6 +135,11 @@ const (
PathMax = 0x400
)

const (
ST_WAIT = 0x1
ST_NOWAIT = 0x2
)

const (
FADV_NORMAL = 0x0
FADV_RANDOM = 0x1
32 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go (generated, vendored)
Same additions as ztypes_netbsd_386.go above: the 32-bit Statvfs_t layout and the ST_WAIT/ST_NOWAIT constants.
33 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go (generated, vendored)
Same additions as ztypes_netbsd_amd64.go above: the 64-bit Statvfs_t layout (with trailing padding) and the ST_WAIT/ST_NOWAIT constants.
3 vendor/golang.org/x/tools/go/packages/doc.go (generated, vendored)
@@ -60,8 +60,7 @@ causes Load to run in LoadFiles mode, collecting minimal information.
See the documentation for type Config for details.

As noted earlier, the Config.Mode controls the amount of detail
reported about the loaded packages, with each mode returning all the data of the
previous mode with some extra added. See the documentation for type LoadMode
reported about the loaded packages. See the documentation for type LoadMode
for details.

Most tools should pass their command-line arguments (after any flags)
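To make doc.go's point about Config.Mode and LoadMode concrete, here is a minimal sketch that loads only the names and file lists of the packages named on the command line; the Need* mode bits are assumed from the same package.

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		// Ask only for package names and file lists; adding Need* bits adds detail.
		Mode: packages.NeedName | packages.NeedFiles,
	}
	pkgs, err := packages.Load(cfg, os.Args[1:]...)
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pkgs {
		fmt.Println(p.PkgPath, len(p.GoFiles), "Go files")
	}
}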
121 vendor/golang.org/x/tools/go/packages/golist.go (generated, vendored)
@@ -26,7 +26,6 @@ import (
"golang.org/x/tools/go/internal/packagesdriver"
"golang.org/x/tools/internal/gopathwalk"
"golang.org/x/tools/internal/semver"
"golang.org/x/tools/internal/span"
)

// debug controls verbose logging.
@@ -284,80 +283,18 @@ func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, q
return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err)
}
dirResponse, err := driver(cfg, pattern)
if err != nil {
var queryErr error
if dirResponse, queryErr = adHocPackage(cfg, driver, pattern, query); queryErr != nil {
return err // return the original error
}
}
// `go list` can report errors for files that are not listed as part of a package's GoFiles.
// In the case of an invalid Go file, we should assume that it is part of package if only
// one package is in the response. The file may have valid contents in an overlay.
if len(dirResponse.Packages) == 1 {
pkg := dirResponse.Packages[0]
for i, err := range pkg.Errors {
s := errorSpan(err)
if !s.IsValid() {
break
}
if len(pkg.CompiledGoFiles) == 0 {
break
}
dir := filepath.Dir(pkg.CompiledGoFiles[0])
filename := filepath.Join(dir, filepath.Base(s.URI().Filename()))
if info, err := os.Stat(filename); err != nil || info.IsDir() {
break
}
if !contains(pkg.CompiledGoFiles, filename) {
pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename)
pkg.GoFiles = append(pkg.GoFiles, filename)
pkg.Errors = append(pkg.Errors[:i], pkg.Errors[i+1:]...)
}
}
}
// A final attempt to construct an ad-hoc package.
if len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].Errors) == 1 {
var queryErr error
if dirResponse, queryErr = adHocPackage(cfg, driver, pattern, query); queryErr != nil {
return err // return the original error
}
}
isRoot := make(map[string]bool, len(dirResponse.Roots))
for _, root := range dirResponse.Roots {
isRoot[root] = true
}
for _, pkg := range dirResponse.Packages {
// Add any new packages to the main set
// We don't bother to filter packages that will be dropped by the changes of roots,
// that will happen anyway during graph construction outside this function.
// Over-reporting packages is not a problem.
response.addPackage(pkg)
// if the package was not a root one, it cannot have the file
if !isRoot[pkg.ID] {
continue
}
for _, pkgFile := range pkg.GoFiles {
if filepath.Base(query) == filepath.Base(pkgFile) {
response.addRoot(pkg.ID)
break
}
}
}
}
return nil
}

// adHocPackage attempts to construct an ad-hoc package given a query that failed.
func adHocPackage(cfg *Config, driver driver, pattern, query string) (*driverResponse, error) {
if err != nil || (len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].Errors) == 1) {
// There was an error loading the package. Try to load the file as an ad-hoc package.
// Usually the error will appear in a returned package, but may not if we're in modules mode
// and the ad-hoc is located outside a module.
dirResponse, err := driver(cfg, query)
if err != nil {
return nil, err
var queryErr error
dirResponse, queryErr = driver(cfg, query)
if queryErr != nil {
// Return the original error if the attempt to fall back failed.
return err
}
// If we get nothing back from `go list`, try to make this file into its own ad-hoc package.
if len(dirResponse.Packages) == 0 && err == nil {
if len(dirResponse.Packages) == 0 && queryErr == nil {
dirResponse.Packages = append(dirResponse.Packages, &Package{
ID: "command-line-arguments",
PkgPath: query,
@@ -384,36 +321,30 @@ func adHocPackage(cfg *Config, driver driver, pattern, query string) (*driverRes
}
}
}
return dirResponse, nil
}

func contains(files []string, filename string) bool {
for _, f := range files {
if f == filename {
return true
isRoot := make(map[string]bool, len(dirResponse.Roots))
for _, root := range dirResponse.Roots {
isRoot[root] = true
}
for _, pkg := range dirResponse.Packages {
// Add any new packages to the main set
// We don't bother to filter packages that will be dropped by the changes of roots,
// that will happen anyway during graph construction outside this function.
// Over-reporting packages is not a problem.
response.addPackage(pkg)
// if the package was not a root one, it cannot have the file
if !isRoot[pkg.ID] {
continue
}
for _, pkgFile := range pkg.GoFiles {
if filepath.Base(query) == filepath.Base(pkgFile) {
response.addRoot(pkg.ID)
break
}
}
return false
}

// errorSpan attempts to parse a standard `go list` error message
// by stripping off the trailing error message.
//
// It works only on errors whose message is prefixed by colon,
// followed by a space (": "). For example:
//
//   attributes.go:13:1: expected 'package', found 'type'
//
func errorSpan(err Error) span.Span {
if err.Pos == "" {
input := strings.TrimSpace(err.Msg)
msgIndex := strings.Index(input, ": ")
if msgIndex < 0 {
return span.Parse(input)
}
return span.Parse(input[:msgIndex])
}
return span.Parse(err.Pos)
return nil
}

// modCacheRegexp splits a path in a module cache into module, module version, and package.
22 vendor/golang.org/x/tools/internal/imports/imports.go (generated, vendored)
@@ -83,23 +83,35 @@ func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) {
return getFixes(fileSet, file, filename, opt.Env)
}

// ApplyFix will apply all of the fixes to the file and format it.
func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options) (formatted []byte, err error) {
// ApplyFixes applies all of the fixes to the file and formats it. extraMode
// is added in when parsing the file.
func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, extraMode parser.Mode) (formatted []byte, err error) {
src, opt, err = initialize(filename, src, opt)
if err != nil {
return nil, err
}

// Don't use parse() -- we don't care about fragments or statement lists
// here, and we need to work with unparseable files.
fileSet := token.NewFileSet()
file, adjust, err := parse(fileSet, filename, src, opt)
if err != nil {
parserMode := parser.Mode(0)
if opt.Comments {
parserMode |= parser.ParseComments
}
if opt.AllErrors {
parserMode |= parser.AllErrors
}
parserMode |= extraMode

file, err := parser.ParseFile(fileSet, filename, src, parserMode)
if file == nil {
return nil, err
}

// Apply the fixes to the file.
apply(fileSet, file, fixes)

return formatFile(fileSet, file, src, adjust, opt)
return formatFile(fileSet, file, src, nil, opt)
}

// GetAllCandidates gets all of the standard library candidate packages to import in
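ApplyFixes is internal; the public entry point that exercises the same Comments/AllErrors parser flags is golang.org/x/tools/imports.Process. A sketch formatting an in-memory file; the source snippet is a placeholder.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/imports"
)

func main() {
	src := []byte("package demo\n\nfunc Hello() { fmt.Println(\"hi\") }\n") // missing "fmt" import

	// Process resolves the missing import and gofmt-formats the result.
	out, err := imports.Process("demo.go", src, &imports.Options{Comments: true})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(out))
}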
100 vendor/golang.org/x/tools/internal/span/parse.go (generated, vendored)
@@ -1,100 +0,0 @@
File deleted: the span.Parse location parser and its rstripSuffix helper are removed with the rest of the vendored span package.
285 vendor/golang.org/x/tools/internal/span/span.go (generated, vendored)
@@ -1,285 +0,0 @@
File deleted: the Span, Point, and Converter types, their comparison and formatting helpers, and the position/offset update logic are removed.
179
vendor/golang.org/x/tools/internal/span/token.go
generated
vendored
179
vendor/golang.org/x/tools/internal/span/token.go
generated
vendored
|
@ -1,179 +0,0 @@
|
|||
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package span

import (
	"fmt"
	"go/token"
)

// Range represents a source code range in token.Pos form.
// It also carries the FileSet that produced the positions, so that it is
// self contained.
type Range struct {
	FileSet   *token.FileSet
	Start     token.Pos
	End       token.Pos
	Converter Converter
}

// TokenConverter is a Converter backed by a token file set and file.
// It uses the file set methods to work out the conversions, which
// makes it fast and does not require the file contents.
type TokenConverter struct {
	fset *token.FileSet
	file *token.File
}

// NewRange creates a new Range from a FileSet and two positions.
// To represent a point pass a 0 as the end pos.
func NewRange(fset *token.FileSet, start, end token.Pos) Range {
	return Range{
		FileSet: fset,
		Start:   start,
		End:     end,
	}
}

// NewTokenConverter returns an implementation of Converter backed by a
// token.File.
func NewTokenConverter(fset *token.FileSet, f *token.File) *TokenConverter {
	return &TokenConverter{fset: fset, file: f}
}

// NewContentConverter returns an implementation of Converter for the
// given file content.
func NewContentConverter(filename string, content []byte) *TokenConverter {
	fset := token.NewFileSet()
	f := fset.AddFile(filename, -1, len(content))
	f.SetLinesForContent(content)
	return &TokenConverter{fset: fset, file: f}
}

// IsPoint returns true if the range represents a single point.
func (r Range) IsPoint() bool {
	return r.Start == r.End
}

// Span converts a Range to a Span that represents the Range.
// It will fill in all the members of the Span, calculating the line and column
// information.
func (r Range) Span() (Span, error) {
	f := r.FileSet.File(r.Start)
	if f == nil {
		return Span{}, fmt.Errorf("file not found in FileSet")
	}
	var s Span
	var err error
	var startFilename string
	startFilename, s.v.Start.Line, s.v.Start.Column, err = position(f, r.Start)
	if err != nil {
		return Span{}, err
	}
	s.v.URI = FileURI(startFilename)
	if r.End.IsValid() {
		var endFilename string
		endFilename, s.v.End.Line, s.v.End.Column, err = position(f, r.End)
		if err != nil {
			return Span{}, err
		}
		// In the presence of line directives, a single File can have sections from
		// multiple file names.
		if endFilename != startFilename {
			return Span{}, fmt.Errorf("span begins in file %q but ends in %q", startFilename, endFilename)
		}
	}
	s.v.Start.clean()
	s.v.End.clean()
	s.v.clean()
	if r.Converter != nil {
		return s.WithOffset(r.Converter)
	}
	if startFilename != f.Name() {
		return Span{}, fmt.Errorf("must supply Converter for file %q containing lines from %q", f.Name(), startFilename)
	}
	return s.WithOffset(NewTokenConverter(r.FileSet, f))
}

func position(f *token.File, pos token.Pos) (string, int, int, error) {
	off, err := offset(f, pos)
	if err != nil {
		return "", 0, 0, err
	}
	return positionFromOffset(f, off)
}

func positionFromOffset(f *token.File, offset int) (string, int, int, error) {
	if offset > f.Size() {
		return "", 0, 0, fmt.Errorf("offset %v is past the end of the file %v", offset, f.Size())
	}
	pos := f.Pos(offset)
	p := f.Position(pos)
	if offset == f.Size() {
		return p.Filename, p.Line + 1, 1, nil
	}
	return p.Filename, p.Line, p.Column, nil
}

// offset is a copy of the Offset function in go/token, but with the adjustment
// that it does not panic on invalid positions.
func offset(f *token.File, pos token.Pos) (int, error) {
	if int(pos) < f.Base() || int(pos) > f.Base()+f.Size() {
		return 0, fmt.Errorf("invalid pos")
	}
	return int(pos) - f.Base(), nil
}

// Range converts a Span to a Range that represents the Span for the supplied
// File.
func (s Span) Range(converter *TokenConverter) (Range, error) {
	s, err := s.WithOffset(converter)
	if err != nil {
		return Range{}, err
	}
	// go/token will panic if the offset is larger than the file's size,
	// so check here to avoid panicking.
	if s.Start().Offset() > converter.file.Size() {
		return Range{}, fmt.Errorf("start offset %v is past the end of the file %v", s.Start(), converter.file.Size())
	}
	if s.End().Offset() > converter.file.Size() {
		return Range{}, fmt.Errorf("end offset %v is past the end of the file %v", s.End(), converter.file.Size())
	}
	return Range{
		FileSet:   converter.fset,
		Start:     converter.file.Pos(s.Start().Offset()),
		End:       converter.file.Pos(s.End().Offset()),
		Converter: converter,
	}, nil
}

func (l *TokenConverter) ToPosition(offset int) (int, int, error) {
	_, line, col, err := positionFromOffset(l.file, offset)
	return line, col, err
}

func (l *TokenConverter) ToOffset(line, col int) (int, error) {
	if line < 0 {
		return -1, fmt.Errorf("line is not valid")
	}
	lineMax := l.file.LineCount() + 1
	if line > lineMax {
		return -1, fmt.Errorf("line is beyond end of file %v", lineMax)
	} else if line == lineMax {
		if col > 1 {
			return -1, fmt.Errorf("column is beyond end of file")
		}
		// at the end of the file, allowing for a trailing eol
		return l.file.Size(), nil
	}
	pos := lineStart(l.file, line)
	if !pos.IsValid() {
		return -1, fmt.Errorf("line is not in file")
	}
	// we assume that column is in bytes here, and that the first byte of a
	// line is at column 1
	pos += token.Pos(col - 1)
	return offset(l.file, pos)
}
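A minimal sketch of how the deleted Range/Span conversion fits together, with the same internal-only caveat as above (the import path is shown purely for illustration; NewRange, Span, Start and End are the functions and methods defined in this file):

// Illustrative sketch only: the span package is internal to x/tools.
package main

import (
	"fmt"
	"go/parser"
	"go/token"

	"golang.org/x/tools/internal/span"
)

func main() {
	src := "package main\n\nfunc main() {}\n"
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "main.go", src, 0)
	if err != nil {
		panic(err)
	}
	// Wrap the token positions of the first declaration in a Range and
	// convert it to a Span; Span() picks a TokenConverter automatically
	// because the Range carries its FileSet.
	decl := file.Decls[0]
	r := span.NewRange(fset, decl.Pos(), decl.End())
	s, err := r.Span()
	if err != nil {
		panic(err)
	}
	// Byte offsets of the func declaration: 14 28 for this source.
	fmt.Println(s.Start().Offset(), s.End().Offset())
}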
39 vendor/golang.org/x/tools/internal/span/token111.go generated vendored
@@ -1,39 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.12

package span

import (
	"go/token"
)

// lineStart is the pre-Go 1.12 version of (*token.File).LineStart. For Go
// versions <= 1.11, we borrow logic from the analysisutil package.
// TODO(rstambler): Delete this file when we no longer support Go 1.11.
func lineStart(f *token.File, line int) token.Pos {
	// Use binary search to find the start offset of this line.

	min := 0        // inclusive
	max := f.Size() // exclusive
	for {
		offset := (min + max) / 2
		pos := f.Pos(offset)
		posn := f.Position(pos)
		if posn.Line == line {
			return pos - (token.Pos(posn.Column) - 1)
		}

		if min+1 >= max {
			return token.NoPos
		}

		if posn.Line < line {
			min = offset
		} else {
			max = offset
		}
	}
}
16 vendor/golang.org/x/tools/internal/span/token112.go generated vendored
@@ -1,16 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.12

package span

import (
	"go/token"
)

// TODO(rstambler): Delete this file when we no longer support Go 1.11.
func lineStart(f *token.File, line int) token.Pos {
	return f.LineStart(line)
}
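Both build-tagged files compute the same thing: the token.Pos of the first character of a given line. A standard-library-only sketch of the Go 1.12+ path:

package main

import (
	"fmt"
	"go/token"
)

func main() {
	content := []byte("first line\nsecond line\nthird line\n")
	fset := token.NewFileSet()
	f := fset.AddFile("example.txt", -1, len(content))
	f.SetLinesForContent(content)

	pos := f.LineStart(2)           // start of "second line"
	fmt.Println(fset.Position(pos)) // example.txt:2:1
}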
152 vendor/golang.org/x/tools/internal/span/uri.go generated vendored
@@ -1,152 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package span

import (
	"fmt"
	"net/url"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"strings"
	"unicode"
)

const fileScheme = "file"

// URI represents the full URI for a file.
type URI string

// Filename returns the file path for the given URI.
// It is an error to call this on a URI that is not a valid filename.
func (uri URI) Filename() string {
	filename, err := filename(uri)
	if err != nil {
		panic(err)
	}
	return filepath.FromSlash(filename)
}

func filename(uri URI) (string, error) {
	if uri == "" {
		return "", nil
	}
	u, err := url.ParseRequestURI(string(uri))
	if err != nil {
		return "", err
	}
	if u.Scheme != fileScheme {
		return "", fmt.Errorf("only file URIs are supported, got %q from %q", u.Scheme, uri)
	}
	if isWindowsDriveURI(u.Path) {
		u.Path = u.Path[1:]
	}
	return u.Path, nil
}

// NewURI returns a span URI for the string.
// It will attempt to detect if the string is a file path or uri.
func NewURI(s string) URI {
	if u, err := url.PathUnescape(s); err == nil {
		s = u
	}
	if strings.HasPrefix(s, fileScheme+"://") {
		return URI(s)
	}
	return FileURI(s)
}

func CompareURI(a, b URI) int {
	if equalURI(a, b) {
		return 0
	}
	if a < b {
		return -1
	}
	return 1
}

func equalURI(a, b URI) bool {
	if a == b {
		return true
	}
	// If we have the same URI basename, we may still have the same file URIs.
	if !strings.EqualFold(path.Base(string(a)), path.Base(string(b))) {
		return false
	}
	fa, err := filename(a)
	if err != nil {
		return false
	}
	fb, err := filename(b)
	if err != nil {
		return false
	}
	// Stat the files to check if they are equal.
	infoa, err := os.Stat(filepath.FromSlash(fa))
	if err != nil {
		return false
	}
	infob, err := os.Stat(filepath.FromSlash(fb))
	if err != nil {
		return false
	}
	return os.SameFile(infoa, infob)
}

// FileURI returns a span URI for the supplied file path.
// It will always have the file scheme.
func FileURI(path string) URI {
	if path == "" {
		return ""
	}
	// Handle standard library paths that contain the literal "$GOROOT".
	// TODO(rstambler): The go/packages API should allow one to determine a user's $GOROOT.
	const prefix = "$GOROOT"
	if len(path) >= len(prefix) && strings.EqualFold(prefix, path[:len(prefix)]) {
		suffix := path[len(prefix):]
		path = runtime.GOROOT() + suffix
	}
	if !isWindowsDrivePath(path) {
		if abs, err := filepath.Abs(path); err == nil {
			path = abs
		}
	}
	// Check the file path again, in case it became absolute.
	if isWindowsDrivePath(path) {
		path = "/" + path
	}
	path = filepath.ToSlash(path)
	u := url.URL{
		Scheme: fileScheme,
		Path:   path,
	}
	uri := u.String()
	if unescaped, err := url.PathUnescape(uri); err == nil {
		uri = unescaped
	}
	return URI(uri)
}

// isWindowsDrivePath returns true if the file path is of the form used by
// Windows. We check if the path begins with a drive letter, followed by a ":".
func isWindowsDrivePath(path string) bool {
	if len(path) < 4 {
		return false
	}
	return unicode.IsLetter(rune(path[0])) && path[1] == ':'
}

// isWindowsDriveURI returns true if the file URI is of the format used by
// Windows URIs. The url.Parse package does not specially handle Windows paths
// (see https://golang.org/issue/6027). We check if the URI path has
// a drive prefix (e.g. "/C:"). If so, we trim the leading "/".
func isWindowsDriveURI(uri string) bool {
	if len(uri) < 4 {
		return false
	}
	return uri[0] == '/' && unicode.IsLetter(rune(uri[1])) && uri[2] == ':'
}
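A self-contained sketch restating the Windows-drive check above (the helper mirrors the deleted isWindowsDriveURI), showing why filename() trims the leading "/" from URI paths such as "/C:/Users/gopher/main.go"; the sample path is hypothetical:

package main

import (
	"fmt"
	"net/url"
	"unicode"
)

// isWindowsDriveURI reports whether a URI path carries a Windows drive
// prefix such as "/C:", as in the deleted uri.go above.
func isWindowsDriveURI(path string) bool {
	if len(path) < 4 {
		return false
	}
	return path[0] == '/' && unicode.IsLetter(rune(path[1])) && path[2] == ':'
}

func main() {
	u, err := url.ParseRequestURI("file:///C:/Users/gopher/main.go")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Path) // /C:/Users/gopher/main.go
	if isWindowsDriveURI(u.Path) {
		fmt.Println(u.Path[1:]) // C:/Users/gopher/main.go
	}
}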
94 vendor/golang.org/x/tools/internal/span/utf16.go generated vendored
@@ -1,94 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package span

import (
	"fmt"
	"unicode/utf16"
	"unicode/utf8"
)

// ToUTF16Column calculates the utf16 column expressed by the point given the
// supplied file contents.
// This is used to convert from the native (always in bytes) column
// representation and the utf16 counts used by some editors.
func ToUTF16Column(p Point, content []byte) (int, error) {
	if content == nil {
		return -1, fmt.Errorf("ToUTF16Column: missing content")
	}
	if !p.HasPosition() {
		return -1, fmt.Errorf("ToUTF16Column: point is missing position")
	}
	if !p.HasOffset() {
		return -1, fmt.Errorf("ToUTF16Column: point is missing offset")
	}
	offset := p.Offset()      // 0-based
	colZero := p.Column() - 1 // 0-based
	if colZero == 0 {
		// 0-based column 0, so it must be chr 1
		return 1, nil
	} else if colZero < 0 {
		return -1, fmt.Errorf("ToUTF16Column: column is invalid (%v)", colZero)
	}
	// work out the offset at the start of the line using the column
	lineOffset := offset - colZero
	if lineOffset < 0 || offset > len(content) {
		return -1, fmt.Errorf("ToUTF16Column: offsets %v-%v outside file contents (%v)", lineOffset, offset, len(content))
	}
	// Use the offset to pick out the line start.
	// This cannot panic: offset > len(content) and lineOffset < offset.
	start := content[lineOffset:]

	// Now, truncate down to the supplied column.
	start = start[:colZero]

	// and count the number of utf16 characters
	// in theory we could do this by hand more efficiently...
	return len(utf16.Encode([]rune(string(start)))) + 1, nil
}

// FromUTF16Column advances the point by the utf16 character offset given the
// supplied line contents.
// This is used to convert from the utf16 counts used by some editors to the
// native (always in bytes) column representation.
func FromUTF16Column(p Point, chr int, content []byte) (Point, error) {
	if !p.HasOffset() {
		return Point{}, fmt.Errorf("FromUTF16Column: point is missing offset")
	}
	// if chr is 1 then no adjustment needed
	if chr <= 1 {
		return p, nil
	}
	if p.Offset() >= len(content) {
		return p, fmt.Errorf("FromUTF16Column: offset (%v) greater than length of content (%v)", p.Offset(), len(content))
	}
	remains := content[p.Offset():]
	// scan forward the specified number of characters
	for count := 1; count < chr; count++ {
		if len(remains) <= 0 {
			return Point{}, fmt.Errorf("FromUTF16Column: chr goes beyond the content")
		}
		r, w := utf8.DecodeRune(remains)
		if r == '\n' {
			// Per the LSP spec:
			//
			// > If the character value is greater than the line length it
			// > defaults back to the line length.
			break
		}
		remains = remains[w:]
		if r >= 0x10000 {
			// a two point rune
			count++
			// if we finished in a two point rune, do not advance past the first
			if count >= chr {
				break
			}
		}
		p.v.Column += w
		p.v.Offset += w
	}
	return p, nil
}
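A standard-library sketch of the core fact ToUTF16Column and FromUTF16Column above deal with: byte, rune, and UTF-16 counts diverge once a line contains characters outside the Basic Multilingual Plane.

package main

import (
	"fmt"
	"unicode/utf16"
)

func main() {
	line := "x𐐀y" // '𐐀' (U+10400) is 4 bytes in UTF-8 and 2 UTF-16 code units
	fmt.Println(len(line))                       // 6 bytes
	fmt.Println(len([]rune(line)))               // 3 runes
	fmt.Println(len(utf16.Encode([]rune(line)))) // 4 UTF-16 code units
}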
19 vendor/modules.txt vendored
@@ -14,8 +14,9 @@ github.com/BurntSushi/toml
github.com/VictoriaMetrics/fastcache
# github.com/VictoriaMetrics/metrics v1.9.2
github.com/VictoriaMetrics/metrics
# github.com/aws/aws-sdk-go v1.25.43
# github.com/aws/aws-sdk-go v1.25.48
github.com/aws/aws-sdk-go/aws
github.com/aws/aws-sdk-go/aws/arn
github.com/aws/aws-sdk-go/aws/awserr
github.com/aws/aws-sdk-go/aws/awsutil
github.com/aws/aws-sdk-go/aws/client
@@ -50,6 +51,7 @@ github.com/aws/aws-sdk-go/private/protocol/rest
github.com/aws/aws-sdk-go/private/protocol/restxml
github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil
github.com/aws/aws-sdk-go/service/s3
github.com/aws/aws-sdk-go/service/s3/internal/arn
github.com/aws/aws-sdk-go/service/s3/s3iface
github.com/aws/aws-sdk-go/service/s3/s3manager
github.com/aws/aws-sdk-go/service/sts
@@ -80,7 +82,7 @@ github.com/jmespath/go-jmespath
github.com/jstemmer/go-junit-report
github.com/jstemmer/go-junit-report/formatter
github.com/jstemmer/go-junit-report/parser
# github.com/klauspost/compress v1.9.2
# github.com/klauspost/compress v1.9.4
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
github.com/klauspost/compress/snappy
@@ -116,13 +118,13 @@ go.opencensus.io/trace
go.opencensus.io/trace/internal
go.opencensus.io/trace/propagation
go.opencensus.io/trace/tracestate
# golang.org/x/exp v0.0.0-20191127035308-9964a5a80460
# golang.org/x/exp v0.0.0-20191129062945-2f5052295587
golang.org/x/exp/apidiff
golang.org/x/exp/cmd/apidiff
# golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f
golang.org/x/lint
golang.org/x/lint/golint
# golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933
# golang.org/x/net v0.0.0-20191207000613-e7e4b65ae663
golang.org/x/net/context
golang.org/x/net/context/ctxhttp
golang.org/x/net/http/httpguts
@@ -131,20 +133,20 @@ golang.org/x/net/http2/hpack
golang.org/x/net/idna
golang.org/x/net/internal/timeseries
golang.org/x/net/trace
# golang.org/x/oauth2 v0.0.0-20191122200657-5d9234df094c
# golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6
golang.org/x/oauth2
golang.org/x/oauth2/google
golang.org/x/oauth2/internal
golang.org/x/oauth2/jws
golang.org/x/oauth2/jwt
# golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2
# golang.org/x/sys v0.0.0-20191206220618-eeba5f6aabab
golang.org/x/sys/unix
# golang.org/x/text v0.3.2
golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
# golang.org/x/tools v0.0.0-20191127064951-724660f1afeb
# golang.org/x/tools v0.0.0-20191206204035-259af5ff87bd
golang.org/x/tools/cmd/goimports
golang.org/x/tools/go/analysis
golang.org/x/tools/go/analysis/passes/inspect
@@ -162,7 +164,6 @@ golang.org/x/tools/internal/gopathwalk
golang.org/x/tools/internal/imports
golang.org/x/tools/internal/module
golang.org/x/tools/internal/semver
golang.org/x/tools/internal/span
# google.golang.org/api v0.14.0
google.golang.org/api/googleapi
google.golang.org/api/googleapi/transport
@@ -185,7 +186,7 @@ google.golang.org/appengine/internal/modules
google.golang.org/appengine/internal/remote_api
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/urlfetch
# google.golang.org/genproto v0.0.0-20191115221424-83cc0476cb11
# google.golang.org/genproto v0.0.0-20191206224255-0243a4be9c8f
google.golang.org/genproto/googleapis/api/annotations
google.golang.org/genproto/googleapis/iam/v1
google.golang.org/genproto/googleapis/rpc/code